Bug Summary

File: llvm/lib/Target/ARM/AsmParser/ARMAsmParser.cpp
Warning: line 11140, column 11
Called C++ object pointer is null
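
For context, "Called C++ object pointer is null" is the diagnostic the core checkers (core.CallAndMessage) emit when a non-static member function is invoked through a pointer that is null on at least one feasible path. The flagged call is at line 11140, which falls outside the excerpt below; the following minimal sketch is hypothetical code (not taken from ARMAsmParser.cpp) showing the general pattern the checker reports:

    // Hypothetical example of the reported pattern; all names are invented.
    struct Operand {
      bool isReg() const { return true; }
    };

    bool checkOperand(const Operand *Op) {
      if (!Op) {
        // Missing early return: Op stays null on the fall-through path...
      }
      return Op->isReg(); // ...so this member call is made on a null pointer.
    }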

Annotated Source Code


clang -cc1 -triple x86_64-pc-linux-gnu -analyze -disable-free -disable-llvm-verifier -discard-value-names -main-file-name ARMAsmParser.cpp -analyzer-store=region -analyzer-opt-analyze-nested-blocks -analyzer-checker=core -analyzer-checker=apiModeling -analyzer-checker=unix -analyzer-checker=deadcode -analyzer-checker=cplusplus -analyzer-checker=security.insecureAPI.UncheckedReturn -analyzer-checker=security.insecureAPI.getpw -analyzer-checker=security.insecureAPI.gets -analyzer-checker=security.insecureAPI.mktemp -analyzer-checker=security.insecureAPI.mkstemp -analyzer-checker=security.insecureAPI.vfork -analyzer-checker=nullability.NullPassedToNonnull -analyzer-checker=nullability.NullReturnedFromNonnull -analyzer-output plist -w -setup-static-analyzer -analyzer-config-compatibility-mode=true -mrelocation-model pic -pic-level 2 -mthread-model posix -mframe-pointer=none -fmath-errno -fno-rounding-math -masm-verbose -mconstructor-aliases -munwind-tables -target-cpu x86-64 -dwarf-column-info -fno-split-dwarf-inlining -debugger-tuning=gdb -ffunction-sections -fdata-sections -resource-dir /usr/lib/llvm-11/lib/clang/11.0.0 -D _DEBUG -D _GNU_SOURCE -D __STDC_CONSTANT_MACROS -D __STDC_FORMAT_MACROS -D __STDC_LIMIT_MACROS -I /build/llvm-toolchain-snapshot-11~++20200309111110+2c36c23f347/build-llvm/lib/Target/ARM/AsmParser -I /build/llvm-toolchain-snapshot-11~++20200309111110+2c36c23f347/llvm/lib/Target/ARM/AsmParser -I /build/llvm-toolchain-snapshot-11~++20200309111110+2c36c23f347/llvm/lib/Target/ARM -I /build/llvm-toolchain-snapshot-11~++20200309111110+2c36c23f347/build-llvm/lib/Target/ARM -I /build/llvm-toolchain-snapshot-11~++20200309111110+2c36c23f347/build-llvm/include -I /build/llvm-toolchain-snapshot-11~++20200309111110+2c36c23f347/llvm/include -U NDEBUG -internal-isystem /usr/lib/gcc/x86_64-linux-gnu/6.3.0/../../../../include/c++/6.3.0 -internal-isystem /usr/lib/gcc/x86_64-linux-gnu/6.3.0/../../../../include/x86_64-linux-gnu/c++/6.3.0 -internal-isystem /usr/lib/gcc/x86_64-linux-gnu/6.3.0/../../../../include/x86_64-linux-gnu/c++/6.3.0 -internal-isystem /usr/lib/gcc/x86_64-linux-gnu/6.3.0/../../../../include/c++/6.3.0/backward -internal-isystem /usr/local/include -internal-isystem /usr/lib/llvm-11/lib/clang/11.0.0/include -internal-externc-isystem /usr/include/x86_64-linux-gnu -internal-externc-isystem /include -internal-externc-isystem /usr/include -O2 -Wno-unused-parameter -Wwrite-strings -Wno-missing-field-initializers -Wno-long-long -Wno-maybe-uninitialized -Wno-comment -std=c++14 -fdeprecated-macro -fdebug-compilation-dir /build/llvm-toolchain-snapshot-11~++20200309111110+2c36c23f347/build-llvm/lib/Target/ARM/AsmParser -fdebug-prefix-map=/build/llvm-toolchain-snapshot-11~++20200309111110+2c36c23f347=. -ferror-limit 19 -fmessage-length 0 -fvisibility hidden -fvisibility-inlines-hidden -stack-protector 2 -fgnuc-version=4.2.1 -fobjc-runtime=gcc -fdiagnostics-show-option -vectorize-loops -vectorize-slp -analyzer-output=html -analyzer-config stable-report-filename=true -faddrsig -o /tmp/scan-build-2020-03-09-184146-41876-1 -x c++ /build/llvm-toolchain-snapshot-11~++20200309111110+2c36c23f347/llvm/lib/Target/ARM/AsmParser/ARMAsmParser.cpp

/build/llvm-toolchain-snapshot-11~++20200309111110+2c36c23f347/llvm/lib/Target/ARM/AsmParser/ARMAsmParser.cpp

1//===- ARMAsmParser.cpp - Parse ARM assembly to MCInst instructions -------===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8
9#include "ARMFeatures.h"
10#include "ARMBaseInstrInfo.h"
11#include "Utils/ARMBaseInfo.h"
12#include "MCTargetDesc/ARMAddressingModes.h"
13#include "MCTargetDesc/ARMBaseInfo.h"
14#include "MCTargetDesc/ARMInstPrinter.h"
15#include "MCTargetDesc/ARMMCExpr.h"
16#include "MCTargetDesc/ARMMCTargetDesc.h"
17#include "TargetInfo/ARMTargetInfo.h"
18#include "llvm/ADT/APFloat.h"
19#include "llvm/ADT/APInt.h"
20#include "llvm/ADT/None.h"
21#include "llvm/ADT/STLExtras.h"
22#include "llvm/ADT/SmallSet.h"
23#include "llvm/ADT/SmallVector.h"
24#include "llvm/ADT/StringMap.h"
25#include "llvm/ADT/StringSet.h"
26#include "llvm/ADT/StringRef.h"
27#include "llvm/ADT/StringSwitch.h"
28#include "llvm/ADT/Triple.h"
29#include "llvm/ADT/Twine.h"
30#include "llvm/MC/MCContext.h"
31#include "llvm/MC/MCExpr.h"
32#include "llvm/MC/MCInst.h"
33#include "llvm/MC/MCInstrDesc.h"
34#include "llvm/MC/MCInstrInfo.h"
35#include "llvm/MC/MCObjectFileInfo.h"
36#include "llvm/MC/MCParser/MCAsmLexer.h"
37#include "llvm/MC/MCParser/MCAsmParser.h"
38#include "llvm/MC/MCParser/MCAsmParserExtension.h"
39#include "llvm/MC/MCParser/MCAsmParserUtils.h"
40#include "llvm/MC/MCParser/MCParsedAsmOperand.h"
41#include "llvm/MC/MCParser/MCTargetAsmParser.h"
42#include "llvm/MC/MCRegisterInfo.h"
43#include "llvm/MC/MCSection.h"
44#include "llvm/MC/MCStreamer.h"
45#include "llvm/MC/MCSubtargetInfo.h"
46#include "llvm/MC/MCSymbol.h"
47#include "llvm/MC/SubtargetFeature.h"
48#include "llvm/Support/ARMBuildAttributes.h"
49#include "llvm/Support/ARMEHABI.h"
50#include "llvm/Support/Casting.h"
51#include "llvm/Support/CommandLine.h"
52#include "llvm/Support/Compiler.h"
53#include "llvm/Support/ErrorHandling.h"
54#include "llvm/Support/MathExtras.h"
55#include "llvm/Support/SMLoc.h"
56#include "llvm/Support/TargetParser.h"
57#include "llvm/Support/TargetRegistry.h"
58#include "llvm/Support/raw_ostream.h"
59#include <algorithm>
60#include <cassert>
61#include <cstddef>
62#include <cstdint>
63#include <iterator>
64#include <limits>
65#include <memory>
66#include <string>
67#include <utility>
68#include <vector>
69
70#define DEBUG_TYPE "asm-parser"
71
72using namespace llvm;
73
74namespace llvm {
75extern const MCInstrDesc ARMInsts[];
76} // end namespace llvm
77
78namespace {
79
80enum class ImplicitItModeTy { Always, Never, ARMOnly, ThumbOnly };
81
82static cl::opt<ImplicitItModeTy> ImplicitItMode(
83 "arm-implicit-it", cl::init(ImplicitItModeTy::ARMOnly),
84 cl::desc("Allow conditional instructions outdside of an IT block"),
85 cl::values(clEnumValN(ImplicitItModeTy::Always, "always",
86 "Accept in both ISAs, emit implicit ITs in Thumb"),
87 clEnumValN(ImplicitItModeTy::Never, "never",
88 "Warn in ARM, reject in Thumb"),
89 clEnumValN(ImplicitItModeTy::ARMOnly, "arm",
90 "Accept in ARM, reject in Thumb"),
91 clEnumValN(ImplicitItModeTy::ThumbOnly, "thumb",
92 "Warn in ARM, emit implicit ITs in Thumb")));
93
94static cl::opt<bool> AddBuildAttributes("arm-add-build-attributes",
95 cl::init(false));
96
97enum VectorLaneTy { NoLanes, AllLanes, IndexedLane };
98
99static inline unsigned extractITMaskBit(unsigned Mask, unsigned Position) {
100 // Position==0 means we're not in an IT block at all. Position==1
101 // means we want the first state bit, which is always 0 (Then).
102 // Position==2 means we want the second state bit, stored at bit 3
103 // of Mask, and so on downwards. So (5 - Position) will shift the
104 // right bit down to bit 0, including the always-0 bit at bit 4 for
105 // the mandatory initial Then.
106 return (Mask >> (5 - Position) & 1);
107}
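
A worked example of the mask layout described in the comments above (hypothetical snippet, not part of ARMAsmParser.cpp; it assumes the ARMOperand/MCOperand mask format used in this file, where a 1 bit means 'e' and the trailing 1 terminates the block):

    unsigned Mask = 0b1010;                  // ITET: Then, Else, Then; count = 4 - ctz(Mask) = 3
    unsigned S1 = extractITMaskBit(Mask, 1); // (Mask >> 4) & 1 == 0 -> Then (always)
    unsigned S2 = extractITMaskBit(Mask, 2); // (Mask >> 3) & 1 == 1 -> Else
    unsigned S3 = extractITMaskBit(Mask, 3); // (Mask >> 2) & 1 == 0 -> Then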
108
109class UnwindContext {
110 using Locs = SmallVector<SMLoc, 4>;
111
112 MCAsmParser &Parser;
113 Locs FnStartLocs;
114 Locs CantUnwindLocs;
115 Locs PersonalityLocs;
116 Locs PersonalityIndexLocs;
117 Locs HandlerDataLocs;
118 int FPReg;
119
120public:
121 UnwindContext(MCAsmParser &P) : Parser(P), FPReg(ARM::SP) {}
122
123 bool hasFnStart() const { return !FnStartLocs.empty(); }
124 bool cantUnwind() const { return !CantUnwindLocs.empty(); }
125 bool hasHandlerData() const { return !HandlerDataLocs.empty(); }
126
127 bool hasPersonality() const {
128 return !(PersonalityLocs.empty() && PersonalityIndexLocs.empty());
129 }
130
131 void recordFnStart(SMLoc L) { FnStartLocs.push_back(L); }
132 void recordCantUnwind(SMLoc L) { CantUnwindLocs.push_back(L); }
133 void recordPersonality(SMLoc L) { PersonalityLocs.push_back(L); }
134 void recordHandlerData(SMLoc L) { HandlerDataLocs.push_back(L); }
135 void recordPersonalityIndex(SMLoc L) { PersonalityIndexLocs.push_back(L); }
136
137 void saveFPReg(int Reg) { FPReg = Reg; }
138 int getFPReg() const { return FPReg; }
139
140 void emitFnStartLocNotes() const {
141 for (Locs::const_iterator FI = FnStartLocs.begin(), FE = FnStartLocs.end();
142 FI != FE; ++FI)
143 Parser.Note(*FI, ".fnstart was specified here");
144 }
145
146 void emitCantUnwindLocNotes() const {
147 for (Locs::const_iterator UI = CantUnwindLocs.begin(),
148 UE = CantUnwindLocs.end(); UI != UE; ++UI)
149 Parser.Note(*UI, ".cantunwind was specified here");
150 }
151
152 void emitHandlerDataLocNotes() const {
153 for (Locs::const_iterator HI = HandlerDataLocs.begin(),
154 HE = HandlerDataLocs.end(); HI != HE; ++HI)
155 Parser.Note(*HI, ".handlerdata was specified here");
156 }
157
158 void emitPersonalityLocNotes() const {
159 for (Locs::const_iterator PI = PersonalityLocs.begin(),
160 PE = PersonalityLocs.end(),
161 PII = PersonalityIndexLocs.begin(),
162 PIE = PersonalityIndexLocs.end();
163 PI != PE || PII != PIE;) {
164 if (PI != PE && (PII == PIE || PI->getPointer() < PII->getPointer()))
165 Parser.Note(*PI++, ".personality was specified here");
166 else if (PII != PIE && (PI == PE || PII->getPointer() < PI->getPointer()))
167 Parser.Note(*PII++, ".personalityindex was specified here");
168 else
169 llvm_unreachable(".personality and .personalityindex cannot be "
170 "at the same location");
171 }
172 }
173
174 void reset() {
175 FnStartLocs = Locs();
176 CantUnwindLocs = Locs();
177 PersonalityLocs = Locs();
178 HandlerDataLocs = Locs();
179 PersonalityIndexLocs = Locs();
180 FPReg = ARM::SP;
181 }
182};
183
184// Various sets of ARM instruction mnemonics which are used by the asm parser
185class ARMMnemonicSets {
186 StringSet<> CDE;
187 StringSet<> CDEWithVPTSuffix;
188public:
189 ARMMnemonicSets(const MCSubtargetInfo &STI);
190
191 /// Returns true iff a given mnemonic is a CDE instruction
192 bool isCDEInstr(StringRef Mnemonic) {
193 // Quick check before searching the set
194 if (!Mnemonic.startswith("cx") && !Mnemonic.startswith("vcx"))
195 return false;
196 return CDE.count(Mnemonic);
197 }
198
199 /// Returns true iff a given mnemonic is a VPT-predicable CDE instruction
200 /// (possibly with a predication suffix "e" or "t")
201 bool isVPTPredicableCDEInstr(StringRef Mnemonic) {
202 if (!Mnemonic.startswith("vcx"))
203 return false;
204 return CDEWithVPTSuffix.count(Mnemonic);
205 }
206
207 /// Returns true iff a given mnemonic is an IT-predicable CDE instruction
208 /// (possibly with a condition suffix)
209 bool isITPredicableCDEInstr(StringRef Mnemonic) {
210 if (!Mnemonic.startswith("cx"))
211 return false;
212 return Mnemonic.startswith("cx1a") || Mnemonic.startswith("cx1da") ||
213 Mnemonic.startswith("cx2a") || Mnemonic.startswith("cx2da") ||
214 Mnemonic.startswith("cx3a") || Mnemonic.startswith("cx3da");
215 }
216
217 /// Return true iff a given mnemonic is an integer CDE instruction with
218 /// dual-register destination
219 bool isCDEDualRegInstr(StringRef Mnemonic) {
220 if (!Mnemonic.startswith("cx"))
221 return false;
222 return Mnemonic == "cx1d" || Mnemonic == "cx1da" ||
223 Mnemonic == "cx2d" || Mnemonic == "cx2da" ||
224 Mnemonic == "cx3d" || Mnemonic == "cx3da";
225 }
226};
227
228ARMMnemonicSets::ARMMnemonicSets(const MCSubtargetInfo &STI) {
229 for (StringRef Mnemonic: { "cx1", "cx1a", "cx1d", "cx1da",
230 "cx2", "cx2a", "cx2d", "cx2da",
231 "cx3", "cx3a", "cx3d", "cx3da", })
232 CDE.insert(Mnemonic);
233 for (StringRef Mnemonic :
234 {"vcx1", "vcx1a", "vcx2", "vcx2a", "vcx3", "vcx3a"}) {
235 CDE.insert(Mnemonic);
236 CDEWithVPTSuffix.insert(Mnemonic);
237 CDEWithVPTSuffix.insert(std::string(Mnemonic) + "t");
238 CDEWithVPTSuffix.insert(std::string(Mnemonic) + "e");
239 }
240}
241
242class ARMAsmParser : public MCTargetAsmParser {
243 const MCRegisterInfo *MRI;
244 UnwindContext UC;
245 ARMMnemonicSets MS;
246
247 ARMTargetStreamer &getTargetStreamer() {
248 assert(getParser().getStreamer().getTargetStreamer() &&
249 "do not have a target streamer");
250 MCTargetStreamer &TS = *getParser().getStreamer().getTargetStreamer();
251 return static_cast<ARMTargetStreamer &>(TS);
252 }
253
254 // Map of register aliases registers via the .req directive.
255 StringMap<unsigned> RegisterReqs;
256
257 bool NextSymbolIsThumb;
258
259 bool useImplicitITThumb() const {
260 return ImplicitItMode == ImplicitItModeTy::Always ||
261 ImplicitItMode == ImplicitItModeTy::ThumbOnly;
262 }
263
264 bool useImplicitITARM() const {
265 return ImplicitItMode == ImplicitItModeTy::Always ||
266 ImplicitItMode == ImplicitItModeTy::ARMOnly;
267 }
268
269 struct {
270 ARMCC::CondCodes Cond; // Condition for IT block.
271 unsigned Mask:4; // Condition mask for instructions.
272 // Starting at first 1 (from lsb).
273 // '1' condition as indicated in IT.
274 // '0' inverse of condition (else).
275 // Count of instructions in IT block is
276 // 4 - trailingzeroes(mask)
277 // Note that this does not have the same encoding
278 // as in the IT instruction, which also depends
279 // on the low bit of the condition code.
280
281 unsigned CurPosition; // Current position in parsing of IT
282 // block. In range [0,4], with 0 being the IT
283 // instruction itself. Initialized according to
284 // count of instructions in block. ~0U if no
285 // active IT block.
286
287 bool IsExplicit; // true - The IT instruction was present in the
288 // input, we should not modify it.
289 // false - The IT instruction was added
290 // implicitly, we can extend it if that
291 // would be legal.
292 } ITState;
293
294 SmallVector<MCInst, 4> PendingConditionalInsts;
295
296 void flushPendingInstructions(MCStreamer &Out) override {
297 if (!inImplicitITBlock()) {
298 assert(PendingConditionalInsts.size() == 0);
299 return;
300 }
301
302 // Emit the IT instruction
303 MCInst ITInst;
304 ITInst.setOpcode(ARM::t2IT);
305 ITInst.addOperand(MCOperand::createImm(ITState.Cond));
306 ITInst.addOperand(MCOperand::createImm(ITState.Mask));
307 Out.emitInstruction(ITInst, getSTI());
308
309 // Emit the conditonal instructions
310 assert(PendingConditionalInsts.size() <= 4);
311 for (const MCInst &Inst : PendingConditionalInsts) {
312 Out.emitInstruction(Inst, getSTI());
313 }
314 PendingConditionalInsts.clear();
315
316 // Clear the IT state
317 ITState.Mask = 0;
318 ITState.CurPosition = ~0U;
319 }
320
321 bool inITBlock() { return ITState.CurPosition != ~0U; }
322 bool inExplicitITBlock() { return inITBlock() && ITState.IsExplicit; }
323 bool inImplicitITBlock() { return inITBlock() && !ITState.IsExplicit; }
324
325 bool lastInITBlock() {
326 return ITState.CurPosition == 4 - countTrailingZeros(ITState.Mask);
327 }
328
329 void forwardITPosition() {
330 if (!inITBlock()) return;
331 // Move to the next instruction in the IT block, if there is one. If not,
332 // mark the block as done, except for implicit IT blocks, which we leave
333 // open until we find an instruction that can't be added to it.
334 unsigned TZ = countTrailingZeros(ITState.Mask);
335 if (++ITState.CurPosition == 5 - TZ && ITState.IsExplicit)
336 ITState.CurPosition = ~0U; // Done with the IT block after this.
337 }
338
339 // Rewind the state of the current IT block, removing the last slot from it.
340 void rewindImplicitITPosition() {
341 assert(inImplicitITBlock());
342 assert(ITState.CurPosition > 1);
343 ITState.CurPosition--;
344 unsigned TZ = countTrailingZeros(ITState.Mask);
345 unsigned NewMask = 0;
346 NewMask |= ITState.Mask & (0xC << TZ);
347 NewMask |= 0x2 << TZ;
348 ITState.Mask = NewMask;
349 }
350
351 // Rewind the state of the current IT block, removing the last slot from it.
352 // If we were at the first slot, this closes the IT block.
353 void discardImplicitITBlock() {
354 assert(inImplicitITBlock());
355 assert(ITState.CurPosition == 1);
356 ITState.CurPosition = ~0U;
357 }
358
359 // Return the low-subreg of a given Q register.
360 unsigned getDRegFromQReg(unsigned QReg) const {
361 return MRI->getSubReg(QReg, ARM::dsub_0);
362 }
363
364 // Get the condition code corresponding to the current IT block slot.
365 ARMCC::CondCodes currentITCond() {
366 unsigned MaskBit = extractITMaskBit(ITState.Mask, ITState.CurPosition);
367 return MaskBit ? ARMCC::getOppositeCondition(ITState.Cond) : ITState.Cond;
368 }
369
370 // Invert the condition of the current IT block slot without changing any
371 // other slots in the same block.
372 void invertCurrentITCondition() {
373 if (ITState.CurPosition == 1) {
374 ITState.Cond = ARMCC::getOppositeCondition(ITState.Cond);
375 } else {
376 ITState.Mask ^= 1 << (5 - ITState.CurPosition);
377 }
378 }
379
380 // Returns true if the current IT block is full (all 4 slots used).
381 bool isITBlockFull() {
382 return inITBlock() && (ITState.Mask & 1);
383 }
384
385 // Extend the current implicit IT block to have one more slot with the given
386 // condition code.
387 void extendImplicitITBlock(ARMCC::CondCodes Cond) {
388 assert(inImplicitITBlock());
389 assert(!isITBlockFull());
390 assert(Cond == ITState.Cond ||
391 Cond == ARMCC::getOppositeCondition(ITState.Cond));
392 unsigned TZ = countTrailingZeros(ITState.Mask);
393 unsigned NewMask = 0;
394 // Keep any existing condition bits.
395 NewMask |= ITState.Mask & (0xE << TZ);
396 // Insert the new condition bit.
397 NewMask |= (Cond != ITState.Cond) << TZ;
398 // Move the trailing 1 down one bit.
399 NewMask |= 1 << (TZ - 1);
400 ITState.Mask = NewMask;
401 }
402
403 // Create a new implicit IT block with a dummy condition code.
404 void startImplicitITBlock() {
405 assert(!inITBlock());
406 ITState.Cond = ARMCC::AL;
407 ITState.Mask = 8;
408 ITState.CurPosition = 1;
409 ITState.IsExplicit = false;
410 }
411
412 // Create a new explicit IT block with the given condition and mask.
413 // The mask should be in the format used in ARMOperand and
414 // MCOperand, with a 1 implying 'e', regardless of the low bit of
415 // the condition.
416 void startExplicitITBlock(ARMCC::CondCodes Cond, unsigned Mask) {
417 assert(!inITBlock());
418 ITState.Cond = Cond;
419 ITState.Mask = Mask;
420 ITState.CurPosition = 0;
421 ITState.IsExplicit = true;
422 }
423
424 struct {
425 unsigned Mask : 4;
426 unsigned CurPosition;
427 } VPTState;
428 bool inVPTBlock() { return VPTState.CurPosition != ~0U; }
429 void forwardVPTPosition() {
430 if (!inVPTBlock()) return;
431 unsigned TZ = countTrailingZeros(VPTState.Mask);
432 if (++VPTState.CurPosition == 5 - TZ)
433 VPTState.CurPosition = ~0U;
434 }
435
436 void Note(SMLoc L, const Twine &Msg, SMRange Range = None) {
437 return getParser().Note(L, Msg, Range);
438 }
439
440 bool Warning(SMLoc L, const Twine &Msg, SMRange Range = None) {
441 return getParser().Warning(L, Msg, Range);
442 }
443
444 bool Error(SMLoc L, const Twine &Msg, SMRange Range = None) {
445 return getParser().Error(L, Msg, Range);
446 }
447
448 bool validatetLDMRegList(const MCInst &Inst, const OperandVector &Operands,
449 unsigned ListNo, bool IsARPop = false);
450 bool validatetSTMRegList(const MCInst &Inst, const OperandVector &Operands,
451 unsigned ListNo);
452
453 int tryParseRegister();
454 bool tryParseRegisterWithWriteBack(OperandVector &);
455 int tryParseShiftRegister(OperandVector &);
456 bool parseRegisterList(OperandVector &, bool EnforceOrder = true);
457 bool parseMemory(OperandVector &);
458 bool parseOperand(OperandVector &, StringRef Mnemonic);
459 bool parsePrefix(ARMMCExpr::VariantKind &RefKind);
460 bool parseMemRegOffsetShift(ARM_AM::ShiftOpc &ShiftType,
461 unsigned &ShiftAmount);
462 bool parseLiteralValues(unsigned Size, SMLoc L);
463 bool parseDirectiveThumb(SMLoc L);
464 bool parseDirectiveARM(SMLoc L);
465 bool parseDirectiveThumbFunc(SMLoc L);
466 bool parseDirectiveCode(SMLoc L);
467 bool parseDirectiveSyntax(SMLoc L);
468 bool parseDirectiveReq(StringRef Name, SMLoc L);
469 bool parseDirectiveUnreq(SMLoc L);
470 bool parseDirectiveArch(SMLoc L);
471 bool parseDirectiveEabiAttr(SMLoc L);
472 bool parseDirectiveCPU(SMLoc L);
473 bool parseDirectiveFPU(SMLoc L);
474 bool parseDirectiveFnStart(SMLoc L);
475 bool parseDirectiveFnEnd(SMLoc L);
476 bool parseDirectiveCantUnwind(SMLoc L);
477 bool parseDirectivePersonality(SMLoc L);
478 bool parseDirectiveHandlerData(SMLoc L);
479 bool parseDirectiveSetFP(SMLoc L);
480 bool parseDirectivePad(SMLoc L);
481 bool parseDirectiveRegSave(SMLoc L, bool IsVector);
482 bool parseDirectiveInst(SMLoc L, char Suffix = '\0');
483 bool parseDirectiveLtorg(SMLoc L);
484 bool parseDirectiveEven(SMLoc L);
485 bool parseDirectivePersonalityIndex(SMLoc L);
486 bool parseDirectiveUnwindRaw(SMLoc L);
487 bool parseDirectiveTLSDescSeq(SMLoc L);
488 bool parseDirectiveMovSP(SMLoc L);
489 bool parseDirectiveObjectArch(SMLoc L);
490 bool parseDirectiveArchExtension(SMLoc L);
491 bool parseDirectiveAlign(SMLoc L);
492 bool parseDirectiveThumbSet(SMLoc L);
493
494 bool isMnemonicVPTPredicable(StringRef Mnemonic, StringRef ExtraToken);
495 StringRef splitMnemonic(StringRef Mnemonic, StringRef ExtraToken,
496 unsigned &PredicationCode,
497 unsigned &VPTPredicationCode, bool &CarrySetting,
498 unsigned &ProcessorIMod, StringRef &ITMask);
499 void getMnemonicAcceptInfo(StringRef Mnemonic, StringRef ExtraToken,
500 StringRef FullInst, bool &CanAcceptCarrySet,
501 bool &CanAcceptPredicationCode,
502 bool &CanAcceptVPTPredicationCode);
503
504 void tryConvertingToTwoOperandForm(StringRef Mnemonic, bool CarrySetting,
505 OperandVector &Operands);
506 bool CDEConvertDualRegOperand(StringRef Mnemonic, OperandVector &Operands);
507
508 bool isThumb() const {
509 // FIXME: Can tablegen auto-generate this?
510 return getSTI().getFeatureBits()[ARM::ModeThumb];
511 }
512
513 bool isThumbOne() const {
514 return isThumb() && !getSTI().getFeatureBits()[ARM::FeatureThumb2];
515 }
516
517 bool isThumbTwo() const {
518 return isThumb() && getSTI().getFeatureBits()[ARM::FeatureThumb2];
519 }
520
521 bool hasThumb() const {
522 return getSTI().getFeatureBits()[ARM::HasV4TOps];
523 }
524
525 bool hasThumb2() const {
526 return getSTI().getFeatureBits()[ARM::FeatureThumb2];
527 }
528
529 bool hasV6Ops() const {
530 return getSTI().getFeatureBits()[ARM::HasV6Ops];
531 }
532
533 bool hasV6T2Ops() const {
534 return getSTI().getFeatureBits()[ARM::HasV6T2Ops];
535 }
536
537 bool hasV6MOps() const {
538 return getSTI().getFeatureBits()[ARM::HasV6MOps];
539 }
540
541 bool hasV7Ops() const {
542 return getSTI().getFeatureBits()[ARM::HasV7Ops];
543 }
544
545 bool hasV8Ops() const {
546 return getSTI().getFeatureBits()[ARM::HasV8Ops];
547 }
548
549 bool hasV8MBaseline() const {
550 return getSTI().getFeatureBits()[ARM::HasV8MBaselineOps];
551 }
552
553 bool hasV8MMainline() const {
554 return getSTI().getFeatureBits()[ARM::HasV8MMainlineOps];
555 }
556 bool hasV8_1MMainline() const {
557 return getSTI().getFeatureBits()[ARM::HasV8_1MMainlineOps];
558 }
559 bool hasMVE() const {
560 return getSTI().getFeatureBits()[ARM::HasMVEIntegerOps];
561 }
562 bool hasMVEFloat() const {
563 return getSTI().getFeatureBits()[ARM::HasMVEFloatOps];
564 }
565 bool hasCDE() const {
566 return getSTI().getFeatureBits()[ARM::HasCDEOps];
567 }
568 bool has8MSecExt() const {
569 return getSTI().getFeatureBits()[ARM::Feature8MSecExt];
570 }
571
572 bool hasARM() const {
573 return !getSTI().getFeatureBits()[ARM::FeatureNoARM];
574 }
575
576 bool hasDSP() const {
577 return getSTI().getFeatureBits()[ARM::FeatureDSP];
578 }
579
580 bool hasD32() const {
581 return getSTI().getFeatureBits()[ARM::FeatureD32];
582 }
583
584 bool hasV8_1aOps() const {
585 return getSTI().getFeatureBits()[ARM::HasV8_1aOps];
586 }
587
588 bool hasRAS() const {
589 return getSTI().getFeatureBits()[ARM::FeatureRAS];
590 }
591
592 void SwitchMode() {
593 MCSubtargetInfo &STI = copySTI();
594 auto FB = ComputeAvailableFeatures(STI.ToggleFeature(ARM::ModeThumb));
595 setAvailableFeatures(FB);
596 }
597
598 void FixModeAfterArchChange(bool WasThumb, SMLoc Loc);
599
600 bool isMClass() const {
601 return getSTI().getFeatureBits()[ARM::FeatureMClass];
602 }
603
604 /// @name Auto-generated Match Functions
605 /// {
606
607#define GET_ASSEMBLER_HEADER
608#include "ARMGenAsmMatcher.inc"
609
610 /// }
611
612 OperandMatchResultTy parseITCondCode(OperandVector &);
613 OperandMatchResultTy parseCoprocNumOperand(OperandVector &);
614 OperandMatchResultTy parseCoprocRegOperand(OperandVector &);
615 OperandMatchResultTy parseCoprocOptionOperand(OperandVector &);
616 OperandMatchResultTy parseMemBarrierOptOperand(OperandVector &);
617 OperandMatchResultTy parseTraceSyncBarrierOptOperand(OperandVector &);
618 OperandMatchResultTy parseInstSyncBarrierOptOperand(OperandVector &);
619 OperandMatchResultTy parseProcIFlagsOperand(OperandVector &);
620 OperandMatchResultTy parseMSRMaskOperand(OperandVector &);
621 OperandMatchResultTy parseBankedRegOperand(OperandVector &);
622 OperandMatchResultTy parsePKHImm(OperandVector &O, StringRef Op, int Low,
623 int High);
624 OperandMatchResultTy parsePKHLSLImm(OperandVector &O) {
625 return parsePKHImm(O, "lsl", 0, 31);
626 }
627 OperandMatchResultTy parsePKHASRImm(OperandVector &O) {
628 return parsePKHImm(O, "asr", 1, 32);
629 }
630 OperandMatchResultTy parseSetEndImm(OperandVector &);
631 OperandMatchResultTy parseShifterImm(OperandVector &);
632 OperandMatchResultTy parseRotImm(OperandVector &);
633 OperandMatchResultTy parseModImm(OperandVector &);
634 OperandMatchResultTy parseBitfield(OperandVector &);
635 OperandMatchResultTy parsePostIdxReg(OperandVector &);
636 OperandMatchResultTy parseAM3Offset(OperandVector &);
637 OperandMatchResultTy parseFPImm(OperandVector &);
638 OperandMatchResultTy parseVectorList(OperandVector &);
639 OperandMatchResultTy parseVectorLane(VectorLaneTy &LaneKind, unsigned &Index,
640 SMLoc &EndLoc);
641
642 // Asm Match Converter Methods
643 void cvtThumbMultiply(MCInst &Inst, const OperandVector &);
644 void cvtThumbBranches(MCInst &Inst, const OperandVector &);
645 void cvtMVEVMOVQtoDReg(MCInst &Inst, const OperandVector &);
646
647 bool validateInstruction(MCInst &Inst, const OperandVector &Ops);
648 bool processInstruction(MCInst &Inst, const OperandVector &Ops, MCStreamer &Out);
649 bool shouldOmitCCOutOperand(StringRef Mnemonic, OperandVector &Operands);
650 bool shouldOmitPredicateOperand(StringRef Mnemonic, OperandVector &Operands);
651 bool shouldOmitVectorPredicateOperand(StringRef Mnemonic, OperandVector &Operands);
652 bool isITBlockTerminator(MCInst &Inst) const;
653 void fixupGNULDRDAlias(StringRef Mnemonic, OperandVector &Operands);
654 bool validateLDRDSTRD(MCInst &Inst, const OperandVector &Operands,
655 bool Load, bool ARMMode, bool Writeback);
656
657public:
658 enum ARMMatchResultTy {
659 Match_RequiresITBlock = FIRST_TARGET_MATCH_RESULT_TY,
660 Match_RequiresNotITBlock,
661 Match_RequiresV6,
662 Match_RequiresThumb2,
663 Match_RequiresV8,
664 Match_RequiresFlagSetting,
665#define GET_OPERAND_DIAGNOSTIC_TYPES
666#include "ARMGenAsmMatcher.inc"
667
668 };
669
670 ARMAsmParser(const MCSubtargetInfo &STI, MCAsmParser &Parser,
671 const MCInstrInfo &MII, const MCTargetOptions &Options)
672 : MCTargetAsmParser(Options, STI, MII), UC(Parser), MS(STI) {
673 MCAsmParserExtension::Initialize(Parser);
674
675 // Cache the MCRegisterInfo.
676 MRI = getContext().getRegisterInfo();
677
678 // Initialize the set of available features.
679 setAvailableFeatures(ComputeAvailableFeatures(STI.getFeatureBits()));
680
681 // Add build attributes based on the selected target.
682 if (AddBuildAttributes)
683 getTargetStreamer().emitTargetAttributes(STI);
684
685 // Not in an ITBlock to start with.
686 ITState.CurPosition = ~0U;
687
688 VPTState.CurPosition = ~0U;
689
690 NextSymbolIsThumb = false;
691 }
692
693 // Implementation of the MCTargetAsmParser interface:
694 bool ParseRegister(unsigned &RegNo, SMLoc &StartLoc, SMLoc &EndLoc) override;
695 OperandMatchResultTy tryParseRegister(unsigned &RegNo, SMLoc &StartLoc,
696 SMLoc &EndLoc) override;
697 bool ParseInstruction(ParseInstructionInfo &Info, StringRef Name,
698 SMLoc NameLoc, OperandVector &Operands) override;
699 bool ParseDirective(AsmToken DirectiveID) override;
700
701 unsigned validateTargetOperandClass(MCParsedAsmOperand &Op,
702 unsigned Kind) override;
703 unsigned checkTargetMatchPredicate(MCInst &Inst) override;
704
705 bool MatchAndEmitInstruction(SMLoc IDLoc, unsigned &Opcode,
706 OperandVector &Operands, MCStreamer &Out,
707 uint64_t &ErrorInfo,
708 bool MatchingInlineAsm) override;
709 unsigned MatchInstruction(OperandVector &Operands, MCInst &Inst,
710 SmallVectorImpl<NearMissInfo> &NearMisses,
711 bool MatchingInlineAsm, bool &EmitInITBlock,
712 MCStreamer &Out);
713
714 struct NearMissMessage {
715 SMLoc Loc;
716 SmallString<128> Message;
717 };
718
719 const char *getCustomOperandDiag(ARMMatchResultTy MatchError);
720
721 void FilterNearMisses(SmallVectorImpl<NearMissInfo> &NearMissesIn,
722 SmallVectorImpl<NearMissMessage> &NearMissesOut,
723 SMLoc IDLoc, OperandVector &Operands);
724 void ReportNearMisses(SmallVectorImpl<NearMissInfo> &NearMisses, SMLoc IDLoc,
725 OperandVector &Operands);
726
727 void doBeforeLabelEmit(MCSymbol *Symbol) override;
728
729 void onLabelParsed(MCSymbol *Symbol) override;
730};
731
732/// ARMOperand - Instances of this class represent a parsed ARM machine
733/// operand.
734class ARMOperand : public MCParsedAsmOperand {
735 enum KindTy {
736 k_CondCode,
737 k_VPTPred,
738 k_CCOut,
739 k_ITCondMask,
740 k_CoprocNum,
741 k_CoprocReg,
742 k_CoprocOption,
743 k_Immediate,
744 k_MemBarrierOpt,
745 k_InstSyncBarrierOpt,
746 k_TraceSyncBarrierOpt,
747 k_Memory,
748 k_PostIndexRegister,
749 k_MSRMask,
750 k_BankedReg,
751 k_ProcIFlags,
752 k_VectorIndex,
753 k_Register,
754 k_RegisterList,
755 k_RegisterListWithAPSR,
756 k_DPRRegisterList,
757 k_SPRRegisterList,
758 k_FPSRegisterListWithVPR,
759 k_FPDRegisterListWithVPR,
760 k_VectorList,
761 k_VectorListAllLanes,
762 k_VectorListIndexed,
763 k_ShiftedRegister,
764 k_ShiftedImmediate,
765 k_ShifterImmediate,
766 k_RotateImmediate,
767 k_ModifiedImmediate,
768 k_ConstantPoolImmediate,
769 k_BitfieldDescriptor,
770 k_Token,
771 } Kind;
772
773 SMLoc StartLoc, EndLoc, AlignmentLoc;
774 SmallVector<unsigned, 8> Registers;
775
776 struct CCOp {
777 ARMCC::CondCodes Val;
778 };
779
780 struct VCCOp {
781 ARMVCC::VPTCodes Val;
782 };
783
784 struct CopOp {
785 unsigned Val;
786 };
787
788 struct CoprocOptionOp {
789 unsigned Val;
790 };
791
792 struct ITMaskOp {
793 unsigned Mask:4;
794 };
795
796 struct MBOptOp {
797 ARM_MB::MemBOpt Val;
798 };
799
800 struct ISBOptOp {
801 ARM_ISB::InstSyncBOpt Val;
802 };
803
804 struct TSBOptOp {
805 ARM_TSB::TraceSyncBOpt Val;
806 };
807
808 struct IFlagsOp {
809 ARM_PROC::IFlags Val;
810 };
811
812 struct MMaskOp {
813 unsigned Val;
814 };
815
816 struct BankedRegOp {
817 unsigned Val;
818 };
819
820 struct TokOp {
821 const char *Data;
822 unsigned Length;
823 };
824
825 struct RegOp {
826 unsigned RegNum;
827 };
828
829 // A vector register list is a sequential list of 1 to 4 registers.
830 struct VectorListOp {
831 unsigned RegNum;
832 unsigned Count;
833 unsigned LaneIndex;
834 bool isDoubleSpaced;
835 };
836
837 struct VectorIndexOp {
838 unsigned Val;
839 };
840
841 struct ImmOp {
842 const MCExpr *Val;
843 };
844
845 /// Combined record for all forms of ARM address expressions.
846 struct MemoryOp {
847 unsigned BaseRegNum;
848 // Offset is in OffsetReg or OffsetImm. If both are zero, no offset
849 // was specified.
850 const MCConstantExpr *OffsetImm; // Offset immediate value
851 unsigned OffsetRegNum; // Offset register num, when OffsetImm == NULL
852 ARM_AM::ShiftOpc ShiftType; // Shift type for OffsetReg
853 unsigned ShiftImm; // shift for OffsetReg.
854 unsigned Alignment; // 0 = no alignment specified
855 // n = alignment in bytes (2, 4, 8, 16, or 32)
856 unsigned isNegative : 1; // Negated OffsetReg? (~'U' bit)
857 };
858
859 struct PostIdxRegOp {
860 unsigned RegNum;
861 bool isAdd;
862 ARM_AM::ShiftOpc ShiftTy;
863 unsigned ShiftImm;
864 };
865
866 struct ShifterImmOp {
867 bool isASR;
868 unsigned Imm;
869 };
870
871 struct RegShiftedRegOp {
872 ARM_AM::ShiftOpc ShiftTy;
873 unsigned SrcReg;
874 unsigned ShiftReg;
875 unsigned ShiftImm;
876 };
877
878 struct RegShiftedImmOp {
879 ARM_AM::ShiftOpc ShiftTy;
880 unsigned SrcReg;
881 unsigned ShiftImm;
882 };
883
884 struct RotImmOp {
885 unsigned Imm;
886 };
887
888 struct ModImmOp {
889 unsigned Bits;
890 unsigned Rot;
891 };
892
893 struct BitfieldOp {
894 unsigned LSB;
895 unsigned Width;
896 };
897
898 union {
899 struct CCOp CC;
900 struct VCCOp VCC;
901 struct CopOp Cop;
902 struct CoprocOptionOp CoprocOption;
903 struct MBOptOp MBOpt;
904 struct ISBOptOp ISBOpt;
905 struct TSBOptOp TSBOpt;
906 struct ITMaskOp ITMask;
907 struct IFlagsOp IFlags;
908 struct MMaskOp MMask;
909 struct BankedRegOp BankedReg;
910 struct TokOp Tok;
911 struct RegOp Reg;
912 struct VectorListOp VectorList;
913 struct VectorIndexOp VectorIndex;
914 struct ImmOp Imm;
915 struct MemoryOp Memory;
916 struct PostIdxRegOp PostIdxReg;
917 struct ShifterImmOp ShifterImm;
918 struct RegShiftedRegOp RegShiftedReg;
919 struct RegShiftedImmOp RegShiftedImm;
920 struct RotImmOp RotImm;
921 struct ModImmOp ModImm;
922 struct BitfieldOp Bitfield;
923 };
924
925public:
926 ARMOperand(KindTy K) : MCParsedAsmOperand(), Kind(K) {}
927
928 /// getStartLoc - Get the location of the first token of this operand.
929 SMLoc getStartLoc() const override { return StartLoc; }
930
931 /// getEndLoc - Get the location of the last token of this operand.
932 SMLoc getEndLoc() const override { return EndLoc; }
933
934 /// getLocRange - Get the range between the first and last token of this
935 /// operand.
936 SMRange getLocRange() const { return SMRange(StartLoc, EndLoc); }
937
938 /// getAlignmentLoc - Get the location of the Alignment token of this operand.
939 SMLoc getAlignmentLoc() const {
940 assert(Kind == k_Memory && "Invalid access!");
941 return AlignmentLoc;
942 }
943
944 ARMCC::CondCodes getCondCode() const {
945 assert(Kind == k_CondCode && "Invalid access!");
946 return CC.Val;
947 }
948
949 ARMVCC::VPTCodes getVPTPred() const {
950 assert(isVPTPred() && "Invalid access!");
951 return VCC.Val;
952 }
953
954 unsigned getCoproc() const {
955 assert((Kind == k_CoprocNum || Kind == k_CoprocReg) && "Invalid access!");
956 return Cop.Val;
957 }
958
959 StringRef getToken() const {
960 assert(Kind == k_Token && "Invalid access!");
961 return StringRef(Tok.Data, Tok.Length);
962 }
963
964 unsigned getReg() const override {
965 assert((Kind == k_Register || Kind == k_CCOut) && "Invalid access!");
966 return Reg.RegNum;
967 }
968
969 const SmallVectorImpl<unsigned> &getRegList() const {
970 assert((Kind == k_RegisterList || Kind == k_RegisterListWithAPSR ||
971 Kind == k_DPRRegisterList || Kind == k_SPRRegisterList ||
972 Kind == k_FPSRegisterListWithVPR ||
973 Kind == k_FPDRegisterListWithVPR) &&
974 "Invalid access!");
975 return Registers;
976 }
977
978 const MCExpr *getImm() const {
979 assert(isImm() && "Invalid access!");
980 return Imm.Val;
981 }
982
983 const MCExpr *getConstantPoolImm() const {
984 assert(isConstantPoolImm() && "Invalid access!");
985 return Imm.Val;
986 }
987
988 unsigned getVectorIndex() const {
989 assert(Kind == k_VectorIndex && "Invalid access!");
990 return VectorIndex.Val;
991 }
992
993 ARM_MB::MemBOpt getMemBarrierOpt() const {
994 assert(Kind == k_MemBarrierOpt && "Invalid access!");
995 return MBOpt.Val;
996 }
997
998 ARM_ISB::InstSyncBOpt getInstSyncBarrierOpt() const {
999 assert(Kind == k_InstSyncBarrierOpt && "Invalid access!");
1000 return ISBOpt.Val;
1001 }
1002
1003 ARM_TSB::TraceSyncBOpt getTraceSyncBarrierOpt() const {
1004 assert(Kind == k_TraceSyncBarrierOpt && "Invalid access!");
1005 return TSBOpt.Val;
1006 }
1007
1008 ARM_PROC::IFlags getProcIFlags() const {
1009 assert(Kind == k_ProcIFlags && "Invalid access!");
1010 return IFlags.Val;
1011 }
1012
1013 unsigned getMSRMask() const {
1014 assert(Kind == k_MSRMask && "Invalid access!");
1015 return MMask.Val;
1016 }
1017
1018 unsigned getBankedReg() const {
1019 assert(Kind == k_BankedReg && "Invalid access!");
1020 return BankedReg.Val;
1021 }
1022
1023 bool isCoprocNum() const { return Kind == k_CoprocNum; }
1024 bool isCoprocReg() const { return Kind == k_CoprocReg; }
1025 bool isCoprocOption() const { return Kind == k_CoprocOption; }
1026 bool isCondCode() const { return Kind == k_CondCode; }
1027 bool isVPTPred() const { return Kind == k_VPTPred; }
1028 bool isCCOut() const { return Kind == k_CCOut; }
1029 bool isITMask() const { return Kind == k_ITCondMask; }
1030 bool isITCondCode() const { return Kind == k_CondCode; }
1031 bool isImm() const override {
1032 return Kind == k_Immediate;
1033 }
1034
1035 bool isARMBranchTarget() const {
1036 if (!isImm()) return false;
1037
1038 if (const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm()))
1039 return CE->getValue() % 4 == 0;
1040 return true;
1041 }
1042
1043
1044 bool isThumbBranchTarget() const {
1045 if (!isImm()) return false;
1046
1047 if (const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm()))
1048 return CE->getValue() % 2 == 0;
1049 return true;
1050 }
1051
1052 // checks whether this operand is an unsigned offset which fits is a field
1053 // of specified width and scaled by a specific number of bits
1054 template<unsigned width, unsigned scale>
1055 bool isUnsignedOffset() const {
1056 if (!isImm()) return false;
1057 if (isa<MCSymbolRefExpr>(Imm.Val)) return true;
1058 if (const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Imm.Val)) {
1059 int64_t Val = CE->getValue();
1060 int64_t Align = 1LL << scale;
1061 int64_t Max = Align * ((1LL << width) - 1);
1062 return ((Val % Align) == 0) && (Val >= 0) && (Val <= Max);
1063 }
1064 return false;
1065 }
1066
1067 // checks whether this operand is an signed offset which fits is a field
1068 // of specified width and scaled by a specific number of bits
1069 template<unsigned width, unsigned scale>
1070 bool isSignedOffset() const {
1071 if (!isImm()) return false;
1072 if (isa<MCSymbolRefExpr>(Imm.Val)) return true;
1073 if (const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Imm.Val)) {
1074 int64_t Val = CE->getValue();
1075 int64_t Align = 1LL << scale;
1076 int64_t Max = Align * ((1LL << (width-1)) - 1);
1077 int64_t Min = -Align * (1LL << (width-1));
1078 return ((Val % Align) == 0) && (Val >= Min) && (Val <= Max);
1079 }
1080 return false;
1081 }
1082
1083 // checks whether this operand is an offset suitable for the LE /
1084 // LETP instructions in Arm v8.1M
1085 bool isLEOffset() const {
1086 if (!isImm()) return false;
1087 if (isa<MCSymbolRefExpr>(Imm.Val)) return true;
1088 if (const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Imm.Val)) {
1089 int64_t Val = CE->getValue();
1090 return Val < 0 && Val >= -4094 && (Val & 1) == 0;
1091 }
1092 return false;
1093 }
1094
1095 // checks whether this operand is a memory operand computed as an offset
1096 // applied to PC. the offset may have 8 bits of magnitude and is represented
1097 // with two bits of shift. textually it may be either [pc, #imm], #imm or
1098 // relocable expression...
1099 bool isThumbMemPC() const {
1100 int64_t Val = 0;
1101 if (isImm()) {
1102 if (isa<MCSymbolRefExpr>(Imm.Val)) return true;
1103 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Imm.Val);
1104 if (!CE) return false;
1105 Val = CE->getValue();
1106 }
1107 else if (isGPRMem()) {
1108 if(!Memory.OffsetImm || Memory.OffsetRegNum) return false;
1109 if(Memory.BaseRegNum != ARM::PC) return false;
1110 Val = Memory.OffsetImm->getValue();
1111 }
1112 else return false;
1113 return ((Val % 4) == 0) && (Val >= 0) && (Val <= 1020);
1114 }
1115
1116 bool isFPImm() const {
1117 if (!isImm()) return false;
1118 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1119 if (!CE) return false;
1120 int Val = ARM_AM::getFP32Imm(APInt(32, CE->getValue()));
1121 return Val != -1;
1122 }
1123
1124 template<int64_t N, int64_t M>
1125 bool isImmediate() const {
1126 if (!isImm()) return false;
1127 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1128 if (!CE) return false;
1129 int64_t Value = CE->getValue();
1130 return Value >= N && Value <= M;
1131 }
1132
1133 template<int64_t N, int64_t M>
1134 bool isImmediateS4() const {
1135 if (!isImm()) return false;
1136 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1137 if (!CE) return false;
1138 int64_t Value = CE->getValue();
1139 return ((Value & 3) == 0) && Value >= N && Value <= M;
1140 }
1141 template<int64_t N, int64_t M>
1142 bool isImmediateS2() const {
1143 if (!isImm()) return false;
1144 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1145 if (!CE) return false;
1146 int64_t Value = CE->getValue();
1147 return ((Value & 1) == 0) && Value >= N && Value <= M;
1148 }
1149 bool isFBits16() const {
1150 return isImmediate<0, 17>();
1151 }
1152 bool isFBits32() const {
1153 return isImmediate<1, 33>();
1154 }
1155 bool isImm8s4() const {
1156 return isImmediateS4<-1020, 1020>();
1157 }
1158 bool isImm7s4() const {
1159 return isImmediateS4<-508, 508>();
1160 }
1161 bool isImm7Shift0() const {
1162 return isImmediate<-127, 127>();
1163 }
1164 bool isImm7Shift1() const {
1165 return isImmediateS2<-255, 255>();
1166 }
1167 bool isImm7Shift2() const {
1168 return isImmediateS4<-511, 511>();
1169 }
1170 bool isImm7() const {
1171 return isImmediate<-127, 127>();
1172 }
1173 bool isImm0_1020s4() const {
1174 return isImmediateS4<0, 1020>();
1175 }
1176 bool isImm0_508s4() const {
1177 return isImmediateS4<0, 508>();
1178 }
1179 bool isImm0_508s4Neg() const {
1180 if (!isImm()) return false;
1181 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1182 if (!CE) return false;
1183 int64_t Value = -CE->getValue();
1184 // explicitly exclude zero. we want that to use the normal 0_508 version.
1185 return ((Value & 3) == 0) && Value > 0 && Value <= 508;
1186 }
1187
1188 bool isImm0_4095Neg() const {
1189 if (!isImm()) return false;
1190 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1191 if (!CE) return false;
1192 // isImm0_4095Neg is used with 32-bit immediates only.
1193 // 32-bit immediates are zero extended to 64-bit when parsed,
1194 // thus simple -CE->getValue() results in a big negative number,
1195 // not a small positive number as intended
1196 if ((CE->getValue() >> 32) > 0) return false;
1197 uint32_t Value = -static_cast<uint32_t>(CE->getValue());
1198 return Value > 0 && Value < 4096;
1199 }
1200
1201 bool isImm0_7() const {
1202 return isImmediate<0, 7>();
1203 }
1204
1205 bool isImm1_16() const {
1206 return isImmediate<1, 16>();
1207 }
1208
1209 bool isImm1_32() const {
1210 return isImmediate<1, 32>();
1211 }
1212
1213 bool isImm8_255() const {
1214 return isImmediate<8, 255>();
1215 }
1216
1217 bool isImm256_65535Expr() const {
1218 if (!isImm()) return false;
1219 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1220 // If it's not a constant expression, it'll generate a fixup and be
1221 // handled later.
1222 if (!CE) return true;
1223 int64_t Value = CE->getValue();
1224 return Value >= 256 && Value < 65536;
1225 }
1226
1227 bool isImm0_65535Expr() const {
1228 if (!isImm()) return false;
1229 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1230 // If it's not a constant expression, it'll generate a fixup and be
1231 // handled later.
1232 if (!CE) return true;
1233 int64_t Value = CE->getValue();
1234 return Value >= 0 && Value < 65536;
1235 }
1236
1237 bool isImm24bit() const {
1238 return isImmediate<0, 0xffffff + 1>();
1239 }
1240
1241 bool isImmThumbSR() const {
1242 return isImmediate<1, 33>();
1243 }
1244
1245 template<int shift>
1246 bool isExpImmValue(uint64_t Value) const {
1247 uint64_t mask = (1 << shift) - 1;
1248 if ((Value & mask) != 0 || (Value >> shift) > 0xff)
1249 return false;
1250 return true;
1251 }
1252
1253 template<int shift>
1254 bool isExpImm() const {
1255 if (!isImm()) return false;
1256 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1257 if (!CE) return false;
1258
1259 return isExpImmValue<shift>(CE->getValue());
1260 }
1261
1262 template<int shift, int size>
1263 bool isInvertedExpImm() const {
1264 if (!isImm()) return false;
1265 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1266 if (!CE) return false;
1267
1268 uint64_t OriginalValue = CE->getValue();
1269 uint64_t InvertedValue = OriginalValue ^ (((uint64_t)1 << size) - 1);
1270 return isExpImmValue<shift>(InvertedValue);
1271 }
1272
1273 bool isPKHLSLImm() const {
1274 return isImmediate<0, 32>();
1275 }
1276
1277 bool isPKHASRImm() const {
1278 return isImmediate<0, 33>();
1279 }
1280
1281 bool isAdrLabel() const {
1282 // If we have an immediate that's not a constant, treat it as a label
1283 // reference needing a fixup.
1284 if (isImm() && !isa<MCConstantExpr>(getImm()))
1285 return true;
1286
1287 // If it is a constant, it must fit into a modified immediate encoding.
1288 if (!isImm()) return false;
1289 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1290 if (!CE) return false;
1291 int64_t Value = CE->getValue();
1292 return (ARM_AM::getSOImmVal(Value) != -1 ||
1293 ARM_AM::getSOImmVal(-Value) != -1);
1294 }
1295
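[Editorial note: a brief sketch of the A32 modified-immediate rule behind ARM_AM::getSOImmVal(); illustrative, not part of the original source.]
// An A32 modified immediate is an 8-bit value rotated right by an even amount:
//   ARM_AM::getSOImmVal(0x000003FC) != -1   // 0xFF ror 30 -> encodable
//   ARM_AM::getSOImmVal(0x000001FE) == -1   // would need an odd rotation
// isAdrLabel() also accepts constants whose negation is encodable, which
// allows the subtracting (SUB-from-PC) form of ADR to be selected.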
1296 bool isT2SOImm() const {
1297 // If we have an immediate that's not a constant, treat it as an expression
1298 // needing a fixup.
1299 if (isImm() && !isa<MCConstantExpr>(getImm())) {
1300 // We want to avoid matching :upper16: and :lower16: as we want these
1301 // expressions to match in isImm0_65535Expr()
1302 const ARMMCExpr *ARM16Expr = dyn_cast<ARMMCExpr>(getImm());
1303 return (!ARM16Expr || (ARM16Expr->getKind() != ARMMCExpr::VK_ARM_HI16 &&
1304 ARM16Expr->getKind() != ARMMCExpr::VK_ARM_LO16));
1305 }
1306 if (!isImm()) return false;
1307 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1308 if (!CE) return false;
1309 int64_t Value = CE->getValue();
1310 return ARM_AM::getT2SOImmVal(Value) != -1;
1311 }
1312
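[Editorial note: a sketch of the Thumb-2 modified-immediate forms checked by ARM_AM::getT2SOImmVal(); the example constants are illustrative.]
// A Thumb-2 modified immediate is either an 8-bit value rotated into place or
// one of the replicated-byte patterns:
//   0x000000AB, 0x00AB00AB, 0xAB00AB00, 0xABABABAB   // encodable
//   0x00AB00CD                                       // not encodable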
1313 bool isT2SOImmNot() const {
1314 if (!isImm()) return false;
1315 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1316 if (!CE) return false;
1317 int64_t Value = CE->getValue();
1318 return ARM_AM::getT2SOImmVal(Value) == -1 &&
1319 ARM_AM::getT2SOImmVal(~Value) != -1;
1320 }
1321
1322 bool isT2SOImmNeg() const {
1323 if (!isImm()) return false;
1324 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1325 if (!CE) return false;
1326 int64_t Value = CE->getValue();
1327 // Only use this when not representable as a plain so_imm.
1328 return ARM_AM::getT2SOImmVal(Value) == -1 &&
1329 ARM_AM::getT2SOImmVal(-Value) != -1;
1330 }
1331
1332 bool isSetEndImm() const {
1333 if (!isImm()) return false;
1334 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1335 if (!CE) return false;
1336 int64_t Value = CE->getValue();
1337 return Value == 1 || Value == 0;
1338 }
1339
1340 bool isReg() const override { return Kind == k_Register; }
1341 bool isRegList() const { return Kind == k_RegisterList; }
1342 bool isRegListWithAPSR() const {
1343 return Kind == k_RegisterListWithAPSR || Kind == k_RegisterList;
1344 }
1345 bool isDPRRegList() const { return Kind == k_DPRRegisterList; }
1346 bool isSPRRegList() const { return Kind == k_SPRRegisterList; }
1347 bool isFPSRegListWithVPR() const { return Kind == k_FPSRegisterListWithVPR; }
1348 bool isFPDRegListWithVPR() const { return Kind == k_FPDRegisterListWithVPR; }
1349 bool isToken() const override { return Kind == k_Token; }
1350 bool isMemBarrierOpt() const { return Kind == k_MemBarrierOpt; }
1351 bool isInstSyncBarrierOpt() const { return Kind == k_InstSyncBarrierOpt; }
1352 bool isTraceSyncBarrierOpt() const { return Kind == k_TraceSyncBarrierOpt; }
1353 bool isMem() const override {
1354 return isGPRMem() || isMVEMem();
1355 }
1356 bool isMVEMem() const {
1357 if (Kind != k_Memory)
1358 return false;
1359 if (Memory.BaseRegNum &&
1360 !ARMMCRegisterClasses[ARM::GPRRegClassID].contains(Memory.BaseRegNum) &&
1361 !ARMMCRegisterClasses[ARM::MQPRRegClassID].contains(Memory.BaseRegNum))
1362 return false;
1363 if (Memory.OffsetRegNum &&
1364 !ARMMCRegisterClasses[ARM::MQPRRegClassID].contains(
1365 Memory.OffsetRegNum))
1366 return false;
1367 return true;
1368 }
1369 bool isGPRMem() const {
1370 if (Kind != k_Memory)
1371 return false;
1372 if (Memory.BaseRegNum &&
1373 !ARMMCRegisterClasses[ARM::GPRRegClassID].contains(Memory.BaseRegNum))
1374 return false;
1375 if (Memory.OffsetRegNum &&
1376 !ARMMCRegisterClasses[ARM::GPRRegClassID].contains(Memory.OffsetRegNum))
1377 return false;
1378 return true;
1379 }
1380 bool isShifterImm() const { return Kind == k_ShifterImmediate; }
1381 bool isRegShiftedReg() const {
1382 return Kind == k_ShiftedRegister &&
1383 ARMMCRegisterClasses[ARM::GPRRegClassID].contains(
1384 RegShiftedReg.SrcReg) &&
1385 ARMMCRegisterClasses[ARM::GPRRegClassID].contains(
1386 RegShiftedReg.ShiftReg);
1387 }
1388 bool isRegShiftedImm() const {
1389 return Kind == k_ShiftedImmediate &&
1390 ARMMCRegisterClasses[ARM::GPRRegClassID].contains(
1391 RegShiftedImm.SrcReg);
1392 }
1393 bool isRotImm() const { return Kind == k_RotateImmediate; }
1394
1395 template<unsigned Min, unsigned Max>
1396 bool isPowerTwoInRange() const {
1397 if (!isImm()) return false;
1398 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1399 if (!CE) return false;
1400 int64_t Value = CE->getValue();
1401 return Value > 0 && countPopulation((uint64_t)Value) == 1 &&
1402 Value >= Min && Value <= Max;
1403 }
1404 bool isModImm() const { return Kind == k_ModifiedImmediate; }
1405
1406 bool isModImmNot() const {
1407 if (!isImm()) return false;
1408 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1409 if (!CE) return false;
1410 int64_t Value = CE->getValue();
1411 return ARM_AM::getSOImmVal(~Value) != -1;
1412 }
1413
1414 bool isModImmNeg() const {
1415 if (!isImm()) return false;
1416 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1417 if (!CE) return false;
1418 int64_t Value = CE->getValue();
1419 return ARM_AM::getSOImmVal(Value) == -1 &&
1420 ARM_AM::getSOImmVal(-Value) != -1;
1421 }
1422
1423 bool isThumbModImmNeg1_7() const {
1424 if (!isImm()) return false;
1425 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1426 if (!CE) return false;
1427 int32_t Value = -(int32_t)CE->getValue();
1428 return 0 < Value && Value < 8;
1429 }
1430
1431 bool isThumbModImmNeg8_255() const {
1432 if (!isImm()) return false;
1433 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1434 if (!CE) return false;
1435 int32_t Value = -(int32_t)CE->getValue();
1436 return 7 < Value && Value < 256;
1437 }
1438
1439 bool isConstantPoolImm() const { return Kind == k_ConstantPoolImmediate; }
1440 bool isBitfield() const { return Kind == k_BitfieldDescriptor; }
1441 bool isPostIdxRegShifted() const {
1442 return Kind == k_PostIndexRegister &&
1443 ARMMCRegisterClasses[ARM::GPRRegClassID].contains(PostIdxReg.RegNum);
1444 }
1445 bool isPostIdxReg() const {
1446 return isPostIdxRegShifted() && PostIdxReg.ShiftTy == ARM_AM::no_shift;
1447 }
1448 bool isMemNoOffset(bool alignOK = false, unsigned Alignment = 0) const {
1449 if (!isGPRMem())
1450 return false;
1451 // No offset of any kind.
1452 return Memory.OffsetRegNum == 0 && Memory.OffsetImm == nullptr &&
1453 (alignOK || Memory.Alignment == Alignment);
1454 }
1455 bool isMemNoOffsetT2(bool alignOK = false, unsigned Alignment = 0) const {
1456 if (!isGPRMem())
1457 return false;
1458
1459 if (!ARMMCRegisterClasses[ARM::GPRnopcRegClassID].contains(
1460 Memory.BaseRegNum))
1461 return false;
1462
1463 // No offset of any kind.
1464 return Memory.OffsetRegNum == 0 && Memory.OffsetImm == nullptr &&
1465 (alignOK || Memory.Alignment == Alignment);
1466 }
1467 bool isMemNoOffsetT2NoSp(bool alignOK = false, unsigned Alignment = 0) const {
1468 if (!isGPRMem())
1469 return false;
1470
1471 if (!ARMMCRegisterClasses[ARM::rGPRRegClassID].contains(
1472 Memory.BaseRegNum))
1473 return false;
1474
1475 // No offset of any kind.
1476 return Memory.OffsetRegNum == 0 && Memory.OffsetImm == nullptr &&
1477 (alignOK || Memory.Alignment == Alignment);
1478 }
1479 bool isMemNoOffsetT(bool alignOK = false, unsigned Alignment = 0) const {
1480 if (!isGPRMem())
1481 return false;
1482
1483 if (!ARMMCRegisterClasses[ARM::tGPRRegClassID].contains(
1484 Memory.BaseRegNum))
1485 return false;
1486
1487 // No offset of any kind.
1488 return Memory.OffsetRegNum == 0 && Memory.OffsetImm == nullptr &&
1489 (alignOK || Memory.Alignment == Alignment);
1490 }
1491 bool isMemPCRelImm12() const {
1492 if (!isGPRMem() || Memory.OffsetRegNum != 0 || Memory.Alignment != 0)
1493 return false;
1494 // Base register must be PC.
1495 if (Memory.BaseRegNum != ARM::PC)
1496 return false;
1497 // Immediate offset in range [-4095, 4095].
1498 if (!Memory.OffsetImm) return true;
1499 int64_t Val = Memory.OffsetImm->getValue();
1500 return (Val > -4096 && Val < 4096) ||
1501 (Val == std::numeric_limits<int32_t>::min());
1502 }
1503
1504 bool isAlignedMemory() const {
1505 return isMemNoOffset(true);
1506 }
1507
1508 bool isAlignedMemoryNone() const {
1509 return isMemNoOffset(false, 0);
1510 }
1511
1512 bool isDupAlignedMemoryNone() const {
1513 return isMemNoOffset(false, 0);
1514 }
1515
1516 bool isAlignedMemory16() const {
1517 if (isMemNoOffset(false, 2)) // alignment in bytes for 16-bits is 2.
1518 return true;
1519 return isMemNoOffset(false, 0);
1520 }
1521
1522 bool isDupAlignedMemory16() const {
1523 if (isMemNoOffset(false, 2)) // alignment in bytes for 16-bits is 2.
1524 return true;
1525 return isMemNoOffset(false, 0);
1526 }
1527
1528 bool isAlignedMemory32() const {
1529 if (isMemNoOffset(false, 4)) // alignment in bytes for 32-bits is 4.
1530 return true;
1531 return isMemNoOffset(false, 0);
1532 }
1533
1534 bool isDupAlignedMemory32() const {
1535 if (isMemNoOffset(false, 4)) // alignment in bytes for 32-bits is 4.
1536 return true;
1537 return isMemNoOffset(false, 0);
1538 }
1539
1540 bool isAlignedMemory64() const {
1541 if (isMemNoOffset(false, 8)) // alignment in bytes for 64-bits is 8.
1542 return true;
1543 return isMemNoOffset(false, 0);
1544 }
1545
1546 bool isDupAlignedMemory64() const {
1547 if (isMemNoOffset(false, 8)) // alignment in bytes for 64-bits is 8.
1548 return true;
1549 return isMemNoOffset(false, 0);
1550 }
1551
1552 bool isAlignedMemory64or128() const {
1553 if (isMemNoOffset(false, 8)) // alignment in bytes for 64-bits is 8.
1554 return true;
1555 if (isMemNoOffset(false, 16)) // alignment in bytes for 128-bits is 16.
1556 return true;
1557 return isMemNoOffset(false, 0);
1558 }
1559
1560 bool isDupAlignedMemory64or128() const {
1561 if (isMemNoOffset(false, 8)) // alignment in bytes for 64-bits is 8.
1562 return true;
1563 if (isMemNoOffset(false, 16)) // alignment in bytes for 128-bits is 16.
1564 return true;
1565 return isMemNoOffset(false, 0);
1566 }
1567
1568 bool isAlignedMemory64or128or256() const {
1569 if (isMemNoOffset(false, 8)) // alignment in bytes for 64-bits is 8.
1570 return true;
1571 if (isMemNoOffset(false, 16)) // alignment in bytes for 128-bits is 16.
1572 return true;
1573 if (isMemNoOffset(false, 32)) // alignment in bytes for 256-bits is 32.
1574 return true;
1575 return isMemNoOffset(false, 0);
1576 }
1577
1578 bool isAddrMode2() const {
1579 if (!isGPRMem() || Memory.Alignment != 0) return false;
1580 // Check for register offset.
1581 if (Memory.OffsetRegNum) return true;
1582 // Immediate offset in range [-4095, 4095].
1583 if (!Memory.OffsetImm) return true;
1584 int64_t Val = Memory.OffsetImm->getValue();
1585 return Val > -4096 && Val < 4096;
1586 }
1587
1588 bool isAM2OffsetImm() const {
1589 if (!isImm()) return false;
1590 // Immediate offset in range [-4095, 4095].
1591 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1592 if (!CE) return false;
1593 int64_t Val = CE->getValue();
1594 return (Val == std::numeric_limits<int32_t>::min()) ||
1595 (Val > -4096 && Val < 4096);
1596 }
1597
1598 bool isAddrMode3() const {
1599 // If we have an immediate that's not a constant, treat it as a label
1600 // reference needing a fixup. If it is a constant, it's something else
1601 // and we reject it.
1602 if (isImm() && !isa<MCConstantExpr>(getImm()))
1603 return true;
1604 if (!isGPRMem() || Memory.Alignment != 0) return false;
1605 // No shifts are legal for AM3.
1606 if (Memory.ShiftType != ARM_AM::no_shift) return false;
1607 // Check for register offset.
1608 if (Memory.OffsetRegNum) return true;
1609 // Immediate offset in range [-255, 255].
1610 if (!Memory.OffsetImm) return true;
1611 int64_t Val = Memory.OffsetImm->getValue();
1612 // The #-0 offset is encoded as std::numeric_limits<int32_t>::min(), and we
1613 // have to check for this too.
1614 return (Val > -256 && Val < 256) ||
1615 Val == std::numeric_limits<int32_t>::min();
1616 }
1617
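[Editorial note: a reminder of the "#-0" convention used by several of the predicates in this section; this restates what the code already does.]
// The parser represents a literal "#-0" offset as
// std::numeric_limits<int32_t>::min(), so the addressing-mode predicates
// accept INT32_MIN alongside the normal numeric range; it later encodes as a
// zero offset with the subtract (U = 0) bit set.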
1618 bool isAM3Offset() const {
1619 if (isPostIdxReg())
1620 return true;
1621 if (!isImm())
1622 return false;
1623 // Immediate offset in range [-255, 255].
1624 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1625 if (!CE) return false;
1626 int64_t Val = CE->getValue();
1627 // Special case, #-0 is std::numeric_limits<int32_t>::min().
1628 return (Val > -256 && Val < 256) ||
1629 Val == std::numeric_limits<int32_t>::min();
1630 }
1631
1632 bool isAddrMode5() const {
1633 // If we have an immediate that's not a constant, treat it as a label
1634 // reference needing a fixup. If it is a constant, it's something else
1635 // and we reject it.
1636 if (isImm() && !isa<MCConstantExpr>(getImm()))
1637 return true;
1638 if (!isGPRMem() || Memory.Alignment != 0) return false;
1639 // Check for register offset.
1640 if (Memory.OffsetRegNum) return false;
1641 // Immediate offset in range [-1020, 1020] and a multiple of 4.
1642 if (!Memory.OffsetImm) return true;
1643 int64_t Val = Memory.OffsetImm->getValue();
1644 return (Val >= -1020 && Val <= 1020 && ((Val & 3) == 0)) ||
1645 Val == std::numeric_limits<int32_t>::min();
1646 }
1647
1648 bool isAddrMode5FP16() const {
1649 // If we have an immediate that's not a constant, treat it as a label
1650 // reference needing a fixup. If it is a constant, it's something else
1651 // and we reject it.
1652 if (isImm() && !isa<MCConstantExpr>(getImm()))
1653 return true;
1654 if (!isGPRMem() || Memory.Alignment != 0) return false;
1655 // Check for register offset.
1656 if (Memory.OffsetRegNum) return false;
1657 // Immediate offset in range [-510, 510] and a multiple of 2.
1658 if (!Memory.OffsetImm) return true;
1659 int64_t Val = Memory.OffsetImm->getValue();
1660 return (Val >= -510 && Val <= 510 && ((Val & 1) == 0)) ||
1661 Val == std::numeric_limits<int32_t>::min();
1662 }
1663
1664 bool isMemTBB() const {
1665 if (!isGPRMem() || !Memory.OffsetRegNum || Memory.isNegative ||
1666 Memory.ShiftType != ARM_AM::no_shift || Memory.Alignment != 0)
1667 return false;
1668 return true;
1669 }
1670
1671 bool isMemTBH() const {
1672 if (!isGPRMem() || !Memory.OffsetRegNum || Memory.isNegative ||
1673 Memory.ShiftType != ARM_AM::lsl || Memory.ShiftImm != 1 ||
1674 Memory.Alignment != 0 )
1675 return false;
1676 return true;
1677 }
1678
1679 bool isMemRegOffset() const {
1680 if (!isGPRMem() || !Memory.OffsetRegNum || Memory.Alignment != 0)
1681 return false;
1682 return true;
1683 }
1684
1685 bool isT2MemRegOffset() const {
1686 if (!isGPRMem() || !Memory.OffsetRegNum || Memory.isNegative ||
1687 Memory.Alignment != 0 || Memory.BaseRegNum == ARM::PC)
1688 return false;
1689 // Only lsl #{0, 1, 2, 3} allowed.
1690 if (Memory.ShiftType == ARM_AM::no_shift)
1691 return true;
1692 if (Memory.ShiftType != ARM_AM::lsl || Memory.ShiftImm > 3)
1693 return false;
1694 return true;
1695 }
1696
1697 bool isMemThumbRR() const {
1698 // Thumb reg+reg addressing is simple. Just two registers, a base and
1699 // an offset. No shifts, negations or any other complicating factors.
1700 if (!isGPRMem() || !Memory.OffsetRegNum || Memory.isNegative ||
1701 Memory.ShiftType != ARM_AM::no_shift || Memory.Alignment != 0)
1702 return false;
1703 return isARMLowRegister(Memory.BaseRegNum) &&
1704 (!Memory.OffsetRegNum || isARMLowRegister(Memory.OffsetRegNum));
1705 }
1706
1707 bool isMemThumbRIs4() const {
1708 if (!isGPRMem() || Memory.OffsetRegNum != 0 ||
1709 !isARMLowRegister(Memory.BaseRegNum) || Memory.Alignment != 0)
1710 return false;
1711 // Immediate offset, multiple of 4 in range [0, 124].
1712 if (!Memory.OffsetImm) return true;
1713 int64_t Val = Memory.OffsetImm->getValue();
1714 return Val >= 0 && Val <= 124 && (Val % 4) == 0;
1715 }
1716
1717 bool isMemThumbRIs2() const {
1718 if (!isGPRMem() || Memory.OffsetRegNum != 0 ||
1719 !isARMLowRegister(Memory.BaseRegNum) || Memory.Alignment != 0)
1720 return false;
1722 // Immediate offset, multiple of 2 in range [0, 62].
1722 if (!Memory.OffsetImm) return true;
1723 int64_t Val = Memory.OffsetImm->getValue();
1724 return Val >= 0 && Val <= 62 && (Val % 2) == 0;
1725 }
1726
1727 bool isMemThumbRIs1() const {
1728 if (!isGPRMem() || Memory.OffsetRegNum != 0 ||
1729 !isARMLowRegister(Memory.BaseRegNum) || Memory.Alignment != 0)
1730 return false;
1731 // Immediate offset in range [0, 31].
1732 if (!Memory.OffsetImm) return true;
1733 int64_t Val = Memory.OffsetImm->getValue();
1734 return Val >= 0 && Val <= 31;
1735 }
1736
1737 bool isMemThumbSPI() const {
1738 if (!isGPRMem() || Memory.OffsetRegNum != 0 ||
1739 Memory.BaseRegNum != ARM::SP || Memory.Alignment != 0)
1740 return false;
1741 // Immediate offset, multiple of 4 in range [0, 1020].
1742 if (!Memory.OffsetImm) return true;
1743 int64_t Val = Memory.OffsetImm->getValue();
1744 return Val >= 0 && Val <= 1020 && (Val % 4) == 0;
1745 }
1746
1747 bool isMemImm8s4Offset() const {
1748 // If we have an immediate that's not a constant, treat it as a label
1749 // reference needing a fixup. If it is a constant, it's something else
1750 // and we reject it.
1751 if (isImm() && !isa<MCConstantExpr>(getImm()))
1752 return true;
1753 if (!isGPRMem() || Memory.OffsetRegNum != 0 || Memory.Alignment != 0)
1754 return false;
1755 // Immediate offset a multiple of 4 in range [-1020, 1020].
1756 if (!Memory.OffsetImm) return true;
1757 int64_t Val = Memory.OffsetImm->getValue();
1758 // Special case, #-0 is std::numeric_limits<int32_t>::min().
1759 return (Val >= -1020 && Val <= 1020 && (Val & 3) == 0) ||
1760 Val == std::numeric_limits<int32_t>::min();
1761 }
1762 bool isMemImm7s4Offset() const {
1763 // If we have an immediate that's not a constant, treat it as a label
1764 // reference needing a fixup. If it is a constant, it's something else
1765 // and we reject it.
1766 if (isImm() && !isa<MCConstantExpr>(getImm()))
1767 return true;
1768 if (!isGPRMem() || Memory.OffsetRegNum != 0 || Memory.Alignment != 0 ||
1769 !ARMMCRegisterClasses[ARM::GPRnopcRegClassID].contains(
1770 Memory.BaseRegNum))
1771 return false;
1772 // Immediate offset a multiple of 4 in range [-508, 508].
1773 if (!Memory.OffsetImm) return true;
1774 int64_t Val = Memory.OffsetImm->getValue();
1775 // Special case, #-0 is INT32_MIN.
1776 return (Val >= -508 && Val <= 508 && (Val & 3) == 0) || Val == INT32_MIN;
1777 }
1778 bool isMemImm0_1020s4Offset() const {
1779 if (!isGPRMem() || Memory.OffsetRegNum != 0 || Memory.Alignment != 0)
1780 return false;
1781 // Immediate offset a multiple of 4 in range [0, 1020].
1782 if (!Memory.OffsetImm) return true;
1783 int64_t Val = Memory.OffsetImm->getValue();
1784 return Val >= 0 && Val <= 1020 && (Val & 3) == 0;
1785 }
1786
1787 bool isMemImm8Offset() const {
1788 if (!isGPRMem() || Memory.OffsetRegNum != 0 || Memory.Alignment != 0)
1789 return false;
1790 // Base reg of PC isn't allowed for these encodings.
1791 if (Memory.BaseRegNum == ARM::PC) return false;
1792 // Immediate offset in range [-255, 255].
1793 if (!Memory.OffsetImm) return true;
1794 int64_t Val = Memory.OffsetImm->getValue();
1795 return (Val == std::numeric_limits<int32_t>::min()) ||
1796 (Val > -256 && Val < 256);
1797 }
1798
1799 template<unsigned Bits, unsigned RegClassID>
1800 bool isMemImm7ShiftedOffset() const {
1801 if (!isGPRMem() || Memory.OffsetRegNum != 0 || Memory.Alignment != 0 ||
1802 !ARMMCRegisterClasses[RegClassID].contains(Memory.BaseRegNum))
1803 return false;
1804
1805 // Expect an immediate offset equal to an element of the range
1806 // [-127, 127], shifted left by Bits.
1807
1808 if (!Memory.OffsetImm) return true;
1809 int64_t Val = Memory.OffsetImm->getValue();
1810
1811 // INT32_MIN is a special-case value (indicating the encoding with
1812 // zero offset and the subtract bit set)
1813 if (Val == INT32_MIN)
1814 return true;
1815
1816 unsigned Divisor = 1U << Bits;
1817
1818 // Check that the low bits are zero
1819 if (Val % Divisor != 0)
1820 return false;
1821
1822 // Check that the remaining offset is within range.
1823 Val /= Divisor;
1824 return (Val >= -127 && Val <= 127);
1825 }
1826
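[Editorial note: a worked instantiation of isMemImm7ShiftedOffset<> above; Bits = 2 is chosen purely for illustration.]
// With Bits = 2 the divisor is 4, so the accepted immediate offsets are the
// multiples of 4 in [-508, 508]:
//   offset 256 -> 256 % 4 == 0 and 256 / 4 == 64, within [-127, 127] -> accept
//   offset 510 -> 510 % 4 != 0                                       -> reject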
1827 template <int shift> bool isMemRegRQOffset() const {
1828 if (!isMVEMem() || Memory.OffsetImm != 0 || Memory.Alignment != 0)
1829 return false;
1830
1831 if (!ARMMCRegisterClasses[ARM::GPRnopcRegClassID].contains(
1832 Memory.BaseRegNum))
1833 return false;
1834 if (!ARMMCRegisterClasses[ARM::MQPRRegClassID].contains(
1835 Memory.OffsetRegNum))
1836 return false;
1837
1838 if (shift == 0 && Memory.ShiftType != ARM_AM::no_shift)
1839 return false;
1840
1841 if (shift > 0 &&
1842 (Memory.ShiftType != ARM_AM::uxtw || Memory.ShiftImm != shift))
1843 return false;
1844
1845 return true;
1846 }
1847
1848 template <int shift> bool isMemRegQOffset() const {
1849 if (!isMVEMem() || Memory.OffsetRegNum != 0 || Memory.Alignment != 0)
1850 return false;
1851
1852 if (!ARMMCRegisterClasses[ARM::MQPRRegClassID].contains(
1853 Memory.BaseRegNum))
1854 return false;
1855
1856 if (!Memory.OffsetImm) return true;
1857 static_assert(shift < 56,
1858 "Such that we don't shift by a value higher than 62");
1859 int64_t Val = Memory.OffsetImm->getValue();
1860
1861 // The value must be a multiple of (1 << shift)
1862 if ((Val & ((1U << shift) - 1)) != 0)
1863 return false;
1864
1865 // And be in the right range, depending on the amount that it is shifted
1866 // by. A shift of 0 corresponds to 7 unsigned bits; the sign bit is handled
1867 // separately.
1868 int64_t Range = (1U << (7+shift)) - 1;
1869 return (Val == INT32_MIN) || (Val > -Range && Val < Range);
1870 }
1871
1872 bool isMemPosImm8Offset() const {
1873 if (!isGPRMem() || Memory.OffsetRegNum != 0 || Memory.Alignment != 0)
1874 return false;
1875 // Immediate offset in range [0, 255].
1876 if (!Memory.OffsetImm) return true;
1877 int64_t Val = Memory.OffsetImm->getValue();
1878 return Val >= 0 && Val < 256;
1879 }
1880
1881 bool isMemNegImm8Offset() const {
1882 if (!isGPRMem() || Memory.OffsetRegNum != 0 || Memory.Alignment != 0)
1883 return false;
1884 // Base reg of PC isn't allowed for these encodings.
1885 if (Memory.BaseRegNum == ARM::PC) return false;
1886 // Immediate offset in range [-255, -1].
1887 if (!Memory.OffsetImm) return false;
1888 int64_t Val = Memory.OffsetImm->getValue();
1889 return (Val == std::numeric_limits<int32_t>::min()) ||
1890 (Val > -256 && Val < 0);
1891 }
1892
1893 bool isMemUImm12Offset() const {
1894 if (!isGPRMem() || Memory.OffsetRegNum != 0 || Memory.Alignment != 0)
1895 return false;
1896 // Immediate offset in range [0, 4095].
1897 if (!Memory.OffsetImm) return true;
1898 int64_t Val = Memory.OffsetImm->getValue();
1899 return (Val >= 0 && Val < 4096);
1900 }
1901
1902 bool isMemImm12Offset() const {
1903 // If we have an immediate that's not a constant, treat it as a label
1904 // reference needing a fixup. If it is a constant, it's something else
1905 // and we reject it.
1906
1907 if (isImm() && !isa<MCConstantExpr>(getImm()))
1908 return true;
1909
1910 if (!isGPRMem() || Memory.OffsetRegNum != 0 || Memory.Alignment != 0)
1911 return false;
1912 // Immediate offset in range [-4095, 4095].
1913 if (!Memory.OffsetImm) return true;
1914 int64_t Val = Memory.OffsetImm->getValue();
1915 return (Val > -4096 && Val < 4096) ||
1916 (Val == std::numeric_limits<int32_t>::min());
1917 }
1918
1919 bool isConstPoolAsmImm() const {
1920 // Delay processing of the constant pool immediate; this will turn into
1921 // a constant. Match no other operand.
1922 return (isConstantPoolImm());
1923 }
1924
1925 bool isPostIdxImm8() const {
1926 if (!isImm()) return false;
1927 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1928 if (!CE) return false;
1929 int64_t Val = CE->getValue();
1930 return (Val > -256 && Val < 256) ||
1931 (Val == std::numeric_limits<int32_t>::min());
1932 }
1933
1934 bool isPostIdxImm8s4() const {
1935 if (!isImm()) return false;
1936 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1937 if (!CE) return false;
1938 int64_t Val = CE->getValue();
1939 return ((Val & 3) == 0 && Val >= -1020 && Val <= 1020) ||
1940 (Val == std::numeric_limits<int32_t>::min());
1941 }
1942
1943 bool isMSRMask() const { return Kind == k_MSRMask; }
1944 bool isBankedReg() const { return Kind == k_BankedReg; }
1945 bool isProcIFlags() const { return Kind == k_ProcIFlags; }
1946
1947 // NEON operands.
1948 bool isSingleSpacedVectorList() const {
1949 return Kind == k_VectorList && !VectorList.isDoubleSpaced;
1950 }
1951
1952 bool isDoubleSpacedVectorList() const {
1953 return Kind == k_VectorList && VectorList.isDoubleSpaced;
1954 }
1955
1956 bool isVecListOneD() const {
1957 if (!isSingleSpacedVectorList()) return false;
1958 return VectorList.Count == 1;
1959 }
1960
1961 bool isVecListTwoMQ() const {
1962 return isSingleSpacedVectorList() && VectorList.Count == 2 &&
1963 ARMMCRegisterClasses[ARM::MQPRRegClassID].contains(
1964 VectorList.RegNum);
1965 }
1966
1967 bool isVecListDPair() const {
1968 if (!isSingleSpacedVectorList()) return false;
1969 return (ARMMCRegisterClasses[ARM::DPairRegClassID]
1970 .contains(VectorList.RegNum));
1971 }
1972
1973 bool isVecListThreeD() const {
1974 if (!isSingleSpacedVectorList()) return false;
1975 return VectorList.Count == 3;
1976 }
1977
1978 bool isVecListFourD() const {
1979 if (!isSingleSpacedVectorList()) return false;
1980 return VectorList.Count == 4;
1981 }
1982
1983 bool isVecListDPairSpaced() const {
1984 if (Kind != k_VectorList) return false;
1985 if (isSingleSpacedVectorList()) return false;
1986 return (ARMMCRegisterClasses[ARM::DPairSpcRegClassID]
1987 .contains(VectorList.RegNum));
1988 }
1989
1990 bool isVecListThreeQ() const {
1991 if (!isDoubleSpacedVectorList()) return false;
1992 return VectorList.Count == 3;
1993 }
1994
1995 bool isVecListFourQ() const {
1996 if (!isDoubleSpacedVectorList()) return false;
1997 return VectorList.Count == 4;
1998 }
1999
2000 bool isVecListFourMQ() const {
2001 return isSingleSpacedVectorList() && VectorList.Count == 4 &&
2002 ARMMCRegisterClasses[ARM::MQPRRegClassID].contains(
2003 VectorList.RegNum);
2004 }
2005
2006 bool isSingleSpacedVectorAllLanes() const {
2007 return Kind == k_VectorListAllLanes && !VectorList.isDoubleSpaced;
2008 }
2009
2010 bool isDoubleSpacedVectorAllLanes() const {
2011 return Kind == k_VectorListAllLanes && VectorList.isDoubleSpaced;
2012 }
2013
2014 bool isVecListOneDAllLanes() const {
2015 if (!isSingleSpacedVectorAllLanes()) return false;
2016 return VectorList.Count == 1;
2017 }
2018
2019 bool isVecListDPairAllLanes() const {
2020 if (!isSingleSpacedVectorAllLanes()) return false;
2021 return (ARMMCRegisterClasses[ARM::DPairRegClassID]
2022 .contains(VectorList.RegNum));
2023 }
2024
2025 bool isVecListDPairSpacedAllLanes() const {
2026 if (!isDoubleSpacedVectorAllLanes()) return false;
2027 return VectorList.Count == 2;
2028 }
2029
2030 bool isVecListThreeDAllLanes() const {
2031 if (!isSingleSpacedVectorAllLanes()) return false;
2032 return VectorList.Count == 3;
2033 }
2034
2035 bool isVecListThreeQAllLanes() const {
2036 if (!isDoubleSpacedVectorAllLanes()) return false;
2037 return VectorList.Count == 3;
2038 }
2039
2040 bool isVecListFourDAllLanes() const {
2041 if (!isSingleSpacedVectorAllLanes()) return false;
2042 return VectorList.Count == 4;
2043 }
2044
2045 bool isVecListFourQAllLanes() const {
2046 if (!isDoubleSpacedVectorAllLanes()) return false;
2047 return VectorList.Count == 4;
2048 }
2049
2050 bool isSingleSpacedVectorIndexed() const {
2051 return Kind == k_VectorListIndexed && !VectorList.isDoubleSpaced;
2052 }
2053
2054 bool isDoubleSpacedVectorIndexed() const {
2055 return Kind == k_VectorListIndexed && VectorList.isDoubleSpaced;
2056 }
2057
2058 bool isVecListOneDByteIndexed() const {
2059 if (!isSingleSpacedVectorIndexed()) return false;
2060 return VectorList.Count == 1 && VectorList.LaneIndex <= 7;
2061 }
2062
2063 bool isVecListOneDHWordIndexed() const {
2064 if (!isSingleSpacedVectorIndexed()) return false;
2065 return VectorList.Count == 1 && VectorList.LaneIndex <= 3;
2066 }
2067
2068 bool isVecListOneDWordIndexed() const {
2069 if (!isSingleSpacedVectorIndexed()) return false;
2070 return VectorList.Count == 1 && VectorList.LaneIndex <= 1;
2071 }
2072
2073 bool isVecListTwoDByteIndexed() const {
2074 if (!isSingleSpacedVectorIndexed()) return false;
2075 return VectorList.Count == 2 && VectorList.LaneIndex <= 7;
2076 }
2077
2078 bool isVecListTwoDHWordIndexed() const {
2079 if (!isSingleSpacedVectorIndexed()) return false;
2080 return VectorList.Count == 2 && VectorList.LaneIndex <= 3;
2081 }
2082
2083 bool isVecListTwoQWordIndexed() const {
2084 if (!isDoubleSpacedVectorIndexed()) return false;
2085 return VectorList.Count == 2 && VectorList.LaneIndex <= 1;
2086 }
2087
2088 bool isVecListTwoQHWordIndexed() const {
2089 if (!isDoubleSpacedVectorIndexed()) return false;
2090 return VectorList.Count == 2 && VectorList.LaneIndex <= 3;
2091 }
2092
2093 bool isVecListTwoDWordIndexed() const {
2094 if (!isSingleSpacedVectorIndexed()) return false;
2095 return VectorList.Count == 2 && VectorList.LaneIndex <= 1;
2096 }
2097
2098 bool isVecListThreeDByteIndexed() const {
2099 if (!isSingleSpacedVectorIndexed()) return false;
2100 return VectorList.Count == 3 && VectorList.LaneIndex <= 7;
2101 }
2102
2103 bool isVecListThreeDHWordIndexed() const {
2104 if (!isSingleSpacedVectorIndexed()) return false;
2105 return VectorList.Count == 3 && VectorList.LaneIndex <= 3;
2106 }
2107
2108 bool isVecListThreeQWordIndexed() const {
2109 if (!isDoubleSpacedVectorIndexed()) return false;
2110 return VectorList.Count == 3 && VectorList.LaneIndex <= 1;
2111 }
2112
2113 bool isVecListThreeQHWordIndexed() const {
2114 if (!isDoubleSpacedVectorIndexed()) return false;
2115 return VectorList.Count == 3 && VectorList.LaneIndex <= 3;
2116 }
2117
2118 bool isVecListThreeDWordIndexed() const {
2119 if (!isSingleSpacedVectorIndexed()) return false;
2120 return VectorList.Count == 3 && VectorList.LaneIndex <= 1;
2121 }
2122
2123 bool isVecListFourDByteIndexed() const {
2124 if (!isSingleSpacedVectorIndexed()) return false;
2125 return VectorList.Count == 4 && VectorList.LaneIndex <= 7;
2126 }
2127
2128 bool isVecListFourDHWordIndexed() const {
2129 if (!isSingleSpacedVectorIndexed()) return false;
2130 return VectorList.Count == 4 && VectorList.LaneIndex <= 3;
2131 }
2132
2133 bool isVecListFourQWordIndexed() const {
2134 if (!isDoubleSpacedVectorIndexed()) return false;
2135 return VectorList.Count == 4 && VectorList.LaneIndex <= 1;
2136 }
2137
2138 bool isVecListFourQHWordIndexed() const {
2139 if (!isDoubleSpacedVectorIndexed()) return false;
2140 return VectorList.Count == 4 && VectorList.LaneIndex <= 3;
2141 }
2142
2143 bool isVecListFourDWordIndexed() const {
2144 if (!isSingleSpacedVectorIndexed()) return false;
2145 return VectorList.Count == 4 && VectorList.LaneIndex <= 1;
2146 }
2147
2148 bool isVectorIndex() const { return Kind == k_VectorIndex; }
2149
2150 template <unsigned NumLanes>
2151 bool isVectorIndexInRange() const {
2152 if (Kind != k_VectorIndex) return false;
2153 return VectorIndex.Val < NumLanes;
2154 }
2155
2156 bool isVectorIndex8() const { return isVectorIndexInRange<8>(); }
2157 bool isVectorIndex16() const { return isVectorIndexInRange<4>(); }
2158 bool isVectorIndex32() const { return isVectorIndexInRange<2>(); }
2159 bool isVectorIndex64() const { return isVectorIndexInRange<1>(); }
2160
2161 template<int PermittedValue, int OtherPermittedValue>
2162 bool isMVEPairVectorIndex() const {
2163 if (Kind != k_VectorIndex) return false;
2164 return VectorIndex.Val == PermittedValue ||
2165 VectorIndex.Val == OtherPermittedValue;
2166 }
2167
2168 bool isNEONi8splat() const {
2169 if (!isImm()) return false;
2170 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
2171 // Must be a constant.
2172 if (!CE) return false;
2173 int64_t Value = CE->getValue();
2174 // i8 value splatted across 8 bytes. The immediate is just the 8-bit
2175 // value.
2176 return Value >= 0 && Value < 256;
2177 }
2178
2179 bool isNEONi16splat() const {
2180 if (isNEONByteReplicate(2))
2181 return false; // Leave that for byte replication and forbid by default.
2182 if (!isImm())
2183 return false;
2184 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
2185 // Must be a constant.
2186 if (!CE) return false;
2187 unsigned Value = CE->getValue();
2188 return ARM_AM::isNEONi16splat(Value);
2189 }
2190
2191 bool isNEONi16splatNot() const {
2192 if (!isImm())
2193 return false;
2194 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
2195 // Must be a constant.
2196 if (!CE) return false;
2197 unsigned Value = CE->getValue();
2198 return ARM_AM::isNEONi16splat(~Value & 0xffff);
2199 }
2200
2201 bool isNEONi32splat() const {
2202 if (isNEONByteReplicate(4))
2203 return false; // Leave that for byte replication and forbid by default.
2204 if (!isImm())
2205 return false;
2206 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
2207 // Must be a constant.
2208 if (!CE) return false;
2209 unsigned Value = CE->getValue();
2210 return ARM_AM::isNEONi32splat(Value);
2211 }
2212
2213 bool isNEONi32splatNot() const {
2214 if (!isImm())
2215 return false;
2216 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
2217 // Must be a constant.
2218 if (!CE) return false;
2219 unsigned Value = CE->getValue();
2220 return ARM_AM::isNEONi32splat(~Value);
2221 }
2222
2223 static bool isValidNEONi32vmovImm(int64_t Value) {
2224 // i32 value with set bits only in one byte X000, 0X00, 00X0, or 000X,
2225 // for VMOV/VMVN only, 00Xf or 0Xff are also accepted.
2226 return ((Value & 0xffffffffffffff00) == 0) ||
2227 ((Value & 0xffffffffffff00ff) == 0) ||
2228 ((Value & 0xffffffffff00ffff) == 0) ||
2229 ((Value & 0xffffffff00ffffff) == 0) ||
2230 ((Value & 0xffffffffffff00ff) == 0xff) ||
2231 ((Value & 0xffffffffff00ffff) == 0xffff);
2232 }
2233
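[Editorial note: example constants for the VMOV i32 check above; illustrative only.]
//   0x0000AB00   // set bits confined to a single byte       -> valid
//   0x0000ABFF   // "00Xf" form, low byte all ones           -> valid
//   0x00ABFFFF   // "0Xff" form, low two bytes all ones      -> valid
//   0x00AB00CD   // set bits in two separate bytes           -> invalid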
2234 bool isNEONReplicate(unsigned Width, unsigned NumElems, bool Inv) const {
2235 assert((Width == 8 || Width == 16 || Width == 32) &&
2236 "Invalid element width");
2237 assert(NumElems * Width <= 64 && "Invalid result width");
2238
2239 if (!isImm())
2240 return false;
2241 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
2242 // Must be a constant.
2243 if (!CE)
2244 return false;
2245 int64_t Value = CE->getValue();
2246 if (!Value)
2247 return false; // Don't bother with zero.
2248 if (Inv)
2249 Value = ~Value;
2250
2251 uint64_t Mask = (1ull << Width) - 1;
2252 uint64_t Elem = Value & Mask;
2253 if (Width == 16 && (Elem & 0x00ff) != 0 && (Elem & 0xff00) != 0)
2254 return false;
2255 if (Width == 32 && !isValidNEONi32vmovImm(Elem))
2256 return false;
2257
2258 for (unsigned i = 1; i < NumElems; ++i) {
2259 Value >>= Width;
2260 if ((Value & Mask) != Elem)
2261 return false;
2262 }
2263 return true;
2264 }
2265
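[Editorial note: a worked call of the replication check above; the parameters and constants are illustrative.]
// isNEONReplicate(8, 4, false) (i.e. isNEONByteReplicate(4)) asks whether the
// constant is one 8-bit element repeated four times:
//   0xABABABAB -> Elem = 0xAB and every 8-bit slice matches -> true
//   0xABABABAC -> the slices disagree (0xAC vs 0xAB)        -> false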
2266 bool isNEONByteReplicate(unsigned NumBytes) const {
2267 return isNEONReplicate(8, NumBytes, false);
2268 }
2269
2270 static void checkNeonReplicateArgs(unsigned FromW, unsigned ToW) {
2271 assert((FromW == 8 || FromW == 16 || FromW == 32) &&
2272 "Invalid source width");
2273 assert((ToW == 16 || ToW == 32 || ToW == 64) &&
2274 "Invalid destination width");
2275 assert(FromW < ToW && "ToW is not less than FromW");
2276 }
2277
2278 template<unsigned FromW, unsigned ToW>
2279 bool isNEONmovReplicate() const {
2280 checkNeonReplicateArgs(FromW, ToW);
2281 if (ToW == 64 && isNEONi64splat())
2282 return false;
2283 return isNEONReplicate(FromW, ToW / FromW, false);
2284 }
2285
2286 template<unsigned FromW, unsigned ToW>
2287 bool isNEONinvReplicate() const {
2288 checkNeonReplicateArgs(FromW, ToW);
2289 return isNEONReplicate(FromW, ToW / FromW, true);
2290 }
2291
2292 bool isNEONi32vmov() const {
2293 if (isNEONByteReplicate(4))
2294 return false; // Let it be classified as a byte-replicate case.
2295 if (!isImm())
2296 return false;
2297 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
2298 // Must be a constant.
2299 if (!CE)
2300 return false;
2301 return isValidNEONi32vmovImm(CE->getValue());
2302 }
2303
2304 bool isNEONi32vmovNeg() const {
2305 if (!isImm()) return false;
2306 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
2307 // Must be a constant.
2308 if (!CE) return false;
2309 return isValidNEONi32vmovImm(~CE->getValue());
2310 }
2311
2312 bool isNEONi64splat() const {
2313 if (!isImm()) return false;
2314 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
2315 // Must be a constant.
2316 if (!CE) return false;
2317 uint64_t Value = CE->getValue();
2318 // i64 value with each byte being either 0 or 0xff.
2319 for (unsigned i = 0; i < 8; ++i, Value >>= 8)
2320 if ((Value & 0xff) != 0 && (Value & 0xff) != 0xff) return false;
2321 return true;
2322 }
2323
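[Editorial note: example values for the i64 byte-mask check above; illustrative only.]
//   0xFF00FF0000FF00FF   // every byte is 0x00 or 0xFF -> accepted
//   0xFF00FF0000FF0012   // contains the byte 0x12     -> rejected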
2324 template<int64_t Angle, int64_t Remainder>
2325 bool isComplexRotation() const {
2326 if (!isImm()) return false;
2327
2328 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
2329 if (!CE) return false;
2330 uint64_t Value = CE->getValue();
2331
2332 return (Value % Angle == Remainder && Value <= 270);
2333 }
2334
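[Editorial note: how the isComplexRotation<> template above is typically instantiated; a sketch, not taken from the report.]
// isComplexRotation<90, 0>()   accepts #0, #90, #180, #270 (VCMLA-style),
// isComplexRotation<180, 90>() accepts #90 and #270       (VCADD-style).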
2335 bool isMVELongShift() const {
2336 if (!isImm()) return false;
2337 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
2338 // Must be a constant.
2339 if (!CE) return false;
2340 uint64_t Value = CE->getValue();
2341 return Value >= 1 && Value <= 32;
2342 }
2343
2344 bool isMveSaturateOp() const {
2345 if (!isImm()) return false;
2346 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
2347 if (!CE) return false;
2348 uint64_t Value = CE->getValue();
2349 return Value == 48 || Value == 64;
2350 }
2351
2352 bool isITCondCodeNoAL() const {
2353 if (!isITCondCode()) return false;
2354 ARMCC::CondCodes CC = getCondCode();
2355 return CC != ARMCC::AL;
2356 }
2357
2358 bool isITCondCodeRestrictedI() const {
2359 if (!isITCondCode())
2360 return false;
2361 ARMCC::CondCodes CC = getCondCode();
2362 return CC == ARMCC::EQ || CC == ARMCC::NE;
2363 }
2364
2365 bool isITCondCodeRestrictedS() const {
2366 if (!isITCondCode())
2367 return false;
2368 ARMCC::CondCodes CC = getCondCode();
2369 return CC == ARMCC::LT || CC == ARMCC::GT || CC == ARMCC::LE ||
2370 CC == ARMCC::GE;
2371 }
2372
2373 bool isITCondCodeRestrictedU() const {
2374 if (!isITCondCode())
2375 return false;
2376 ARMCC::CondCodes CC = getCondCode();
2377 return CC == ARMCC::HS || CC == ARMCC::HI;
2378 }
2379
2380 bool isITCondCodeRestrictedFP() const {
2381 if (!isITCondCode())
2382 return false;
2383 ARMCC::CondCodes CC = getCondCode();
2384 return CC == ARMCC::EQ || CC == ARMCC::NE || CC == ARMCC::LT ||
2385 CC == ARMCC::GT || CC == ARMCC::LE || CC == ARMCC::GE;
2386 }
2387
2388 void addExpr(MCInst &Inst, const MCExpr *Expr) const {
2389 // Add as immediates when possible. Null MCExpr = 0.
2390 if (!Expr)
2391 Inst.addOperand(MCOperand::createImm(0));
2392 else if (const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Expr))
2393 Inst.addOperand(MCOperand::createImm(CE->getValue()));
2394 else
2395 Inst.addOperand(MCOperand::createExpr(Expr));
2396 }
2397
2398 void addARMBranchTargetOperands(MCInst &Inst, unsigned N) const {
2399 assert(N == 1 && "Invalid number of operands!");
2400 addExpr(Inst, getImm());
2401 }
2402
2403 void addThumbBranchTargetOperands(MCInst &Inst, unsigned N) const {
2404 assert(N == 1 && "Invalid number of operands!");
2405 addExpr(Inst, getImm());
2406 }
2407
2408 void addCondCodeOperands(MCInst &Inst, unsigned N) const {
2409 assert(N == 2 && "Invalid number of operands!");
2410 Inst.addOperand(MCOperand::createImm(unsigned(getCondCode())));
2411 unsigned RegNum = getCondCode() == ARMCC::AL ? 0: ARM::CPSR;
2412 Inst.addOperand(MCOperand::createReg(RegNum));
2413 }
2414
2415 void addVPTPredNOperands(MCInst &Inst, unsigned N) const {
2416 assert(N == 2 && "Invalid number of operands!");
2417 Inst.addOperand(MCOperand::createImm(unsigned(getVPTPred())));
2418 unsigned RegNum = getVPTPred() == ARMVCC::None ? 0: ARM::P0;
2419 Inst.addOperand(MCOperand::createReg(RegNum));
2420 }
2421
2422 void addVPTPredROperands(MCInst &Inst, unsigned N) const {
2423 assert(N == 3 && "Invalid number of operands!");
2424 addVPTPredNOperands(Inst, N-1);
2425 unsigned RegNum;
2426 if (getVPTPred() == ARMVCC::None) {
2427 RegNum = 0;
2428 } else {
2429 unsigned NextOpIndex = Inst.getNumOperands();
2430 const MCInstrDesc &MCID = ARMInsts[Inst.getOpcode()];
2431 int TiedOp = MCID.getOperandConstraint(NextOpIndex, MCOI::TIED_TO);
2432 assert(TiedOp >= 0 &&
2433 "Inactive register in vpred_r is not tied to an output!");
2434 RegNum = Inst.getOperand(TiedOp).getReg();
2435 }
2436 Inst.addOperand(MCOperand::createReg(RegNum));
2437 }
2438
2439 void addCoprocNumOperands(MCInst &Inst, unsigned N) const {
2440 assert(N == 1 && "Invalid number of operands!");
2441 Inst.addOperand(MCOperand::createImm(getCoproc()));
2442 }
2443
2444 void addCoprocRegOperands(MCInst &Inst, unsigned N) const {
2445 assert(N == 1 && "Invalid number of operands!");
2446 Inst.addOperand(MCOperand::createImm(getCoproc()));
2447 }
2448
2449 void addCoprocOptionOperands(MCInst &Inst, unsigned N) const {
2450 assert(N == 1 && "Invalid number of operands!");
2451 Inst.addOperand(MCOperand::createImm(CoprocOption.Val));
2452 }
2453
2454 void addITMaskOperands(MCInst &Inst, unsigned N) const {
2455 assert(N == 1 && "Invalid number of operands!");
2456 Inst.addOperand(MCOperand::createImm(ITMask.Mask));
2457 }
2458
2459 void addITCondCodeOperands(MCInst &Inst, unsigned N) const {
2460 assert(N == 1 && "Invalid number of operands!");
2461 Inst.addOperand(MCOperand::createImm(unsigned(getCondCode())));
2462 }
2463
2464 void addITCondCodeInvOperands(MCInst &Inst, unsigned N) const {
2465 assert(N == 1 && "Invalid number of operands!");
2466 Inst.addOperand(MCOperand::createImm(unsigned(ARMCC::getOppositeCondition(getCondCode()))));
2467 }
2468
2469 void addCCOutOperands(MCInst &Inst, unsigned N) const {
2470 assert(N == 1 && "Invalid number of operands!");
2471 Inst.addOperand(MCOperand::createReg(getReg()));
2472 }
2473
2474 void addRegOperands(MCInst &Inst, unsigned N) const {
2475 assert(N == 1 && "Invalid number of operands!");
2476 Inst.addOperand(MCOperand::createReg(getReg()));
2477 }
2478
2479 void addRegShiftedRegOperands(MCInst &Inst, unsigned N) const {
2480 assert(N == 3 && "Invalid number of operands!");
2481 assert(isRegShiftedReg() &&
2482 "addRegShiftedRegOperands() on non-RegShiftedReg!");
2483 Inst.addOperand(MCOperand::createReg(RegShiftedReg.SrcReg));
2484 Inst.addOperand(MCOperand::createReg(RegShiftedReg.ShiftReg));
2485 Inst.addOperand(MCOperand::createImm(
2486 ARM_AM::getSORegOpc(RegShiftedReg.ShiftTy, RegShiftedReg.ShiftImm)));
2487 }
2488
2489 void addRegShiftedImmOperands(MCInst &Inst, unsigned N) const {
2490 assert(N == 2 && "Invalid number of operands!");
2491 assert(isRegShiftedImm() &&
2492 "addRegShiftedImmOperands() on non-RegShiftedImm!");
2493 Inst.addOperand(MCOperand::createReg(RegShiftedImm.SrcReg));
2494 // Shift of #32 is encoded as 0 where permitted
2495 unsigned Imm = (RegShiftedImm.ShiftImm == 32 ? 0 : RegShiftedImm.ShiftImm);
2496 Inst.addOperand(MCOperand::createImm(
2497 ARM_AM::getSORegOpc(RegShiftedImm.ShiftTy, Imm)));
2498 }
2499
2500 void addShifterImmOperands(MCInst &Inst, unsigned N) const {
2501 assert(N == 1 && "Invalid number of operands!");
2502 Inst.addOperand(MCOperand::createImm((ShifterImm.isASR << 5) |
2503 ShifterImm.Imm));
2504 }
2505
2506 void addRegListOperands(MCInst &Inst, unsigned N) const {
2507 assert(N == 1 && "Invalid number of operands!");
2508 const SmallVectorImpl<unsigned> &RegList = getRegList();
2509 for (SmallVectorImpl<unsigned>::const_iterator
2510 I = RegList.begin(), E = RegList.end(); I != E; ++I)
2511 Inst.addOperand(MCOperand::createReg(*I));
2512 }
2513
2514 void addRegListWithAPSROperands(MCInst &Inst, unsigned N) const {
2515 assert(N == 1 && "Invalid number of operands!");
2516 const SmallVectorImpl<unsigned> &RegList = getRegList();
2517 for (SmallVectorImpl<unsigned>::const_iterator
2518 I = RegList.begin(), E = RegList.end(); I != E; ++I)
2519 Inst.addOperand(MCOperand::createReg(*I));
2520 }
2521
2522 void addDPRRegListOperands(MCInst &Inst, unsigned N) const {
2523 addRegListOperands(Inst, N);
2524 }
2525
2526 void addSPRRegListOperands(MCInst &Inst, unsigned N) const {
2527 addRegListOperands(Inst, N);
2528 }
2529
2530 void addFPSRegListWithVPROperands(MCInst &Inst, unsigned N) const {
2531 addRegListOperands(Inst, N);
2532 }
2533
2534 void addFPDRegListWithVPROperands(MCInst &Inst, unsigned N) const {
2535 addRegListOperands(Inst, N);
2536 }
2537
2538 void addRotImmOperands(MCInst &Inst, unsigned N) const {
2539 assert(N == 1 && "Invalid number of operands!");
2540 // Encoded as val>>3. The printer handles display as 8, 16, 24.
2541 Inst.addOperand(MCOperand::createImm(RotImm.Imm >> 3));
2542 }
2543
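[Editorial note: a worked example of the rotate-immediate encoding above; illustrative.]
//   RotImm.Imm == 0  -> encoded as 0
//   RotImm.Imm == 8  -> encoded as 1
//   RotImm.Imm == 16 -> encoded as 2
//   RotImm.Imm == 24 -> encoded as 3   (the printer prints 8/16/24 back)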
2544 void addModImmOperands(MCInst &Inst, unsigned N) const {
2545 assert(N == 1 && "Invalid number of operands!");
2546
2547 // Support for fixups (MCFixup)
2548 if (isImm())
2549 return addImmOperands(Inst, N);
2550
2551 Inst.addOperand(MCOperand::createImm(ModImm.Bits | (ModImm.Rot << 7)));
2552 }
2553
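[Editorial note: a sketch of how the modified-immediate operand is packed above; the concrete values are illustrative.]
// ModImm.Bits holds the 8-bit payload and ModImm.Rot the (even) rotate amount,
// so Bits | (Rot << 7) places Rot / 2 in bits [11:8] above the payload, e.g.:
//   Bits = 0xFF, Rot = 8  ->  0xFF | (8 << 7)  ==  0x4FF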
2554 void addModImmNotOperands(MCInst &Inst, unsigned N) const {
2555 assert(N == 1 && "Invalid number of operands!");
2556 const MCConstantExpr *CE = cast<MCConstantExpr>(getImm());
2557 uint32_t Enc = ARM_AM::getSOImmVal(~CE->getValue());
2558 Inst.addOperand(MCOperand::createImm(Enc));
2559 }
2560
2561 void addModImmNegOperands(MCInst &Inst, unsigned N) const {
2562 assert(N == 1 && "Invalid number of operands!");
2563 const MCConstantExpr *CE = cast<MCConstantExpr>(getImm());
2564 uint32_t Enc = ARM_AM::getSOImmVal(-CE->getValue());
2565 Inst.addOperand(MCOperand::createImm(Enc));
2566 }
2567
2568 void addThumbModImmNeg8_255Operands(MCInst &Inst, unsigned N) const {
2569 assert(N == 1 && "Invalid number of operands!");
2570 const MCConstantExpr *CE = cast<MCConstantExpr>(getImm());
2571 uint32_t Val = -CE->getValue();
2572 Inst.addOperand(MCOperand::createImm(Val));
2573 }
2574
2575 void addThumbModImmNeg1_7Operands(MCInst &Inst, unsigned N) const {
2576 assert(N == 1 && "Invalid number of operands!");
2577 const MCConstantExpr *CE = cast<MCConstantExpr>(getImm());
2578 uint32_t Val = -CE->getValue();
2579 Inst.addOperand(MCOperand::createImm(Val));
2580 }
2581
2582 void addBitfieldOperands(MCInst &Inst, unsigned N) const {
2583 assert(N == 1 && "Invalid number of operands!");
2584 // Munge the lsb/width into a bitfield mask.
2585 unsigned lsb = Bitfield.LSB;
2586 unsigned width = Bitfield.Width;
2587 // Make a 32-bit mask w/ the referenced bits clear and all other bits set.
2588 uint32_t Mask = ~(((uint32_t)0xffffffff >> lsb) << (32 - width) >>
2589 (32 - (lsb + width)));
2590 Inst.addOperand(MCOperand::createImm(Mask));
2591 }
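// Worked example (editorial, not in the upstream source): for lsb == 8 and
// width == 8 the shifts evaluate as ((0xffffffffu >> 8) << 24) >> 16, which
// is 0x0000ff00, and the final ~ yields Mask == 0xffff00ff, i.e. bits 8-15
// clear and every other bit set.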
2592
2593 void addImmOperands(MCInst &Inst, unsigned N) const {
2594 assert(N == 1 && "Invalid number of operands!");
2595 addExpr(Inst, getImm());
2596 }
2597
2598 void addFBits16Operands(MCInst &Inst, unsigned N) const {
2599 assert(N == 1 && "Invalid number of operands!");
2600 const MCConstantExpr *CE = cast<MCConstantExpr>(getImm());
2601 Inst.addOperand(MCOperand::createImm(16 - CE->getValue()));
2602 }
2603
2604 void addFBits32Operands(MCInst &Inst, unsigned N) const {
2605 assert(N == 1 && "Invalid number of operands!");
2606 const MCConstantExpr *CE = cast<MCConstantExpr>(getImm());
2607 Inst.addOperand(MCOperand::createImm(32 - CE->getValue()));
2608 }
2609
2610 void addFPImmOperands(MCInst &Inst, unsigned N) const {
2611 assert(N == 1 && "Invalid number of operands!");
2612 const MCConstantExpr *CE = cast<MCConstantExpr>(getImm());
2613 int Val = ARM_AM::getFP32Imm(APInt(32, CE->getValue()));
2614 Inst.addOperand(MCOperand::createImm(Val));
2615 }
2616
2617 void addImm8s4Operands(MCInst &Inst, unsigned N) const {
2618 assert(N == 1 && "Invalid number of operands!");
2619 // FIXME: We really want to scale the value here, but the LDRD/STRD
2620 // instructions don't encode operands that way yet.
2621 const MCConstantExpr *CE = cast<MCConstantExpr>(getImm());
2622 Inst.addOperand(MCOperand::createImm(CE->getValue()));
2623 }
2624
2625 void addImm7s4Operands(MCInst &Inst, unsigned N) const {
2626 assert(N == 1 && "Invalid number of operands!");
2627 // FIXME: We really want to scale the value here, but the VSTR/VLDR_VSYSR
2628 // instructions don't encode operands that way yet.
2629 const MCConstantExpr *CE = cast<MCConstantExpr>(getImm());
2630 Inst.addOperand(MCOperand::createImm(CE->getValue()));
2631 }
2632
2633 void addImm7Shift0Operands(MCInst &Inst, unsigned N) const {
2634 assert(N == 1 && "Invalid number of operands!");
2635 const MCConstantExpr *CE = cast<MCConstantExpr>(getImm());
2636 Inst.addOperand(MCOperand::createImm(CE->getValue()));
2637 }
2638
2639 void addImm7Shift1Operands(MCInst &Inst, unsigned N) const {
2640 assert(N == 1 && "Invalid number of operands!");
2641 const MCConstantExpr *CE = cast<MCConstantExpr>(getImm());
2642 Inst.addOperand(MCOperand::createImm(CE->getValue()));
2643 }
2644
2645 void addImm7Shift2Operands(MCInst &Inst, unsigned N) const {
2646 assert(N == 1 && "Invalid number of operands!");
2647 const MCConstantExpr *CE = cast<MCConstantExpr>(getImm());
2648 Inst.addOperand(MCOperand::createImm(CE->getValue()));
2649 }
2650
2651 void addImm7Operands(MCInst &Inst, unsigned N) const {
2652 assert(N == 1 && "Invalid number of operands!");
2653 const MCConstantExpr *CE = cast<MCConstantExpr>(getImm());
2654 Inst.addOperand(MCOperand::createImm(CE->getValue()));
2655 }
2656
2657 void addImm0_1020s4Operands(MCInst &Inst, unsigned N) const {
2658 assert(N == 1 && "Invalid number of operands!");
2659 // The immediate is scaled by four in the encoding and is stored
2660 // in the MCInst as such. Lop off the low two bits here.
2661 const MCConstantExpr *CE = cast<MCConstantExpr>(getImm());
2662 Inst.addOperand(MCOperand::createImm(CE->getValue() / 4));
2663 }
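// Worked example (editorial, not in the upstream source): an assembly
// immediate of #1020 is stored in the MCInst as 1020 / 4 == 255.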
2664
2665 void addImm0_508s4NegOperands(MCInst &Inst, unsigned N) const {
2666 assert(N == 1 && "Invalid number of operands!");
2667 // The immediate is scaled by four in the encoding and is stored
2668 // in the MCInst as such. Lop off the low two bits here.
2669 const MCConstantExpr *CE = cast<MCConstantExpr>(getImm());
2670 Inst.addOperand(MCOperand::createImm(-(CE->getValue() / 4)));
2671 }
2672
2673 void addImm0_508s4Operands(MCInst &Inst, unsigned N) const {
2674 assert(N == 1 && "Invalid number of operands!");
2675 // The immediate is scaled by four in the encoding and is stored
2676 // in the MCInst as such. Lop off the low two bits here.
2677 const MCConstantExpr *CE = cast<MCConstantExpr>(getImm());
2678 Inst.addOperand(MCOperand::createImm(CE->getValue() / 4));
2679 }
2680
2681 void addImm1_16Operands(MCInst &Inst, unsigned N) const {
2682 assert(N == 1 && "Invalid number of operands!");
2683 // The constant encodes as the immediate-1, and we store in the instruction
2684 // the bits as encoded, so subtract off one here.
2685 const MCConstantExpr *CE = cast<MCConstantExpr>(getImm());
2686 Inst.addOperand(MCOperand::createImm(CE->getValue() - 1));
2687 }
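// Illustrative example (editorial, not in the upstream source): #1 is stored
// as 0 and #16 as 15, matching the imm-1 encoding described in the comment
// above.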
2688
2689 void addImm1_32Operands(MCInst &Inst, unsigned N) const {
2690 assert(N == 1 && "Invalid number of operands!");
2691 // The constant encodes as the immediate-1, and we store in the instruction
2692 // the bits as encoded, so subtract off one here.
2693 const MCConstantExpr *CE = cast<MCConstantExpr>(getImm());
2694 Inst.addOperand(MCOperand::createImm(CE->getValue() - 1));
2695 }
2696
2697 void addImmThumbSROperands(MCInst &Inst, unsigned N) const {
2698 assert(N == 1 && "Invalid number of operands!");
2699 // The constant encodes as the immediate, except for 32, which encodes as
2700 // zero.
2701 const MCConstantExpr *CE = cast<MCConstantExpr>(getImm());
2702 unsigned Imm = CE->getValue();
2703 Inst.addOperand(MCOperand::createImm((Imm == 32 ? 0 : Imm)));
2704 }
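// Illustrative example (editorial, not in the upstream source): a shift
// amount of #32 is emitted as 0, while amounts 1-31 are passed through
// unchanged.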
2705
2706 void addPKHASRImmOperands(MCInst &Inst, unsigned N) const {
2707 assert(N == 1 && "Invalid number of operands!");
2708 // An ASR value of 32 encodes as 0, so that's how we want to add it to
2709 // the instruction as well.
2710 const MCConstantExpr *CE = cast<MCConstantExpr>(getImm());
2711 int Val = CE->getValue();
2712 Inst.addOperand(MCOperand::createImm(Val == 32 ? 0 : Val));
2713 }
2714
2715 void addT2SOImmNotOperands(MCInst &Inst, unsigned N) const {
2716 assert(N == 1 && "Invalid number of operands!");
2717 // The operand is actually a t2_so_imm, but we have its bitwise
2718 // negation in the assembly source, so twiddle it here.
2719 const MCConstantExpr *CE = cast<MCConstantExpr>(getImm());
2720 Inst.addOperand(MCOperand::createImm(~(uint32_t)CE->getValue()));
2721 }
2722
2723 void addT2SOImmNegOperands(MCInst &Inst, unsigned N) const {
2724 assert(N == 1 && "Invalid number of operands!");
2725 // The operand is actually a t2_so_imm, but we have its
2726 // negation in the assembly source, so twiddle it here.
2727 const MCConstantExpr *CE = cast<MCConstantExpr>(getImm());
2728 Inst.addOperand(MCOperand::createImm(-(uint32_t)CE->getValue()));
2729 }
2730
2731 void addImm0_4095NegOperands(MCInst &Inst, unsigned N) const {
2732 assert(N == 1 && "Invalid number of operands!");
2733 // The operand is actually an imm0_4095, but we have its
2734 // negation in the assembly source, so twiddle it here.
2735 const MCConstantExpr *CE = cast<MCConstantExpr>(getImm());
2736 Inst.addOperand(MCOperand::createImm(-(uint32_t)CE->getValue()));
2737 }
2738
2739 void addUnsignedOffset_b8s2Operands(MCInst &Inst, unsigned N) const {
2740 if (const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm())) {
2741 Inst.addOperand(MCOperand::createImm(CE->getValue() >> 2));
2742 return;
2743 }
2744 const MCSymbolRefExpr *SR = cast<MCSymbolRefExpr>(Imm.Val);
2745 Inst.addOperand(MCOperand::createExpr(SR));
2746 }
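// Worked example (editorial, not in the upstream source): a constant offset
// of #1020 becomes 1020 >> 2 == 255, while a bare symbol reference is kept
// as an expression so a later fixup can resolve it.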
2747
2748 void addThumbMemPCOperands(MCInst &Inst, unsigned N) const {
2749 assert(N == 1 && "Invalid number of operands!");
2750 if (isImm()) {
2751 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
2752 if (CE) {
2753 Inst.addOperand(MCOperand::createImm(CE->getValue()));
2754 return;
2755 }
2756 const MCSymbolRefExpr *SR = cast<MCSymbolRefExpr>(Imm.Val);
2757 Inst.addOperand(MCOperand::createExpr(SR));
2758 return;
2759 }
2760
2761 assert(isGPRMem() && "Unknown value type!");
2762 assert(isa<MCConstantExpr>(Memory.OffsetImm) && "Unknown value type!");
2763 Inst.addOperand(MCOperand::createImm(Memory.OffsetImm->getValue()));
2764 }
2765
2766 void addMemBarrierOptOperands(MCInst &Inst, unsigned N) const {
2767 assert(N == 1 && "Invalid number of operands!");
2768 Inst.addOperand(MCOperand::createImm(unsigned(getMemBarrierOpt())));
2769 }
2770
2771 void addInstSyncBarrierOptOperands(MCInst &Inst, unsigned N) const {
2772 assert(N == 1 && "Invalid number of operands!");
2773 Inst.addOperand(MCOperand::createImm(unsigned(getInstSyncBarrierOpt())));
2774 }
2775
2776 void addTraceSyncBarrierOptOperands(MCInst &Inst, unsigned N) const {
2777 assert(N == 1 && "Invalid number of operands!");
2778 Inst.addOperand(MCOperand::createImm(unsigned(getTraceSyncBarrierOpt())));
2779 }
2780
2781 void addMemNoOffsetOperands(MCInst &Inst, unsigned N) const {
2782 assert(N == 1 && "Invalid number of operands!");
2783 Inst.addOperand(MCOperand::createReg(Memory.BaseRegNum));
2784 }
2785
2786 void addMemNoOffsetT2Operands(MCInst &Inst, unsigned N) const {
2787 assert(N == 1 && "Invalid number of operands!");
2788 Inst.addOperand(MCOperand::createReg(Memory.BaseRegNum));
2789 }
2790
2791 void addMemNoOffsetT2NoSpOperands(MCInst &Inst, unsigned N) const {
2792 assert(N == 1 && "Invalid number of operands!");
2793 Inst.addOperand(MCOperand::createReg(Memory.BaseRegNum));
2794 }
2795
2796 void addMemNoOffsetTOperands(MCInst &Inst, unsigned N) const {
2797 assert(N == 1 && "Invalid number of operands!");
2798 Inst.addOperand(MCOperand::createReg(Memory.BaseRegNum));
2799 }
2800
2801 void addMemPCRelImm12Operands(MCInst &Inst, unsigned N) const {
2802 assert(N == 1 && "Invalid number of operands!");
2803 int32_t Imm = Memory.OffsetImm->getValue();
2804 Inst.addOperand(MCOperand::createImm(Imm));
2805 }
2806
2807 void addAdrLabelOperands(MCInst &Inst, unsigned N) const {
2808 assert(N == 1 && "Invalid number of operands!");
2809 assert(isImm() && "Not an immediate!");
2810
2811 // If we have an immediate that's not a constant, treat it as a label
2812 // reference needing a fixup.
2813 if (!isa<MCConstantExpr>(getImm())) {
2814 Inst.addOperand(MCOperand::createExpr(getImm()));
2815 return;
2816 }
2817
2818 const MCConstantExpr *CE = cast<MCConstantExpr>(getImm());
2819 int Val = CE->getValue();
2820 Inst.addOperand(MCOperand::createImm(Val));
2821 }
2822
2823 void addAlignedMemoryOperands(MCInst &Inst, unsigned N) const {
2824 assert(N == 2 && "Invalid number of operands!");
2825 Inst.addOperand(MCOperand::createReg(Memory.BaseRegNum));
2826 Inst.addOperand(MCOperand::createImm(Memory.Alignment));
2827 }
2828
2829 void addDupAlignedMemoryNoneOperands(MCInst &Inst, unsigned N) const {
2830 addAlignedMemoryOperands(Inst, N);
2831 }
2832
2833 void addAlignedMemoryNoneOperands(MCInst &Inst, unsigned N) const {
2834 addAlignedMemoryOperands(Inst, N);
2835 }
2836
2837 void addAlignedMemory16Operands(MCInst &Inst, unsigned N) const {
2838 addAlignedMemoryOperands(Inst, N);
2839 }
2840
2841 void addDupAlignedMemory16Operands(MCInst &Inst, unsigned N) const {
2842 addAlignedMemoryOperands(Inst, N);
2843 }
2844
2845 void addAlignedMemory32Operands(MCInst &Inst, unsigned N) const {
2846 addAlignedMemoryOperands(Inst, N);
2847 }
2848
2849 void addDupAlignedMemory32Operands(MCInst &Inst, unsigned N) const {
2850 addAlignedMemoryOperands(Inst, N);
2851 }
2852
2853 void addAlignedMemory64Operands(MCInst &Inst, unsigned N) const {
2854 addAlignedMemoryOperands(Inst, N);
2855 }
2856
2857 void addDupAlignedMemory64Operands(MCInst &Inst, unsigned N) const {
2858 addAlignedMemoryOperands(Inst, N);
2859 }
2860
2861 void addAlignedMemory64or128Operands(MCInst &Inst, unsigned N) const {
2862 addAlignedMemoryOperands(Inst, N);
2863 }
2864
2865 void addDupAlignedMemory64or128Operands(MCInst &Inst, unsigned N) const {
2866 addAlignedMemoryOperands(Inst, N);
2867 }
2868
2869 void addAlignedMemory64or128or256Operands(MCInst &Inst, unsigned N) const {
2870 addAlignedMemoryOperands(Inst, N);
2871 }
2872
2873 void addAddrMode2Operands(MCInst &Inst, unsigned N) const {
2874 assert(N == 3 && "Invalid number of operands!");
2875 int32_t Val = Memory.OffsetImm ? Memory.OffsetImm->getValue() : 0;
2876 if (!Memory.OffsetRegNum) {
2877 ARM_AM::AddrOpc AddSub = Val < 0 ? ARM_AM::sub : ARM_AM::add;
2878 // Special case for #-0
2879 if (Val == std::numeric_limits<int32_t>::min()) Val = 0;
2880 if (Val < 0) Val = -Val;
2881 Val = ARM_AM::getAM2Opc(AddSub, Val, ARM_AM::no_shift);
2882 } else {
2883 // For register offset, we encode the shift type and negation flag
2884 // here.
2885 Val = ARM_AM::getAM2Opc(Memory.isNegative ? ARM_AM::sub : ARM_AM::add,
2886 Memory.ShiftImm, Memory.ShiftType);
2887 }
2888 Inst.addOperand(MCOperand::createReg(Memory.BaseRegNum));
2889 Inst.addOperand(MCOperand::createReg(Memory.OffsetRegNum));
2890 Inst.addOperand(MCOperand::createImm(Val));
2891 }
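// Worked example (editorial, not in the upstream source): for an offset
// written as "#-0" the INT32_MIN sentinel appears to mark negative zero, so
// AddSub is already ARM_AM::sub when Val is reset to 0 and the operand
// encodes "subtract zero" rather than "add zero".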
2892
2893 void addAM2OffsetImmOperands(MCInst &Inst, unsigned N) const {
2894 assert(N == 2 && "Invalid number of operands!");
2895 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
2896 assert(CE && "non-constant AM2OffsetImm operand!");
2897 int32_t Val = CE->getValue();
2898 ARM_AM::AddrOpc AddSub = Val < 0 ? ARM_AM::sub : ARM_AM::add;
2899 // Special case for #-0
2900 if (Val == std::numeric_limits<int32_t>::min()) Val = 0;
2901 if (Val < 0) Val = -Val;
2902 Val = ARM_AM::getAM2Opc(AddSub, Val, ARM_AM::no_shift);
2903 Inst.addOperand(MCOperand::createReg(0));
2904 Inst.addOperand(MCOperand::createImm(Val));
2905 }
2906
2907 void addAddrMode3Operands(MCInst &Inst, unsigned N) const {
2908 assert(N == 3 && "Invalid number of operands!");
2909 // If we have an immediate that's not a constant, treat it as a label
2910 // reference needing a fixup. If it is a constant, it's something else
2911 // and we reject it.
2912 if (isImm()) {
2913 Inst.addOperand(MCOperand::createExpr(getImm()));
2914 Inst.addOperand(MCOperand::createReg(0));
2915 Inst.addOperand(MCOperand::createImm(0));
2916 return;
2917 }
2918
2919 int32_t Val = Memory.OffsetImm ? Memory.OffsetImm->getValue() : 0;
2920 if (!Memory.OffsetRegNum) {
2921 ARM_AM::AddrOpc AddSub = Val < 0 ? ARM_AM::sub : ARM_AM::add;
2922 // Special case for #-0
2923 if (Val == std::numeric_limits<int32_t>::min()) Val = 0;
2924 if (Val < 0) Val = -Val;
2925 Val = ARM_AM::getAM3Opc(AddSub, Val);
2926 } else {
2927 // For register offset, we encode the shift type and negation flag
2928 // here.
2929 Val = ARM_AM::getAM3Opc(Memory.isNegative ? ARM_AM::sub : ARM_AM::add, 0);
2930 }
2931 Inst.addOperand(MCOperand::createReg(Memory.BaseRegNum));
2932 Inst.addOperand(MCOperand::createReg(Memory.OffsetRegNum));
2933 Inst.addOperand(MCOperand::createImm(Val));
2934 }
2935
2936 void addAM3OffsetOperands(MCInst &Inst, unsigned N) const {
2937 assert(N == 2 && "Invalid number of operands!");
2938 if (Kind == k_PostIndexRegister) {
2939 int32_t Val =
2940 ARM_AM::getAM3Opc(PostIdxReg.isAdd ? ARM_AM::add : ARM_AM::sub, 0);
2941 Inst.addOperand(MCOperand::createReg(PostIdxReg.RegNum));
2942 Inst.addOperand(MCOperand::createImm(Val));
2943 return;
2944 }
2945
2946 // Constant offset.
2947 const MCConstantExpr *CE = static_cast<const MCConstantExpr*>(getImm());
2948 int32_t Val = CE->getValue();
2949 ARM_AM::AddrOpc AddSub = Val < 0 ? ARM_AM::sub : ARM_AM::add;
2950 // Special case for #-0
2951 if (Val == std::numeric_limits<int32_t>::min()) Val = 0;
2952 if (Val < 0) Val = -Val;
2953 Val = ARM_AM::getAM3Opc(AddSub, Val);
2954 Inst.addOperand(MCOperand::createReg(0));
2955 Inst.addOperand(MCOperand::createImm(Val));
2956 }
2957
2958 void addAddrMode5Operands(MCInst &Inst, unsigned N) const {
2959 assert(N == 2 && "Invalid number of operands!");
2960 // If we have an immediate that's not a constant, treat it as a label
2961 // reference needing a fixup. If it is a constant, it's something else
2962 // and we reject it.
2963 if (isImm()) {
2964 Inst.addOperand(MCOperand::createExpr(getImm()));
2965 Inst.addOperand(MCOperand::createImm(0));
2966 return;
2967 }
2968
2969 // The lower two bits are always zero and as such are not encoded.
2970 int32_t Val = Memory.OffsetImm ? Memory.OffsetImm->getValue() / 4 : 0;
2971 ARM_AM::AddrOpc AddSub = Val < 0 ? ARM_AM::sub : ARM_AM::add;
2972 // Special case for #-0
2973 if (Val == std::numeric_limits<int32_t>::min()) Val = 0;
2974 if (Val < 0) Val = -Val;
2975 Val = ARM_AM::getAM5Opc(AddSub, Val);
2976 Inst.addOperand(MCOperand::createReg(Memory.BaseRegNum));
2977 Inst.addOperand(MCOperand::createImm(Val));
2978 }
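// Worked example (editorial, not in the upstream source): a byte offset of
// #-8 becomes Val == -2 after the divide-by-4 scaling, so the operand
// encodes ARM_AM::sub with a magnitude of 2.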
2979
2980 void addAddrMode5FP16Operands(MCInst &Inst, unsigned N) const {
2981 assert(N == 2 && "Invalid number of operands!");
2982 // If we have an immediate that's not a constant, treat it as a label
2983 // reference needing a fixup. If it is a constant, it's something else
2984 // and we reject it.
2985 if (isImm()) {
2986 Inst.addOperand(MCOperand::createExpr(getImm()));
2987 Inst.addOperand(MCOperand::createImm(0));
2988 return;
2989 }
2990
2991 // The lower bit is always zero and as such is not encoded.
2992 int32_t Val = Memory.OffsetImm ? Memory.OffsetImm->getValue() / 2 : 0;
2993 ARM_AM::AddrOpc AddSub = Val < 0 ? ARM_AM::sub : ARM_AM::add;
2994 // Special case for #-0
2995 if (Val == std::numeric_limits<int32_t>::min()) Val = 0;
2996 if (Val < 0) Val = -Val;
2997 Val = ARM_AM::getAM5FP16Opc(AddSub, Val);
2998 Inst.addOperand(MCOperand::createReg(Memory.BaseRegNum));
2999 Inst.addOperand(MCOperand::createImm(Val));
3000 }
3001
3002 void addMemImm8s4OffsetOperands(MCInst &Inst, unsigned N) const {
3003 assert(N == 2 && "Invalid number of operands!");
3004 // If we have an immediate that's not a constant, treat it as a label
3005 // reference needing a fixup. If it is a constant, it's something else
3006 // and we reject it.
3007 if (isImm()) {
3008 Inst.addOperand(MCOperand::createExpr(getImm()));
3009 Inst.addOperand(MCOperand::createImm(0));
3010 return;
3011 }
3012
3013 int64_t Val = Memory.OffsetImm ? Memory.OffsetImm->getValue() : 0;
3014 Inst.addOperand(MCOperand::createReg(Memory.BaseRegNum));
3015 Inst.addOperand(MCOperand::createImm(Val));
3016 }
3017
3018 void addMemImm7s4OffsetOperands(MCInst &Inst, unsigned N) const {
3019 assert(N == 2 && "Invalid number of operands!");
3020 // If we have an immediate that's not a constant, treat it as a label
3021 // reference needing a fixup. If it is a constant, it's something else
3022 // and we reject it.
3023 if (isImm()) {
3024 Inst.addOperand(MCOperand::createExpr(getImm()));
3025 Inst.addOperand(MCOperand::createImm(0));
3026 return;
3027 }
3028
3029 int64_t Val = Memory.OffsetImm ? Memory.OffsetImm->getValue() : 0;
3030 Inst.addOperand(MCOperand::createReg(Memory.BaseRegNum));
3031 Inst.addOperand(MCOperand::createImm(Val));
3032 }
3033
3034 void addMemImm0_1020s4OffsetOperands(MCInst &Inst, unsigned N) const {
3035 assert(N == 2 && "Invalid number of operands!");
3036 // The lower two bits are always zero and as such are not encoded.
3037 int32_t Val = Memory.OffsetImm ? Memory.OffsetImm->getValue() / 4 : 0;
3038 Inst.addOperand(MCOperand::createReg(Memory.BaseRegNum));
3039 Inst.addOperand(MCOperand::createImm(Val));
3040 }
3041
3042 void addMemImmOffsetOperands(MCInst &Inst, unsigned N) const {
3043 assert(N == 2 && "Invalid number of operands!");
3044 int64_t Val = Memory.OffsetImm ? Memory.OffsetImm->getValue() : 0;
3045 Inst.addOperand(MCOperand::createReg(Memory.BaseRegNum));
3046 Inst.addOperand(MCOperand::createImm(Val));
3047 }
3048
3049 void addMemRegRQOffsetOperands(MCInst &Inst, unsigned N) const {
3050 assert(N == 2 && "Invalid number of operands!");
3051 Inst.addOperand(MCOperand::createReg(Memory.BaseRegNum));
3052 Inst.addOperand(MCOperand::createReg(Memory.OffsetRegNum));
3053 }
3054
3055 void addMemUImm12OffsetOperands(MCInst &Inst, unsigned N) const {
3056 assert(N == 2 && "Invalid number of operands!");
3057 // If this is an immediate, it's a label reference.
3058 if (isImm()) {
3059 addExpr(Inst, getImm());
3060 Inst.addOperand(MCOperand::createImm(0));
3061 return;
3062 }
3063
3064 // Otherwise, it's a normal memory reg+offset.
3065 int64_t Val = Memory.OffsetImm ? Memory.OffsetImm->getValue() : 0;
3066 Inst.addOperand(MCOperand::createReg(Memory.BaseRegNum));
3067 Inst.addOperand(MCOperand::createImm(Val));
3068 }
3069
3070 void addMemImm12OffsetOperands(MCInst &Inst, unsigned N) const {
3071 assert(N == 2 && "Invalid number of operands!");
3072 // If this is an immediate, it's a label reference.
3073 if (isImm()) {
3074 addExpr(Inst, getImm());
3075 Inst.addOperand(MCOperand::createImm(0));
3076 return;
3077 }
3078
3079 // Otherwise, it's a normal memory reg+offset.
3080 int64_t Val = Memory.OffsetImm ? Memory.OffsetImm->getValue() : 0;
3081 Inst.addOperand(MCOperand::createReg(Memory.BaseRegNum));
3082 Inst.addOperand(MCOperand::createImm(Val));
3083 }
3084
3085 void addConstPoolAsmImmOperands(MCInst &Inst, unsigned N) const {
3086 assert(N == 1 && "Invalid number of operands!");
3087 // This is the container for the immediate that we will create the
3088 // constant pool from.
3089 addExpr(Inst, getConstantPoolImm());
3090 return;
3091 }
3092
3093 void addMemTBBOperands(MCInst &Inst, unsigned N) const {
3094 assert(N == 2 && "Invalid number of operands!");
3095 Inst.addOperand(MCOperand::createReg(Memory.BaseRegNum));
3096 Inst.addOperand(MCOperand::createReg(Memory.OffsetRegNum));
3097 }
3098
3099 void addMemTBHOperands(MCInst &Inst, unsigned N) const {
3100 assert(N == 2 && "Invalid number of operands!");
3101 Inst.addOperand(MCOperand::createReg(Memory.BaseRegNum));
3102 Inst.addOperand(MCOperand::createReg(Memory.OffsetRegNum));
3103 }
3104
3105 void addMemRegOffsetOperands(MCInst &Inst, unsigned N) const {
3106 assert(N == 3 && "Invalid number of operands!");
3107 unsigned Val =
3108 ARM_AM::getAM2Opc(Memory.isNegative ? ARM_AM::sub : ARM_AM::add,
3109 Memory.ShiftImm, Memory.ShiftType);
3110 Inst.addOperand(MCOperand::createReg(Memory.BaseRegNum));
3111 Inst.addOperand(MCOperand::createReg(Memory.OffsetRegNum));
3112 Inst.addOperand(MCOperand::createImm(Val));
3113 }
3114
3115 void addT2MemRegOffsetOperands(MCInst &Inst, unsigned N) const {
3116 assert(N == 3 && "Invalid number of operands!");
3117 Inst.addOperand(MCOperand::createReg(Memory.BaseRegNum));
3118 Inst.addOperand(MCOperand::createReg(Memory.OffsetRegNum));
3119 Inst.addOperand(MCOperand::createImm(Memory.ShiftImm));
3120 }
3121
3122 void addMemThumbRROperands(MCInst &Inst, unsigned N) const {
3123 assert(N == 2 && "Invalid number of operands!");
3124 Inst.addOperand(MCOperand::createReg(Memory.BaseRegNum));
3125 Inst.addOperand(MCOperand::createReg(Memory.OffsetRegNum));
3126 }
3127
3128 void addMemThumbRIs4Operands(MCInst &Inst, unsigned N) const {
3129 assert(N == 2 && "Invalid number of operands!");
3130 int64_t Val = Memory.OffsetImm ? (Memory.OffsetImm->getValue() / 4) : 0;
3131 Inst.addOperand(MCOperand::createReg(Memory.BaseRegNum));
3132 Inst.addOperand(MCOperand::createImm(Val));
3133 }
3134
3135 void addMemThumbRIs2Operands(MCInst &Inst, unsigned N) const {
3136 assert(N == 2 && "Invalid number of operands!");
3137 int64_t Val = Memory.OffsetImm ? (Memory.OffsetImm->getValue() / 2) : 0;
3138 Inst.addOperand(MCOperand::createReg(Memory.BaseRegNum));
3139 Inst.addOperand(MCOperand::createImm(Val));
3140 }
3141
3142 void addMemThumbRIs1Operands(MCInst &Inst, unsigned N) const {
3143 assert(N == 2 && "Invalid number of operands!");
3144 int64_t Val = Memory.OffsetImm ? (Memory.OffsetImm->getValue()) : 0;
3145 Inst.addOperand(MCOperand::createReg(Memory.BaseRegNum));
3146 Inst.addOperand(MCOperand::createImm(Val));
3147 }
3148
3149 void addMemThumbSPIOperands(MCInst &Inst, unsigned N) const {
3150 assert(N == 2 && "Invalid number of operands!");
3151 int64_t Val = Memory.OffsetImm ? (Memory.OffsetImm->getValue() / 4) : 0;
3152 Inst.addOperand(MCOperand::createReg(Memory.BaseRegNum));
3153 Inst.addOperand(MCOperand::createImm(Val));
3154 }
3155
3156 void addPostIdxImm8Operands(MCInst &Inst, unsigned N) const {
3157 assert(N == 1 && "Invalid number of operands!");
3158 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
3159 assert(CE && "non-constant post-idx-imm8 operand!");
3160 int Imm = CE->getValue();
3161 bool isAdd = Imm >= 0;
3162 if (Imm == std::numeric_limits<int32_t>::min()) Imm = 0;
3163 Imm = (Imm < 0 ? -Imm : Imm) | (int)isAdd << 8;
3164 Inst.addOperand(MCOperand::createImm(Imm));
3165 }
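// Worked example (editorial, not in the upstream source): bit 8 carries the
// add/subtract flag, so #4 encodes as 0x104 and #-4 encodes as 0x004.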
3166
3167 void addPostIdxImm8s4Operands(MCInst &Inst, unsigned N) const {
3168 assert(N == 1 && "Invalid number of operands!");
3169 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
3170 assert(CE && "non-constant post-idx-imm8s4 operand!");
3171 int Imm = CE->getValue();
3172 bool isAdd = Imm >= 0;
3173 if (Imm == std::numeric_limits<int32_t>::min()) Imm = 0;
3174 // Immediate is scaled by 4.
3175 Imm = ((Imm < 0 ? -Imm : Imm) / 4) | (int)isAdd << 8;
3176 Inst.addOperand(MCOperand::createImm(Imm));
3177 }
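// Worked example (editorial, not in the upstream source): with the extra
// scale-by-4 step, #16 encodes as (16 / 4) | 0x100 == 0x104 and #-16 as
// 0x004.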
3178
3179 void addPostIdxRegOperands(MCInst &Inst, unsigned N) const {
3180 assert(N == 2 && "Invalid number of operands!");
3181 Inst.addOperand(MCOperand::createReg(PostIdxReg.RegNum));
3182 Inst.addOperand(MCOperand::createImm(PostIdxReg.isAdd));
3183 }
3184
3185 void addPostIdxRegShiftedOperands(MCInst &Inst, unsigned N) const {
3186 assert(N == 2 && "Invalid number of operands!");
3187 Inst.addOperand(MCOperand::createReg(PostIdxReg.RegNum));
3188 // The sign, shift type, and shift amount are encoded in a single operand
3189 // using the AM2 encoding helpers.
3190 ARM_AM::AddrOpc opc = PostIdxReg.isAdd ? ARM_AM::add : ARM_AM::sub;
3191 unsigned Imm = ARM_AM::getAM2Opc(opc, PostIdxReg.ShiftImm,
3192 PostIdxReg.ShiftTy);
3193 Inst.addOperand(MCOperand::createImm(Imm));
3194 }
3195
3196 void addPowerTwoOperands(MCInst &Inst, unsigned N) const {
3197 assert(N == 1 && "Invalid number of operands!");
3198 const MCConstantExpr *CE = cast<MCConstantExpr>(getImm());
3199 Inst.addOperand(MCOperand::createImm(CE->getValue()));
3200 }
3201
3202 void addMSRMaskOperands(MCInst &Inst, unsigned N) const {
3203 assert(N == 1 && "Invalid number of operands!");
3204 Inst.addOperand(MCOperand::createImm(unsigned(getMSRMask())));
3205 }
3206
3207 void addBankedRegOperands(MCInst &Inst, unsigned N) const {
3208 assert(N == 1 && "Invalid number of operands!");
3209 Inst.addOperand(MCOperand::createImm(unsigned(getBankedReg())));
3210 }
3211
3212 void addProcIFlagsOperands(MCInst &Inst, unsigned N) const {
3213 assert(N == 1 && "Invalid number of operands!");
3214 Inst.addOperand(MCOperand::createImm(unsigned(getProcIFlags())));
3215 }
3216
3217 void addVecListOperands(MCInst &Inst, unsigned N) const {
3218 assert(N == 1 && "Invalid number of operands!");
3219 Inst.addOperand(MCOperand::createReg(VectorList.RegNum));
3220 }
3221
3222 void addMVEVecListOperands(MCInst &Inst, unsigned N) const {
3223 assert(N == 1 && "Invalid number of operands!");
3224
3225 // When we come here, the VectorList field will identify a range
3226 // of q-registers by its base register and length, and it will
3227 // have already been error-checked to be the expected length of
3228 // range and contain only q-regs in the range q0-q7. So we can
3229 // count on the base register being in the range q0-q6 (for 2
3230 // regs) or q0-q4 (for 4).
3231 //
3232 // The MVE instructions taking a register range of this kind will
3233 // need an operand in the QQPR or QQQQPR class, representing the
3234 // entire range as a unit. So we must translate into that class,
3235 // by finding the index of the base register in the MQPR reg
3236 // class, and returning the super-register at the corresponding
3237 // index in the target class.
3238
3239 const MCRegisterClass *RC_in = &ARMMCRegisterClasses[ARM::MQPRRegClassID];
3240 const MCRegisterClass *RC_out = (VectorList.Count == 2) ?
3241 &ARMMCRegisterClasses[ARM::QQPRRegClassID] :
3242 &ARMMCRegisterClasses[ARM::QQQQPRRegClassID];
3243
3244 unsigned I, E = RC_out->getNumRegs();
3245 for (I = 0; I < E; I++)
3246 if (RC_in->getRegister(I) == VectorList.RegNum)
3247 break;
3248 assert(I < E && "Invalid vector list start register!");
3249
3250 Inst.addOperand(MCOperand::createReg(RC_out->getRegister(I)));
3251 }
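// Worked example (editorial, not in the upstream source, assuming MQPR
// enumerates q0-q7 in order): a two-register list starting at q2 matches at
// index 2, so the operand added is the QQPR super-register at index 2,
// covering q2-q3.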
3252
3253 void addVecListIndexedOperands(MCInst &Inst, unsigned N) const {
3254 assert(N == 2 && "Invalid number of operands!");
3255 Inst.addOperand(MCOperand::createReg(VectorList.RegNum));
3256 Inst.addOperand(MCOperand::createImm(VectorList.LaneIndex));
3257 }
3258
3259 void addVectorIndex8Operands(MCInst &Inst, unsigned N) const {
3260 assert(N == 1 && "Invalid number of operands!");
3261 Inst.addOperand(MCOperand::createImm(getVectorIndex()));
3262 }
3263
3264 void addVectorIndex16Operands(MCInst &Inst, unsigned N) const {
3265 assert(N == 1 && "Invalid number of operands!");
3266 Inst.addOperand(MCOperand::createImm(getVectorIndex()));
3267 }
3268
3269 void addVectorIndex32Operands(MCInst &Inst, unsigned N) const {
3270 assert(N == 1 && "Invalid number of operands!");
3271 Inst.addOperand(MCOperand::createImm(getVectorIndex()));
3272 }
3273
3274 void addVectorIndex64Operands(MCInst &Inst, unsigned N) const {
3275 assert(N == 1 && "Invalid number of operands!");
3276 Inst.addOperand(MCOperand::createImm(getVectorIndex()));
3277 }
3278
3279 void addMVEVectorIndexOperands(MCInst &Inst, unsigned N) const {
3280 assert(N == 1 && "Invalid number of operands!");
3281 Inst.addOperand(MCOperand::createImm(getVectorIndex()));
3282 }
3283
3284 void addMVEPairVectorIndexOperands(MCInst &Inst, unsigned N) const {
3285 assert(N == 1 && "Invalid number of operands!");
3286 Inst.addOperand(MCOperand::createImm(getVectorIndex()));
3287 }
3288
3289 void addNEONi8splatOperands(MCInst &Inst, unsigned N) const {
3290 assert(N == 1 && "Invalid number of operands!");
3291 // The immediate encodes the type of constant as well as the value.
3292 // Mask in that this is an i8 splat.
3293 const MCConstantExpr *CE = cast<MCConstantExpr>(getImm());
3294 Inst.addOperand(MCOperand::createImm(CE->getValue() | 0xe00));
3295 }
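// Worked example (editorial, not in the upstream source): a splat value of
// 0x55 is emitted as 0x55 | 0xe00 == 0xe55; the 0xe00 marks cmode = 0b1110,
// as the i8-replicate helper below also notes.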
3296
3297 void addNEONi16splatOperands(MCInst &Inst, unsigned N) const {
3298 assert(N == 1 && "Invalid number of operands!");
3299 // The immediate encodes the type of constant as well as the value.
3300 const MCConstantExpr *CE = cast<MCConstantExpr>(getImm());
3301 unsigned Value = CE->getValue();
3302 Value = ARM_AM::encodeNEONi16splat(Value);
3303 Inst.addOperand(MCOperand::createImm(Value));
3304 }
3305
3306 void addNEONi16splatNotOperands(MCInst &Inst, unsigned N) const {
3307 assert(N == 1 && "Invalid number of operands!");
3308 // The immediate encodes the type of constant as well as the value.
3309 const MCConstantExpr *CE = cast<MCConstantExpr>(getImm());
3310 unsigned Value = CE->getValue();
3311 Value = ARM_AM::encodeNEONi16splat(~Value & 0xffff);
3312 Inst.addOperand(MCOperand::createImm(Value));
3313 }
3314
3315 void addNEONi32splatOperands(MCInst &Inst, unsigned N) const {
3316 assert(N == 1 && "Invalid number of operands!");
3317 // The immediate encodes the type of constant as well as the value.
3318 const MCConstantExpr *CE = cast<MCConstantExpr>(getImm());
3319 unsigned Value = CE->getValue();
3320 Value = ARM_AM::encodeNEONi32splat(Value);
3321 Inst.addOperand(MCOperand::createImm(Value));
3322 }
3323
3324 void addNEONi32splatNotOperands(MCInst &Inst, unsigned N) const {
3325 assert(N == 1 && "Invalid number of operands!");
3326 // The immediate encodes the type of constant as well as the value.
3327 const MCConstantExpr *CE = cast<MCConstantExpr>(getImm());
3328 unsigned Value = CE->getValue();
3329 Value = ARM_AM::encodeNEONi32splat(~Value);
3330 Inst.addOperand(MCOperand::createImm(Value));
3331 }
3332
3333 void addNEONi8ReplicateOperands(MCInst &Inst, bool Inv) const {
3334 // The immediate encodes the type of constant as well as the value.
3335 const MCConstantExpr *CE = cast<MCConstantExpr>(getImm());
3336 assert((Inst.getOpcode() == ARM::VMOVv8i8 ||
3337 Inst.getOpcode() == ARM::VMOVv16i8) &&
3338 "All instructions that want to replicate a non-zero byte "
3339 "must always be replaced with VMOVv8i8 or VMOVv16i8.");
3340 unsigned Value = CE->getValue();
3341 if (Inv)
3342 Value = ~Value;
3343 unsigned B = Value & 0xff;
3344 B |= 0xe00; // cmode = 0b1110
3345 Inst.addOperand(MCOperand::createImm(B));
3346 }
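// Illustrative example of the encoding above (values chosen for illustration):
// for CE->getValue() == 0x53 and Inv == false, B becomes 0x53 | 0xe00 == 0xe53;
// with Inv == true and CE->getValue() == 0xffffffac, ~Value yields 0x53 and the
// same 0xe53 immediate is emitted.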
3347
3348 void addNEONinvi8ReplicateOperands(MCInst &Inst, unsigned N) const {
3349 assert(N == 1 && "Invalid number of operands!");
3350 addNEONi8ReplicateOperands(Inst, true);
3351 }
3352
3353 static unsigned encodeNeonVMOVImmediate(unsigned Value) {
3354 if (Value >= 256 && Value <= 0xffff)
3355 Value = (Value >> 8) | ((Value & 0xff) ? 0xc00 : 0x200);
3356 else if (Value > 0xffff && Value <= 0xffffff)
3357 Value = (Value >> 16) | ((Value & 0xff) ? 0xd00 : 0x400);
3358 else if (Value > 0xffffff)
3359 Value = (Value >> 24) | 0x600;
3360 return Value;
3361 }
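// Worked examples for the helper above (illustrative values):
//   encodeNeonVMOVImmediate(0x2c)       == 0x2c   (byte-sized, returned unchanged)
//   encodeNeonVMOVImmediate(0x2c00)     == 0x22c  (low byte zero -> cmode bits 0x200)
//   encodeNeonVMOVImmediate(0x2cff)     == 0xc2c  (low byte set  -> cmode bits 0xc00)
//   encodeNeonVMOVImmediate(0x2c0000)   == 0x42c
//   encodeNeonVMOVImmediate(0x2c000000) == 0x62c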
3362
3363 void addNEONi32vmovOperands(MCInst &Inst, unsigned N) const {
3364 assert(N == 1 && "Invalid number of operands!");
3365 // The immediate encodes the type of constant as well as the value.
3366 const MCConstantExpr *CE = cast<MCConstantExpr>(getImm());
3367 unsigned Value = encodeNeonVMOVImmediate(CE->getValue());
3368 Inst.addOperand(MCOperand::createImm(Value));
3369 }
3370
3371 void addNEONvmovi8ReplicateOperands(MCInst &Inst, unsigned N) const {
3372 assert(N == 1 && "Invalid number of operands!");
3373 addNEONi8ReplicateOperands(Inst, false);
3374 }
3375
3376 void addNEONvmovi16ReplicateOperands(MCInst &Inst, unsigned N) const {
3377 assert(N == 1 && "Invalid number of operands!");
3378 const MCConstantExpr *CE = cast<MCConstantExpr>(getImm());
3379 assert((Inst.getOpcode() == ARM::VMOVv4i16 ||
3380 Inst.getOpcode() == ARM::VMOVv8i16 ||
3381 Inst.getOpcode() == ARM::VMVNv4i16 ||
3382 Inst.getOpcode() == ARM::VMVNv8i16) &&
3383 "All instructions that want to replicate non-zero half-word "
3384 "always must be replaced with V{MOV,MVN}v{4,8}i16.");
3385 uint64_t Value = CE->getValue();
3386 unsigned Elem = Value & 0xffff;
3387 if (Elem >= 256)
3388 Elem = (Elem >> 8) | 0x200;
3389 Inst.addOperand(MCOperand::createImm(Elem));
3390 }
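// Illustrative example: for a half-word splat value of 0x00a5 the element is
// emitted as-is (0xa5); for 0xa500 it becomes (0xa500 >> 8) | 0x200 == 0x2a5.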
3391
3392 void addNEONi32vmovNegOperands(MCInst &Inst, unsigned N) const {
3393 assert(N == 1 && "Invalid number of operands!");
3394 // The immediate encodes the type of constant as well as the value.
3395 const MCConstantExpr *CE = cast<MCConstantExpr>(getImm());
3396 unsigned Value = encodeNeonVMOVImmediate(~CE->getValue());
3397 Inst.addOperand(MCOperand::createImm(Value));
3398 }
3399
3400 void addNEONvmovi32ReplicateOperands(MCInst &Inst, unsigned N) const {
3401 assert(N == 1 && "Invalid number of operands!");
3402 const MCConstantExpr *CE = cast<MCConstantExpr>(getImm());
3403 assert((Inst.getOpcode() == ARM::VMOVv2i32 ||
3404 Inst.getOpcode() == ARM::VMOVv4i32 ||
3405 Inst.getOpcode() == ARM::VMVNv2i32 ||
3406 Inst.getOpcode() == ARM::VMVNv4i32) &&
3407 "All instructions that want to replicate non-zero word "
3408 "always must be replaced with V{MOV,MVN}v{2,4}i32.");
3409 uint64_t Value = CE->getValue();
3410 unsigned Elem = encodeNeonVMOVImmediate(Value & 0xffffffff);
3411 Inst.addOperand(MCOperand::createImm(Elem));
3412 }
3413
3414 void addNEONi64splatOperands(MCInst &Inst, unsigned N) const {
3415 assert(N == 1 && "Invalid number of operands!");
3416 // The immediate encodes the type of constant as well as the value.
3417 const MCConstantExpr *CE = cast<MCConstantExpr>(getImm());
3418 uint64_t Value = CE->getValue();
3419 unsigned Imm = 0;
3420 for (unsigned i = 0; i < 8; ++i, Value >>= 8) {
3421 Imm |= (Value & 1) << i;
3422 }
3423 Inst.addOperand(MCOperand::createImm(Imm | 0x1e00));
3424 }
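// Illustrative example: a 64-bit splat of 0xff00ff00ff00ff00 has bit 0 set in
// every other byte, so the loop above collects Imm == 0xaa and the operand is
// emitted as 0xaa | 0x1e00 == 0x1eaa.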
3425
3426 void addComplexRotationEvenOperands(MCInst &Inst, unsigned N) const {
3427 assert(N == 1 && "Invalid number of operands!");
3428 const MCConstantExpr *CE = cast<MCConstantExpr>(getImm());
3429 Inst.addOperand(MCOperand::createImm(CE->getValue() / 90));
3430 }
3431
3432 void addComplexRotationOddOperands(MCInst &Inst, unsigned N) const {
3433 assert(N == 1 && "Invalid number of operands!");
3434 const MCConstantExpr *CE = cast<MCConstantExpr>(getImm());
3435 Inst.addOperand(MCOperand::createImm((CE->getValue() - 90) / 180));
3436 }
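// Illustrative examples: a rotation written as #0, #90, #180 or #270 becomes
// immediate 0, 1, 2 or 3 through the "even" form above, while the "odd" form
// maps #90 to 0 and #270 to 1.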
3437
3438 void addMveSaturateOperands(MCInst &Inst, unsigned N) const {
3439 assert(N == 1 && "Invalid number of operands!");
3440 const MCConstantExpr *CE = cast<MCConstantExpr>(getImm());
3441 unsigned Imm = CE->getValue();
3442 assert((Imm == 48 || Imm == 64) && "Invalid saturate operand");
3443 Inst.addOperand(MCOperand::createImm(Imm == 48 ? 1 : 0));
3444 }
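// Illustrative example: "#48" is encoded as immediate 1 and "#64" as 0, the
// only two saturate widths the assert above permits.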
3445
3446 void print(raw_ostream &OS) const override;
3447
3448 static std::unique_ptr<ARMOperand> CreateITMask(unsigned Mask, SMLoc S) {
3449 auto Op = std::make_unique<ARMOperand>(k_ITCondMask);
3450 Op->ITMask.Mask = Mask;
3451 Op->StartLoc = S;
3452 Op->EndLoc = S;
3453 return Op;
3454 }
3455
3456 static std::unique_ptr<ARMOperand> CreateCondCode(ARMCC::CondCodes CC,
3457 SMLoc S) {
3458 auto Op = std::make_unique<ARMOperand>(k_CondCode);
3459 Op->CC.Val = CC;
3460 Op->StartLoc = S;
3461 Op->EndLoc = S;
3462 return Op;
3463 }
3464
3465 static std::unique_ptr<ARMOperand> CreateVPTPred(ARMVCC::VPTCodes CC,
3466 SMLoc S) {
3467 auto Op = std::make_unique<ARMOperand>(k_VPTPred);
3468 Op->VCC.Val = CC;
3469 Op->StartLoc = S;
3470 Op->EndLoc = S;
3471 return Op;
3472 }
3473
3474 static std::unique_ptr<ARMOperand> CreateCoprocNum(unsigned CopVal, SMLoc S) {
3475 auto Op = std::make_unique<ARMOperand>(k_CoprocNum);
3476 Op->Cop.Val = CopVal;
3477 Op->StartLoc = S;
3478 Op->EndLoc = S;
3479 return Op;
3480 }
3481
3482 static std::unique_ptr<ARMOperand> CreateCoprocReg(unsigned CopVal, SMLoc S) {
3483 auto Op = std::make_unique<ARMOperand>(k_CoprocReg);
3484 Op->Cop.Val = CopVal;
3485 Op->StartLoc = S;
3486 Op->EndLoc = S;
3487 return Op;
3488 }
3489
3490 static std::unique_ptr<ARMOperand> CreateCoprocOption(unsigned Val, SMLoc S,
3491 SMLoc E) {
3492 auto Op = std::make_unique<ARMOperand>(k_CoprocOption);
3493 Op->Cop.Val = Val;
3494 Op->StartLoc = S;
3495 Op->EndLoc = E;
3496 return Op;
3497 }
3498
3499 static std::unique_ptr<ARMOperand> CreateCCOut(unsigned RegNum, SMLoc S) {
3500 auto Op = std::make_unique<ARMOperand>(k_CCOut);
3501 Op->Reg.RegNum = RegNum;
3502 Op->StartLoc = S;
3503 Op->EndLoc = S;
3504 return Op;
3505 }
3506
3507 static std::unique_ptr<ARMOperand> CreateToken(StringRef Str, SMLoc S) {
3508 auto Op = std::make_unique<ARMOperand>(k_Token);
3509 Op->Tok.Data = Str.data();
3510 Op->Tok.Length = Str.size();
3511 Op->StartLoc = S;
3512 Op->EndLoc = S;
3513 return Op;
3514 }
3515
3516 static std::unique_ptr<ARMOperand> CreateReg(unsigned RegNum, SMLoc S,
3517 SMLoc E) {
3518 auto Op = std::make_unique<ARMOperand>(k_Register);
3519 Op->Reg.RegNum = RegNum;
3520 Op->StartLoc = S;
3521 Op->EndLoc = E;
3522 return Op;
3523 }
3524
3525 static std::unique_ptr<ARMOperand>
3526 CreateShiftedRegister(ARM_AM::ShiftOpc ShTy, unsigned SrcReg,
3527 unsigned ShiftReg, unsigned ShiftImm, SMLoc S,
3528 SMLoc E) {
3529 auto Op = std::make_unique<ARMOperand>(k_ShiftedRegister);
3530 Op->RegShiftedReg.ShiftTy = ShTy;
3531 Op->RegShiftedReg.SrcReg = SrcReg;
3532 Op->RegShiftedReg.ShiftReg = ShiftReg;
3533 Op->RegShiftedReg.ShiftImm = ShiftImm;
3534 Op->StartLoc = S;
3535 Op->EndLoc = E;
3536 return Op;
3537 }
3538
3539 static std::unique_ptr<ARMOperand>
3540 CreateShiftedImmediate(ARM_AM::ShiftOpc ShTy, unsigned SrcReg,
3541 unsigned ShiftImm, SMLoc S, SMLoc E) {
3542 auto Op = std::make_unique<ARMOperand>(k_ShiftedImmediate);
3543 Op->RegShiftedImm.ShiftTy = ShTy;
3544 Op->RegShiftedImm.SrcReg = SrcReg;
3545 Op->RegShiftedImm.ShiftImm = ShiftImm;
3546 Op->StartLoc = S;
3547 Op->EndLoc = E;
3548 return Op;
3549 }
3550
3551 static std::unique_ptr<ARMOperand> CreateShifterImm(bool isASR, unsigned Imm,
3552 SMLoc S, SMLoc E) {
3553 auto Op = std::make_unique<ARMOperand>(k_ShifterImmediate);
3554 Op->ShifterImm.isASR = isASR;
3555 Op->ShifterImm.Imm = Imm;
3556 Op->StartLoc = S;
3557 Op->EndLoc = E;
3558 return Op;
3559 }
3560
3561 static std::unique_ptr<ARMOperand> CreateRotImm(unsigned Imm, SMLoc S,
3562 SMLoc E) {
3563 auto Op = std::make_unique<ARMOperand>(k_RotateImmediate);
3564 Op->RotImm.Imm = Imm;
3565 Op->StartLoc = S;
3566 Op->EndLoc = E;
3567 return Op;
3568 }
3569
3570 static std::unique_ptr<ARMOperand> CreateModImm(unsigned Bits, unsigned Rot,
3571 SMLoc S, SMLoc E) {
3572 auto Op = std::make_unique<ARMOperand>(k_ModifiedImmediate);
3573 Op->ModImm.Bits = Bits;
3574 Op->ModImm.Rot = Rot;
3575 Op->StartLoc = S;
3576 Op->EndLoc = E;
3577 return Op;
3578 }
3579
3580 static std::unique_ptr<ARMOperand>
3581 CreateConstantPoolImm(const MCExpr *Val, SMLoc S, SMLoc E) {
3582 auto Op = std::make_unique<ARMOperand>(k_ConstantPoolImmediate);
3583 Op->Imm.Val = Val;
3584 Op->StartLoc = S;
3585 Op->EndLoc = E;
3586 return Op;
3587 }
3588
3589 static std::unique_ptr<ARMOperand>
3590 CreateBitfield(unsigned LSB, unsigned Width, SMLoc S, SMLoc E) {
3591 auto Op = std::make_unique<ARMOperand>(k_BitfieldDescriptor);
3592 Op->Bitfield.LSB = LSB;
3593 Op->Bitfield.Width = Width;
3594 Op->StartLoc = S;
3595 Op->EndLoc = E;
3596 return Op;
3597 }
3598
3599 static std::unique_ptr<ARMOperand>
3600 CreateRegList(SmallVectorImpl<std::pair<unsigned, unsigned>> &Regs,
3601 SMLoc StartLoc, SMLoc EndLoc) {
3602 assert(Regs.size() > 0 && "RegList contains no registers?");
3603 KindTy Kind = k_RegisterList;
3604
3605 if (ARMMCRegisterClasses[ARM::DPRRegClassID].contains(
3606 Regs.front().second)) {
3607 if (Regs.back().second == ARM::VPR)
3608 Kind = k_FPDRegisterListWithVPR;
3609 else
3610 Kind = k_DPRRegisterList;
3611 } else if (ARMMCRegisterClasses[ARM::SPRRegClassID].contains(
3612 Regs.front().second)) {
3613 if (Regs.back().second == ARM::VPR)
3614 Kind = k_FPSRegisterListWithVPR;
3615 else
3616 Kind = k_SPRRegisterList;
3617 }
3618
3619 if (Kind == k_RegisterList && Regs.back().second == ARM::APSR)
3620 Kind = k_RegisterListWithAPSR;
3621
3622 assert(std::is_sorted(Regs.begin(), Regs.end()) &&
3623 "Register list must be sorted by encoding");
3624
3625 auto Op = std::make_unique<ARMOperand>(Kind);
3626 for (const auto &P : Regs)
3627 Op->Registers.push_back(P.second);
3628
3629 Op->StartLoc = StartLoc;
3630 Op->EndLoc = EndLoc;
3631 return Op;
3632 }
3633
3634 static std::unique_ptr<ARMOperand> CreateVectorList(unsigned RegNum,
3635 unsigned Count,
3636 bool isDoubleSpaced,
3637 SMLoc S, SMLoc E) {
3638 auto Op = std::make_unique<ARMOperand>(k_VectorList);
3639 Op->VectorList.RegNum = RegNum;
3640 Op->VectorList.Count = Count;
3641 Op->VectorList.isDoubleSpaced = isDoubleSpaced;
3642 Op->StartLoc = S;
3643 Op->EndLoc = E;
3644 return Op;
3645 }
3646
3647 static std::unique_ptr<ARMOperand>
3648 CreateVectorListAllLanes(unsigned RegNum, unsigned Count, bool isDoubleSpaced,
3649 SMLoc S, SMLoc E) {
3650 auto Op = std::make_unique<ARMOperand>(k_VectorListAllLanes);
3651 Op->VectorList.RegNum = RegNum;
3652 Op->VectorList.Count = Count;
3653 Op->VectorList.isDoubleSpaced = isDoubleSpaced;
3654 Op->StartLoc = S;
3655 Op->EndLoc = E;
3656 return Op;
3657 }
3658
3659 static std::unique_ptr<ARMOperand>
3660 CreateVectorListIndexed(unsigned RegNum, unsigned Count, unsigned Index,
3661 bool isDoubleSpaced, SMLoc S, SMLoc E) {
3662 auto Op = std::make_unique<ARMOperand>(k_VectorListIndexed);
3663 Op->VectorList.RegNum = RegNum;
3664 Op->VectorList.Count = Count;
3665 Op->VectorList.LaneIndex = Index;
3666 Op->VectorList.isDoubleSpaced = isDoubleSpaced;
3667 Op->StartLoc = S;
3668 Op->EndLoc = E;
3669 return Op;
3670 }
3671
3672 static std::unique_ptr<ARMOperand>
3673 CreateVectorIndex(unsigned Idx, SMLoc S, SMLoc E, MCContext &Ctx) {
3674 auto Op = std::make_unique<ARMOperand>(k_VectorIndex);
3675 Op->VectorIndex.Val = Idx;
3676 Op->StartLoc = S;
3677 Op->EndLoc = E;
3678 return Op;
3679 }
3680
3681 static std::unique_ptr<ARMOperand> CreateImm(const MCExpr *Val, SMLoc S,
3682 SMLoc E) {
3683 auto Op = std::make_unique<ARMOperand>(k_Immediate);
3684 Op->Imm.Val = Val;
3685 Op->StartLoc = S;
3686 Op->EndLoc = E;
3687 return Op;
3688 }
3689
3690 static std::unique_ptr<ARMOperand>
3691 CreateMem(unsigned BaseRegNum, const MCConstantExpr *OffsetImm,
3692 unsigned OffsetRegNum, ARM_AM::ShiftOpc ShiftType,
3693 unsigned ShiftImm, unsigned Alignment, bool isNegative, SMLoc S,
3694 SMLoc E, SMLoc AlignmentLoc = SMLoc()) {
3695 auto Op = std::make_unique<ARMOperand>(k_Memory);
3696 Op->Memory.BaseRegNum = BaseRegNum;
3697 Op->Memory.OffsetImm = OffsetImm;
3698 Op->Memory.OffsetRegNum = OffsetRegNum;
3699 Op->Memory.ShiftType = ShiftType;
3700 Op->Memory.ShiftImm = ShiftImm;
3701 Op->Memory.Alignment = Alignment;
3702 Op->Memory.isNegative = isNegative;
3703 Op->StartLoc = S;
3704 Op->EndLoc = E;
3705 Op->AlignmentLoc = AlignmentLoc;
3706 return Op;
3707 }
3708
3709 static std::unique_ptr<ARMOperand>
3710 CreatePostIdxReg(unsigned RegNum, bool isAdd, ARM_AM::ShiftOpc ShiftTy,
3711 unsigned ShiftImm, SMLoc S, SMLoc E) {
3712 auto Op = std::make_unique<ARMOperand>(k_PostIndexRegister);
3713 Op->PostIdxReg.RegNum = RegNum;
3714 Op->PostIdxReg.isAdd = isAdd;
3715 Op->PostIdxReg.ShiftTy = ShiftTy;
3716 Op->PostIdxReg.ShiftImm = ShiftImm;
3717 Op->StartLoc = S;
3718 Op->EndLoc = E;
3719 return Op;
3720 }
3721
3722 static std::unique_ptr<ARMOperand> CreateMemBarrierOpt(ARM_MB::MemBOpt Opt,
3723 SMLoc S) {
3724 auto Op = std::make_unique<ARMOperand>(k_MemBarrierOpt);
3725 Op->MBOpt.Val = Opt;
3726 Op->StartLoc = S;
3727 Op->EndLoc = S;
3728 return Op;
3729 }
3730
3731 static std::unique_ptr<ARMOperand>
3732 CreateInstSyncBarrierOpt(ARM_ISB::InstSyncBOpt Opt, SMLoc S) {
3733 auto Op = std::make_unique<ARMOperand>(k_InstSyncBarrierOpt);
3734 Op->ISBOpt.Val = Opt;
3735 Op->StartLoc = S;
3736 Op->EndLoc = S;
3737 return Op;
3738 }
3739
3740 static std::unique_ptr<ARMOperand>
3741 CreateTraceSyncBarrierOpt(ARM_TSB::TraceSyncBOpt Opt, SMLoc S) {
3742 auto Op = std::make_unique<ARMOperand>(k_TraceSyncBarrierOpt);
3743 Op->TSBOpt.Val = Opt;
3744 Op->StartLoc = S;
3745 Op->EndLoc = S;
3746 return Op;
3747 }
3748
3749 static std::unique_ptr<ARMOperand> CreateProcIFlags(ARM_PROC::IFlags IFlags,
3750 SMLoc S) {
3751 auto Op = std::make_unique<ARMOperand>(k_ProcIFlags);
3752 Op->IFlags.Val = IFlags;
3753 Op->StartLoc = S;
3754 Op->EndLoc = S;
3755 return Op;
3756 }
3757
3758 static std::unique_ptr<ARMOperand> CreateMSRMask(unsigned MMask, SMLoc S) {
3759 auto Op = std::make_unique<ARMOperand>(k_MSRMask);
3760 Op->MMask.Val = MMask;
3761 Op->StartLoc = S;
3762 Op->EndLoc = S;
3763 return Op;
3764 }
3765
3766 static std::unique_ptr<ARMOperand> CreateBankedReg(unsigned Reg, SMLoc S) {
3767 auto Op = std::make_unique<ARMOperand>(k_BankedReg);
3768 Op->BankedReg.Val = Reg;
3769 Op->StartLoc = S;
3770 Op->EndLoc = S;
3771 return Op;
3772 }
3773};
3774
3775} // end anonymous namespace.
3776
3777void ARMOperand::print(raw_ostream &OS) const {
3778 auto RegName = [](unsigned Reg) {
3779 if (Reg)
3780 return ARMInstPrinter::getRegisterName(Reg);
3781 else
3782 return "noreg";
3783 };
3784
3785 switch (Kind) {
3786 case k_CondCode:
3787 OS << "<ARMCC::" << ARMCondCodeToString(getCondCode()) << ">";
3788 break;
3789 case k_VPTPred:
3790 OS << "<ARMVCC::" << ARMVPTPredToString(getVPTPred()) << ">";
3791 break;
3792 case k_CCOut:
3793 OS << "<ccout " << RegName(getReg()) << ">";
3794 break;
3795 case k_ITCondMask: {
3796 static const char *const MaskStr[] = {
3797 "(invalid)", "(tttt)", "(ttt)", "(ttte)",
3798 "(tt)", "(ttet)", "(tte)", "(ttee)",
3799 "(t)", "(tett)", "(tet)", "(tete)",
3800 "(te)", "(teet)", "(tee)", "(teee)",
3801 };
3802 assert((ITMask.Mask & 0xf) == ITMask.Mask);
3803 OS << "<it-mask " << MaskStr[ITMask.Mask] << ">";
3804 break;
3805 }
3806 case k_CoprocNum:
3807 OS << "<coprocessor number: " << getCoproc() << ">";
3808 break;
3809 case k_CoprocReg:
3810 OS << "<coprocessor register: " << getCoproc() << ">";
3811 break;
3812 case k_CoprocOption:
3813 OS << "<coprocessor option: " << CoprocOption.Val << ">";
3814 break;
3815 case k_MSRMask:
3816 OS << "<mask: " << getMSRMask() << ">";
3817 break;
3818 case k_BankedReg:
3819 OS << "<banked reg: " << getBankedReg() << ">";
3820 break;
3821 case k_Immediate:
3822 OS << *getImm();
3823 break;
3824 case k_MemBarrierOpt:
3825 OS << "<ARM_MB::" << MemBOptToString(getMemBarrierOpt(), false) << ">";
3826 break;
3827 case k_InstSyncBarrierOpt:
3828 OS << "<ARM_ISB::" << InstSyncBOptToString(getInstSyncBarrierOpt()) << ">";
3829 break;
3830 case k_TraceSyncBarrierOpt:
3831 OS << "<ARM_TSB::" << TraceSyncBOptToString(getTraceSyncBarrierOpt()) << ">";
3832 break;
3833 case k_Memory:
3834 OS << "<memory";
3835 if (Memory.BaseRegNum)
3836 OS << " base:" << RegName(Memory.BaseRegNum);
3837 if (Memory.OffsetImm)
3838 OS << " offset-imm:" << *Memory.OffsetImm;
3839 if (Memory.OffsetRegNum)
3840 OS << " offset-reg:" << (Memory.isNegative ? "-" : "")
3841 << RegName(Memory.OffsetRegNum);
3842 if (Memory.ShiftType != ARM_AM::no_shift) {
3843 OS << " shift-type:" << ARM_AM::getShiftOpcStr(Memory.ShiftType);
3844 OS << " shift-imm:" << Memory.ShiftImm;
3845 }
3846 if (Memory.Alignment)
3847 OS << " alignment:" << Memory.Alignment;
3848 OS << ">";
3849 break;
3850 case k_PostIndexRegister:
3851 OS << "post-idx register " << (PostIdxReg.isAdd ? "" : "-")
3852 << RegName(PostIdxReg.RegNum);
3853 if (PostIdxReg.ShiftTy != ARM_AM::no_shift)
3854 OS << ARM_AM::getShiftOpcStr(PostIdxReg.ShiftTy) << " "
3855 << PostIdxReg.ShiftImm;
3856 OS << ">";
3857 break;
3858 case k_ProcIFlags: {
3859 OS << "<ARM_PROC::";
3860 unsigned IFlags = getProcIFlags();
3861 for (int i=2; i >= 0; --i)
3862 if (IFlags & (1 << i))
3863 OS << ARM_PROC::IFlagsToString(1 << i);
3864 OS << ">";
3865 break;
3866 }
3867 case k_Register:
3868 OS << "<register " << RegName(getReg()) << ">";
3869 break;
3870 case k_ShifterImmediate:
3871 OS << "<shift " << (ShifterImm.isASR ? "asr" : "lsl")
3872 << " #" << ShifterImm.Imm << ">";
3873 break;
3874 case k_ShiftedRegister:
3875 OS << "<so_reg_reg " << RegName(RegShiftedReg.SrcReg) << " "
3876 << ARM_AM::getShiftOpcStr(RegShiftedReg.ShiftTy) << " "
3877 << RegName(RegShiftedReg.ShiftReg) << ">";
3878 break;
3879 case k_ShiftedImmediate:
3880 OS << "<so_reg_imm " << RegName(RegShiftedImm.SrcReg) << " "
3881 << ARM_AM::getShiftOpcStr(RegShiftedImm.ShiftTy) << " #"
3882 << RegShiftedImm.ShiftImm << ">";
3883 break;
3884 case k_RotateImmediate:
3885 OS << "<ror " << " #" << (RotImm.Imm * 8) << ">";
3886 break;
3887 case k_ModifiedImmediate:
3888 OS << "<mod_imm #" << ModImm.Bits << ", #"
3889 << ModImm.Rot << ")>";
3890 break;
3891 case k_ConstantPoolImmediate:
3892 OS << "<constant_pool_imm #" << *getConstantPoolImm();
3893 break;
3894 case k_BitfieldDescriptor:
3895 OS << "<bitfield " << "lsb: " << Bitfield.LSB
3896 << ", width: " << Bitfield.Width << ">";
3897 break;
3898 case k_RegisterList:
3899 case k_RegisterListWithAPSR:
3900 case k_DPRRegisterList:
3901 case k_SPRRegisterList:
3902 case k_FPSRegisterListWithVPR:
3903 case k_FPDRegisterListWithVPR: {
3904 OS << "<register_list ";
3905
3906 const SmallVectorImpl<unsigned> &RegList = getRegList();
3907 for (SmallVectorImpl<unsigned>::const_iterator
3908 I = RegList.begin(), E = RegList.end(); I != E; ) {
3909 OS << RegName(*I);
3910 if (++I < E) OS << ", ";
3911 }
3912
3913 OS << ">";
3914 break;
3915 }
3916 case k_VectorList:
3917 OS << "<vector_list " << VectorList.Count << " * "
3918 << RegName(VectorList.RegNum) << ">";
3919 break;
3920 case k_VectorListAllLanes:
3921 OS << "<vector_list(all lanes) " << VectorList.Count << " * "
3922 << RegName(VectorList.RegNum) << ">";
3923 break;
3924 case k_VectorListIndexed:
3925 OS << "<vector_list(lane " << VectorList.LaneIndex << ") "
3926 << VectorList.Count << " * " << RegName(VectorList.RegNum) << ">";
3927 break;
3928 case k_Token:
3929 OS << "'" << getToken() << "'";
3930 break;
3931 case k_VectorIndex:
3932 OS << "<vectorindex " << getVectorIndex() << ">";
3933 break;
3934 }
3935}
3936
3937/// @name Auto-generated Match Functions
3938/// {
3939
3940static unsigned MatchRegisterName(StringRef Name);
3941
3942/// }
3943
3944bool ARMAsmParser::ParseRegister(unsigned &RegNo,
3945 SMLoc &StartLoc, SMLoc &EndLoc) {
3946 const AsmToken &Tok = getParser().getTok();
3947 StartLoc = Tok.getLoc();
3948 EndLoc = Tok.getEndLoc();
3949 RegNo = tryParseRegister();
3950
3951 return (RegNo == (unsigned)-1);
3952}
3953
3954OperandMatchResultTy ARMAsmParser::tryParseRegister(unsigned &RegNo,
3955 SMLoc &StartLoc,
3956 SMLoc &EndLoc) {
3957 if (ParseRegister(RegNo, StartLoc, EndLoc))
3958 return MatchOperand_NoMatch;
3959 return MatchOperand_Success;
3960}
3961
3962/// Try to parse a register name. The token must be an Identifier when called,
3963/// and if it is a register name the token is eaten and the register number is
3964/// returned. Otherwise return -1.
3965int ARMAsmParser::tryParseRegister() {
3966 MCAsmParser &Parser = getParser();
3967 const AsmToken &Tok = Parser.getTok();
3968 if (Tok.isNot(AsmToken::Identifier)) return -1;
3969
3970 std::string lowerCase = Tok.getString().lower();
3971 unsigned RegNum = MatchRegisterName(lowerCase);
3972 if (!RegNum) {
3973 RegNum = StringSwitch<unsigned>(lowerCase)
3974 .Case("r13", ARM::SP)
3975 .Case("r14", ARM::LR)
3976 .Case("r15", ARM::PC)
3977 .Case("ip", ARM::R12)
3978 // Additional register name aliases for 'gas' compatibility.
3979 .Case("a1", ARM::R0)
3980 .Case("a2", ARM::R1)
3981 .Case("a3", ARM::R2)
3982 .Case("a4", ARM::R3)
3983 .Case("v1", ARM::R4)
3984 .Case("v2", ARM::R5)
3985 .Case("v3", ARM::R6)
3986 .Case("v4", ARM::R7)
3987 .Case("v5", ARM::R8)
3988 .Case("v6", ARM::R9)
3989 .Case("v7", ARM::R10)
3990 .Case("v8", ARM::R11)
3991 .Case("sb", ARM::R9)
3992 .Case("sl", ARM::R10)
3993 .Case("fp", ARM::R11)
3994 .Default(0);
3995 }
3996 if (!RegNum) {
3997 // Check for aliases registered via .req. Canonicalize to lower case.
3998 // That's more consistent since register names are case insensitive, and
3999 // it's how the original entry was passed in from MC/MCParser/AsmParser.
4000 StringMap<unsigned>::const_iterator Entry = RegisterReqs.find(lowerCase);
4001 // If no match, return failure.
4002 if (Entry == RegisterReqs.end())
4003 return -1;
4004 Parser.Lex(); // Eat identifier token.
4005 return Entry->getValue();
4006 }
4007
4008 // Some FPUs only have 16 D registers, so D16-D31 are invalid
4009 if (!hasD32() && RegNum >= ARM::D16 && RegNum <= ARM::D31)
4010 return -1;
4011
4012 Parser.Lex(); // Eat identifier token.
4013
4014 return RegNum;
4015}
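// Illustrative examples of the aliasing above: "r13" resolves to ARM::SP,
// "ip" to ARM::R12 and "fp" to ARM::R11; an identifier that is neither a
// register name nor a .req alias returns -1, as does d16-d31 on a target
// without the D32 feature.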
4016
4017// Try to parse a shifter (e.g., "lsl <amt>"). On success, return 0.
4018// If a recoverable error occurs, return 1. If an irrecoverable error
4019// occurs, return -1. An irrecoverable error is one where tokens have been
4020// consumed in the process of trying to parse the shifter (i.e., when it is
4021// indeed a shifter operand, but malformed).
4022int ARMAsmParser::tryParseShiftRegister(OperandVector &Operands) {
4023 MCAsmParser &Parser = getParser();
4024 SMLoc S = Parser.getTok().getLoc();
4025 const AsmToken &Tok = Parser.getTok();
4026 if (Tok.isNot(AsmToken::Identifier))
4027 return -1;
4028
4029 std::string lowerCase = Tok.getString().lower();
4030 ARM_AM::ShiftOpc ShiftTy = StringSwitch<ARM_AM::ShiftOpc>(lowerCase)
4031 .Case("asl", ARM_AM::lsl)
4032 .Case("lsl", ARM_AM::lsl)
4033 .Case("lsr", ARM_AM::lsr)
4034 .Case("asr", ARM_AM::asr)
4035 .Case("ror", ARM_AM::ror)
4036 .Case("rrx", ARM_AM::rrx)
4037 .Default(ARM_AM::no_shift);
4038
4039 if (ShiftTy == ARM_AM::no_shift)
4040 return 1;
4041
4042 Parser.Lex(); // Eat the operator.
4043
4044 // The source register for the shift has already been added to the
4045 // operand list, so we need to pop it off and combine it into the shifted
4046 // register operand instead.
4047 std::unique_ptr<ARMOperand> PrevOp(
4048 (ARMOperand *)Operands.pop_back_val().release());
4049 if (!PrevOp->isReg())
4050 return Error(PrevOp->getStartLoc(), "shift must be of a register");
4051 int SrcReg = PrevOp->getReg();
4052
4053 SMLoc EndLoc;
4054 int64_t Imm = 0;
4055 int ShiftReg = 0;
4056 if (ShiftTy == ARM_AM::rrx) {
4057 // RRX doesn't have an explicit shift amount. The encoder expects
4058 // the shift register to be the same as the source register. Seems odd,
4059 // but OK.
4060 ShiftReg = SrcReg;
4061 } else {
4062 // Figure out if this is shifted by a constant or a register (for non-RRX).
4063 if (Parser.getTok().is(AsmToken::Hash) ||
4064 Parser.getTok().is(AsmToken::Dollar)) {
4065 Parser.Lex(); // Eat hash.
4066 SMLoc ImmLoc = Parser.getTok().getLoc();
4067 const MCExpr *ShiftExpr = nullptr;
4068 if (getParser().parseExpression(ShiftExpr, EndLoc)) {
4069 Error(ImmLoc, "invalid immediate shift value");
4070 return -1;
4071 }
4072 // The expression must be evaluatable as an immediate.
4073 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(ShiftExpr);
4074 if (!CE) {
4075 Error(ImmLoc, "invalid immediate shift value");
4076 return -1;
4077 }
4078 // Range check the immediate.
4079 // lsl, ror: 0 <= imm <= 31
4080 // lsr, asr: 0 <= imm <= 32
4081 Imm = CE->getValue();
4082 if (Imm < 0 ||
4083 ((ShiftTy == ARM_AM::lsl || ShiftTy == ARM_AM::ror) && Imm > 31) ||
4084 ((ShiftTy == ARM_AM::lsr || ShiftTy == ARM_AM::asr) && Imm > 32)) {
4085 Error(ImmLoc, "immediate shift value out of range");
4086 return -1;
4087 }
4088 // shift by zero is a nop. Always send it through as lsl.
4089 // ('as' compatibility)
4090 if (Imm == 0)
4091 ShiftTy = ARM_AM::lsl;
4092 } else if (Parser.getTok().is(AsmToken::Identifier)) {
4093 SMLoc L = Parser.getTok().getLoc();
4094 EndLoc = Parser.getTok().getEndLoc();
4095 ShiftReg = tryParseRegister();
4096 if (ShiftReg == -1) {
4097 Error(L, "expected immediate or register in shift operand");
4098 return -1;
4099 }
4100 } else {
4101 Error(Parser.getTok().getLoc(),
4102 "expected immediate or register in shift operand");
4103 return -1;
4104 }
4105 }
4106
4107 if (ShiftReg && ShiftTy != ARM_AM::rrx)
4108 Operands.push_back(ARMOperand::CreateShiftedRegister(ShiftTy, SrcReg,
4109 ShiftReg, Imm,
4110 S, EndLoc));
4111 else
4112 Operands.push_back(ARMOperand::CreateShiftedImmediate(ShiftTy, SrcReg, Imm,
4113 S, EndLoc));
4114
4115 return 0;
4116}
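// Illustrative examples (assembly fragments chosen for illustration): after a
// source register r0 has already been parsed, "lsl #3" folds into a
// shifted-immediate operand (lsl, r0, 3), "asr r2" into a shifted-register
// operand (asr, r0, r2), and "ror #0" is normalised to an lsl as noted above.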
4117
4118/// Try to parse a register name. The token must be an Identifier when called.
4119/// If it's a register, an AsmOperand is created. Another AsmOperand is created
4120/// if there is a "writeback". Returns 'true' if it's not a register.
4121///
4122/// TODO this is likely to change to allow different register types and or to
4123/// parse for a specific register type.
4124bool ARMAsmParser::tryParseRegisterWithWriteBack(OperandVector &Operands) {
4125 MCAsmParser &Parser = getParser();
4126 SMLoc RegStartLoc = Parser.getTok().getLoc();
4127 SMLoc RegEndLoc = Parser.getTok().getEndLoc();
4128 int RegNo = tryParseRegister();
4129 if (RegNo == -1)
4130 return true;
4131
4132 Operands.push_back(ARMOperand::CreateReg(RegNo, RegStartLoc, RegEndLoc));
4133
4134 const AsmToken &ExclaimTok = Parser.getTok();
4135 if (ExclaimTok.is(AsmToken::Exclaim)) {
4136 Operands.push_back(ARMOperand::CreateToken(ExclaimTok.getString(),
4137 ExclaimTok.getLoc()));
4138 Parser.Lex(); // Eat exclaim token
4139 return false;
4140 }
4141
4142 // Also check for an index operand. This is only legal for vector registers,
4143 // but that'll get caught OK in operand matching, so we don't need to
4144 // explicitly filter everything else out here.
4145 if (Parser.getTok().is(AsmToken::LBrac)) {
4146 SMLoc SIdx = Parser.getTok().getLoc();
4147 Parser.Lex(); // Eat left bracket token.
4148
4149 const MCExpr *ImmVal;
4150 if (getParser().parseExpression(ImmVal))
4151 return true;
4152 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(ImmVal);
4153 if (!MCE)
4154 return TokError("immediate value expected for vector index");
4155
4156 if (Parser.getTok().isNot(AsmToken::RBrac))
4157 return Error(Parser.getTok().getLoc(), "']' expected");
4158
4159 SMLoc E = Parser.getTok().getEndLoc();
4160 Parser.Lex(); // Eat right bracket token.
4161
4162 Operands.push_back(ARMOperand::CreateVectorIndex(MCE->getValue(),
4163 SIdx, E,
4164 getContext()));
4165 }
4166
4167 return false;
4168}
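// Illustrative examples: "r2!" yields a register operand for r2 followed by a
// "!" token operand, and "d0[1]" yields the register operand plus a vector
// index operand with value 1.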
4169
4170/// MatchCoprocessorOperandName - Try to parse a coprocessor-related
4171/// instruction with a symbolic operand name.
4172/// We accept "crN" syntax for GAS compatibility.
4173/// <operand-name> ::= <prefix><number>
4174/// If CoprocOp is 'c', then:
4175/// <prefix> ::= c | cr
4176/// If CoprocOp is 'p', then :
4177/// <prefix> ::= p
4178/// <number> ::= integer in range [0, 15]
4179static int MatchCoprocessorOperandName(StringRef Name, char CoprocOp) {
4180 // Use the same layout as the tablegen'erated register name matcher. Ugly,
4181 // but efficient.
4182 if (Name.size() < 2 || Name[0] != CoprocOp)
4183 return -1;
4184 Name = (Name[1] == 'r') ? Name.drop_front(2) : Name.drop_front();
4185
4186 switch (Name.size()) {
4187 default: return -1;
4188 case 1:
4189 switch (Name[0]) {
4190 default: return -1;
4191 case '0': return 0;
4192 case '1': return 1;
4193 case '2': return 2;
4194 case '3': return 3;
4195 case '4': return 4;
4196 case '5': return 5;
4197 case '6': return 6;
4198 case '7': return 7;
4199 case '8': return 8;
4200 case '9': return 9;
4201 }
4202 case 2:
4203 if (Name[0] != '1')
4204 return -1;
4205 switch (Name[1]) {
4206 default: return -1;
4207 // CP10 and CP11 are VFP/NEON and so vector instructions should be used.
4208 // However, old cores (v5/v6) did use them in that way.
4209 case '0': return 10;
4210 case '1': return 11;
4211 case '2': return 12;
4212 case '3': return 13;
4213 case '4': return 14;
4214 case '5': return 15;
4215 }
4216 }
4217}
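// Illustrative examples: MatchCoprocessorOperandName("p15", 'p') == 15,
// MatchCoprocessorOperandName("cr8", 'c') == 8 and
// MatchCoprocessorOperandName("c3", 'c') == 3, while "p16" (out of range) and
// "c3" against CoprocOp 'p' (wrong prefix) both return -1.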
4218
4219/// parseITCondCode - Try to parse a condition code for an IT instruction.
4220OperandMatchResultTy
4221ARMAsmParser::parseITCondCode(OperandVector &Operands) {
4222 MCAsmParser &Parser = getParser();
4223 SMLoc S = Parser.getTok().getLoc();
4224 const AsmToken &Tok = Parser.getTok();
4225 if (!Tok.is(AsmToken::Identifier))
4226 return MatchOperand_NoMatch;
4227 unsigned CC = ARMCondCodeFromString(Tok.getString());
4228 if (CC == ~0U)
4229 return MatchOperand_NoMatch;
4230 Parser.Lex(); // Eat the token.
4231
4232 Operands.push_back(ARMOperand::CreateCondCode(ARMCC::CondCodes(CC), S));
4233
4234 return MatchOperand_Success;
4235}
4236
4237/// parseCoprocNumOperand - Try to parse a coprocessor number operand. The
4238/// token must be an Identifier when called, and if it is a coprocessor
4239/// number, the token is eaten and the operand is added to the operand list.
4240OperandMatchResultTy
4241ARMAsmParser::parseCoprocNumOperand(OperandVector &Operands) {
4242 MCAsmParser &Parser = getParser();
4243 SMLoc S = Parser.getTok().getLoc();
4244 const AsmToken &Tok = Parser.getTok();
4245 if (Tok.isNot(AsmToken::Identifier))
4246 return MatchOperand_NoMatch;
4247
4248 int Num = MatchCoprocessorOperandName(Tok.getString().lower(), 'p');
4249 if (Num == -1)
4250 return MatchOperand_NoMatch;
4251 if (!isValidCoprocessorNumber(Num, getSTI().getFeatureBits()))
4252 return MatchOperand_NoMatch;
4253
4254 Parser.Lex(); // Eat identifier token.
4255 Operands.push_back(ARMOperand::CreateCoprocNum(Num, S));
4256 return MatchOperand_Success;
4257}
4258
4259/// parseCoprocRegOperand - Try to parse a coprocessor register operand. The
4260/// token must be an Identifier when called, and if it is a coprocessor
4261/// number, the token is eaten and the operand is added to the operand list.
4262OperandMatchResultTy
4263ARMAsmParser::parseCoprocRegOperand(OperandVector &Operands) {
4264 MCAsmParser &Parser = getParser();
4265 SMLoc S = Parser.getTok().getLoc();
4266 const AsmToken &Tok = Parser.getTok();
4267 if (Tok.isNot(AsmToken::Identifier))
4268 return MatchOperand_NoMatch;
4269
4270 int Reg = MatchCoprocessorOperandName(Tok.getString().lower(), 'c');
4271 if (Reg == -1)
4272 return MatchOperand_NoMatch;
4273
4274 Parser.Lex(); // Eat identifier token.
4275 Operands.push_back(ARMOperand::CreateCoprocReg(Reg, S));
4276 return MatchOperand_Success;
4277}
4278
4279/// parseCoprocOptionOperand - Try to parse a coprocessor option operand.
4280/// coproc_option : '{' imm0_255 '}'
4281OperandMatchResultTy
4282ARMAsmParser::parseCoprocOptionOperand(OperandVector &Operands) {
4283 MCAsmParser &Parser = getParser();
4284 SMLoc S = Parser.getTok().getLoc();
4285
4286 // If this isn't a '{', this isn't a coprocessor immediate operand.
4287 if (Parser.getTok().isNot(AsmToken::LCurly))
4288 return MatchOperand_NoMatch;
4289 Parser.Lex(); // Eat the '{'
4290
4291 const MCExpr *Expr;
4292 SMLoc Loc = Parser.getTok().getLoc();
4293 if (getParser().parseExpression(Expr)) {
4294 Error(Loc, "illegal expression");
4295 return MatchOperand_ParseFail;
4296 }
4297 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Expr);
4298 if (!CE || CE->getValue() < 0 || CE->getValue() > 255) {
4299 Error(Loc, "coprocessor option must be an immediate in range [0, 255]");
4300 return MatchOperand_ParseFail;
4301 }
4302 int Val = CE->getValue();
4303
4304 // Check for and consume the closing '}'
4305 if (Parser.getTok().isNot(AsmToken::RCurly))
4306 return MatchOperand_ParseFail;
4307 SMLoc E = Parser.getTok().getEndLoc();
4308 Parser.Lex(); // Eat the '}'
4309
4310 Operands.push_back(ARMOperand::CreateCoprocOption(Val, S, E));
4311 return MatchOperand_Success;
4312}
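// Illustrative example: for an operand written as "{16}" the expression 16 is
// range-checked against [0, 255] and a coprocessor-option operand with value
// 16 is pushed; "{300}" is rejected with the diagnostic above.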
4313
4314// For register list parsing, we need to map from raw GPR register numbering
4315// to the enumeration values. The enumeration values aren't sorted by
4316// register number due to our using "sp", "lr" and "pc" as canonical names.
4317static unsigned getNextRegister(unsigned Reg) {
4318 // If this is a GPR, we need to do it manually, otherwise we can rely
4319 // on the sort ordering of the enumeration since the other reg-classes
4320 // are sane.
4321 if (!ARMMCRegisterClasses[ARM::GPRRegClassID].contains(Reg))
4322 return Reg + 1;
4323 switch(Reg) {
4324 default: llvm_unreachable("Invalid GPR number!");
4325 case ARM::R0: return ARM::R1; case ARM::R1: return ARM::R2;
4326 case ARM::R2: return ARM::R3; case ARM::R3: return ARM::R4;
4327 case ARM::R4: return ARM::R5; case ARM::R5: return ARM::R6;
4328 case ARM::R6: return ARM::R7; case ARM::R7: return ARM::R8;
4329 case ARM::R8: return ARM::R9; case ARM::R9: return ARM::R10;
4330 case ARM::R10: return ARM::R11; case ARM::R11: return ARM::R12;
4331 case ARM::R12: return ARM::SP; case ARM::SP: return ARM::LR;
4332 case ARM::LR: return ARM::PC; case ARM::PC: return ARM::R0;
4333 }
4334}
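// Illustrative examples: getNextRegister(ARM::R7) == ARM::R8,
// getNextRegister(ARM::R12) == ARM::SP and getNextRegister(ARM::LR) == ARM::PC;
// for non-GPR classes the next enumeration value is returned directly.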
4335
4336// Insert an <Encoding, Register> pair in an ordered vector. Return true on
4337// success, or false if a duplicate encoding is found.
4338static bool
4339insertNoDuplicates(SmallVectorImpl<std::pair<unsigned, unsigned>> &Regs,
4340 unsigned Enc, unsigned Reg) {
4341 Regs.emplace_back(Enc, Reg);
4342 for (auto I = Regs.rbegin(), J = I + 1, E = Regs.rend(); J != E; ++I, ++J) {
4343 if (J->first == Enc) {
4344 Regs.erase(J.base());
4345 return false;
4346 }
4347 if (J->first < Enc)
4348 break;
4349 std::swap(*I, *J);
4350 }
4351 return true;
4352}
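// Illustrative example: starting from {(0, R0), (2, R2)}, inserting the pair
// (1, R1) keeps the vector sorted as {(0, R0), (1, R1), (2, R2)} and returns
// true; inserting encoding 2 again removes the duplicate entry and returns
// false.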
4353
4354/// Parse a register list.
4355bool ARMAsmParser::parseRegisterList(OperandVector &Operands,
4356 bool EnforceOrder) {
4357 MCAsmParser &Parser = getParser();
4358 if (Parser.getTok().isNot(AsmToken::LCurly))
4359 return TokError("Token is not a Left Curly Brace");
4360 SMLoc S = Parser.getTok().getLoc();
4361 Parser.Lex(); // Eat '{' token.
4362 SMLoc RegLoc = Parser.getTok().getLoc();
4363
4364 // Check the first register in the list to see what register class
4365 // this is a list of.
4366 int Reg = tryParseRegister();
4367 if (Reg == -1)
4368 return Error(RegLoc, "register expected");
4369
4370 // The reglist instructions have at most 16 registers, so reserve
4371 // space for that many.
4372 int EReg = 0;
4373 SmallVector<std::pair<unsigned, unsigned>, 16> Registers;
4374
4375 // Allow Q regs and just interpret them as the two D sub-registers.
4376 if (ARMMCRegisterClasses[ARM::QPRRegClassID].contains(Reg)) {
4377 Reg = getDRegFromQReg(Reg);
4378 EReg = MRI->getEncodingValue(Reg);
4379 Registers.emplace_back(EReg, Reg);
4380 ++Reg;
4381 }
4382 const MCRegisterClass *RC;
4383 if (ARMMCRegisterClasses[ARM::GPRRegClassID].contains(Reg))
4384 RC = &ARMMCRegisterClasses[ARM::GPRRegClassID];
4385 else if (ARMMCRegisterClasses[ARM::DPRRegClassID].contains(Reg))
4386 RC = &ARMMCRegisterClasses[ARM::DPRRegClassID];
4387 else if (ARMMCRegisterClasses[ARM::SPRRegClassID].contains(Reg))
4388 RC = &ARMMCRegisterClasses[ARM::SPRRegClassID];
4389 else if (ARMMCRegisterClasses[ARM::GPRwithAPSRnospRegClassID].contains(Reg))
4390 RC = &ARMMCRegisterClasses[ARM::GPRwithAPSRnospRegClassID];
4391 else
4392 return Error(RegLoc, "invalid register in register list");
4393
4394 // Store the register.
4395 EReg = MRI->getEncodingValue(Reg);
4396 Registers.emplace_back(EReg, Reg);
4397
4398 // This starts immediately after the first register token in the list,
4399 // so we can see either a comma or a minus (range separator) as a legal
4400 // next token.
4401 while (Parser.getTok().is(AsmToken::Comma) ||
4402 Parser.getTok().is(AsmToken::Minus)) {
4403 if (Parser.getTok().is(AsmToken::Minus)) {
4404 Parser.Lex(); // Eat the minus.
4405 SMLoc AfterMinusLoc = Parser.getTok().getLoc();
4406 int EndReg = tryParseRegister();
4407 if (EndReg == -1)
4408 return Error(AfterMinusLoc, "register expected");
4409 // Allow Q regs and just interpret them as the two D sub-registers.
4410 if (ARMMCRegisterClasses[ARM::QPRRegClassID].contains(EndReg))
4411 EndReg = getDRegFromQReg(EndReg) + 1;
4412 // If the register is the same as the start reg, there's nothing
4413 // more to do.
4414 if (Reg == EndReg)
4415 continue;
4416 // The register must be in the same register class as the first.
4417 if (!RC->contains(EndReg))
4418 return Error(AfterMinusLoc, "invalid register in register list");
4419 // Ranges must go from low to high.
4420 if (MRI->getEncodingValue(Reg) > MRI->getEncodingValue(EndReg))
4421 return Error(AfterMinusLoc, "bad range in register list");
4422
4423 // Add all the registers in the range to the register list.
4424 while (Reg != EndReg) {
4425 Reg = getNextRegister(Reg);
4426 EReg = MRI->getEncodingValue(Reg);
4427 if (!insertNoDuplicates(Registers, EReg, Reg)) {
4428 Warning(AfterMinusLoc, StringRef("duplicated register (") +
4429 ARMInstPrinter::getRegisterName(Reg) +
4430 ") in register list");
4431 }
4432 }
4433 continue;
4434 }
4435 Parser.Lex(); // Eat the comma.
4436 RegLoc = Parser.getTok().getLoc();
4437 int OldReg = Reg;
4438 const AsmToken RegTok = Parser.getTok();
4439 Reg = tryParseRegister();
4440 if (Reg == -1)
4441 return Error(RegLoc, "register expected");
4442 // Allow Q regs and just interpret them as the two D sub-registers.
4443 bool isQReg = false;
4444 if (ARMMCRegisterClasses[ARM::QPRRegClassID].contains(Reg)) {
4445 Reg = getDRegFromQReg(Reg);
4446 isQReg = true;
4447 }
4448 if (!RC->contains(Reg) &&
4449 RC->getID() == ARMMCRegisterClasses[ARM::GPRRegClassID].getID() &&
4450 ARMMCRegisterClasses[ARM::GPRwithAPSRnospRegClassID].contains(Reg)) {
4451 // switch the register classes, as GPRwithAPSRnospRegClassID is a partial
4452 // subset of GPRRegClassId except it contains APSR as well.
4453 RC = &ARMMCRegisterClasses[ARM::GPRwithAPSRnospRegClassID];
4454 }
4455 if (Reg == ARM::VPR &&
4456 (RC == &ARMMCRegisterClasses[ARM::SPRRegClassID] ||
4457 RC == &ARMMCRegisterClasses[ARM::DPRRegClassID] ||
4458 RC == &ARMMCRegisterClasses[ARM::FPWithVPRRegClassID])) {
4459 RC = &ARMMCRegisterClasses[ARM::FPWithVPRRegClassID];
4460 EReg = MRI->getEncodingValue(Reg);
4461 if (!insertNoDuplicates(Registers, EReg, Reg)) {
4462 Warning(RegLoc, "duplicated register (" + RegTok.getString() +
4463 ") in register list");
4464 }
4465 continue;
4466 }
4467 // The register must be in the same register class as the first.
4468 if (!RC->contains(Reg))
4469 return Error(RegLoc, "invalid register in register list");
4470 // In most cases, the list must be monotonically increasing. An
4471 // exception is CLRM, which is order-independent anyway, so
4472 // there's no potential for confusion if you write clrm {r2,r1}
4473 // instead of clrm {r1,r2}.
4474 if (EnforceOrder &&
4475 MRI->getEncodingValue(Reg) < MRI->getEncodingValue(OldReg)) {
4476 if (ARMMCRegisterClasses[ARM::GPRRegClassID].contains(Reg))
4477 Warning(RegLoc, "register list not in ascending order");
4478 else if (!ARMMCRegisterClasses[ARM::GPRwithAPSRnospRegClassID].contains(Reg))
4479 return Error(RegLoc, "register list not in ascending order");
4480 }
4481 // VFP register lists must also be contiguous.
4482 if (RC != &ARMMCRegisterClasses[ARM::GPRRegClassID] &&
4483 RC != &ARMMCRegisterClasses[ARM::GPRwithAPSRnospRegClassID] &&
4484 Reg != OldReg + 1)
4485 return Error(RegLoc, "non-contiguous register range");
4486 EReg = MRI->getEncodingValue(Reg);
4487 if (!insertNoDuplicates(Registers, EReg, Reg)) {
4488 Warning(RegLoc, "duplicated register (" + RegTok.getString() +
4489 ") in register list");
4490 }
4491 if (isQReg) {
4492 EReg = MRI->getEncodingValue(++Reg);
4493 Registers.emplace_back(EReg, Reg);
4494 }
4495 }
4496
4497 if (Parser.getTok().isNot(AsmToken::RCurly))
4498 return Error(Parser.getTok().getLoc(), "'}' expected");
4499 SMLoc E = Parser.getTok().getEndLoc();
4500 Parser.Lex(); // Eat '}' token.
4501
4502 // Push the register list operand.
4503 Operands.push_back(ARMOperand::CreateRegList(Registers, S, E));
4504
4505 // The ARM system instruction variants for LDM/STM have a '^' token here.
4506 if (Parser.getTok().is(AsmToken::Caret)) {
4507 Operands.push_back(ARMOperand::CreateToken("^",Parser.getTok().getLoc()));
4508 Parser.Lex(); // Eat '^' token.
4509 }
4510
4511 return false;
4512}
4513
4514// Helper function to parse the lane index for vector lists.
4515OperandMatchResultTy ARMAsmParser::
4516parseVectorLane(VectorLaneTy &LaneKind, unsigned &Index, SMLoc &EndLoc) {
4517 MCAsmParser &Parser = getParser();
4518 Index = 0; // Always return a defined index value.
4519 if (Parser.getTok().is(AsmToken::LBrac)) {
4520 Parser.Lex(); // Eat the '['.
4521 if (Parser.getTok().is(AsmToken::RBrac)) {
4522 // "Dn[]" is the 'all lanes' syntax.
4523 LaneKind = AllLanes;
4524 EndLoc = Parser.getTok().getEndLoc();
4525 Parser.Lex(); // Eat the ']'.
4526 return MatchOperand_Success;
4527 }
4528
4529 // There's an optional '#' token here. Normally there wouldn't be, but
4530 // inline assembly puts one in, and it's friendly to accept that.
4531 if (Parser.getTok().is(AsmToken::Hash))
4532 Parser.Lex(); // Eat '#' or '$'.
4533
4534 const MCExpr *LaneIndex;
4535 SMLoc Loc = Parser.getTok().getLoc();
4536 if (getParser().parseExpression(LaneIndex)) {
4537 Error(Loc, "illegal expression");
4538 return MatchOperand_ParseFail;
4539 }
4540 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(LaneIndex);
4541 if (!CE) {
4542 Error(Loc, "lane index must be empty or an integer");
4543 return MatchOperand_ParseFail;
4544 }
4545 if (Parser.getTok().isNot(AsmToken::RBrac)) {
4546 Error(Parser.getTok().getLoc(), "']' expected");
4547 return MatchOperand_ParseFail;
4548 }
4549 EndLoc = Parser.getTok().getEndLoc();
4550 Parser.Lex(); // Eat the ']'.
4551 int64_t Val = CE->getValue();
4552
4553 // FIXME: Make this range check context sensitive for .8, .16, .32.
4554 if (Val < 0 || Val > 7) {
4555 Error(Parser.getTok().getLoc(), "lane index out of range");
4556 return MatchOperand_ParseFail;
4557 }
4558 Index = Val;
4559 LaneKind = IndexedLane;
4560 return MatchOperand_Success;
4561 }
4562 LaneKind = NoLanes;
4563 return MatchOperand_Success;
4564}
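// Illustrative examples (a sketch of the syntax this helper distinguishes,
// assuming standard NEON assembly; not taken from the listing above):
//   vld1.32 {d0[1]}, [r0]   @ IndexedLane, Index == 1
//   vld1.32 {d0[]},  [r0]   @ AllLanes ("Dn[]" all-lanes form)
//   vld1.32 {d0},    [r0]   @ NoLanes (no '[' follows the register)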
4565
4566// parse a vector register list
4567OperandMatchResultTy
4568ARMAsmParser::parseVectorList(OperandVector &Operands) {
4569 MCAsmParser &Parser = getParser();
4570 VectorLaneTy LaneKind;
4571 unsigned LaneIndex;
4572 SMLoc S = Parser.getTok().getLoc();
4573 // As an extension (to match gas), support a plain D register or Q register
4574 // (without enclosing curly braces) as a single- or double-entry list,
4575 // respectively.
4576 if (!hasMVE() && Parser.getTok().is(AsmToken::Identifier)) {
4577 SMLoc E = Parser.getTok().getEndLoc();
4578 int Reg = tryParseRegister();
4579 if (Reg == -1)
4580 return MatchOperand_NoMatch;
4581 if (ARMMCRegisterClasses[ARM::DPRRegClassID].contains(Reg)) {
4582 OperandMatchResultTy Res = parseVectorLane(LaneKind, LaneIndex, E);
4583 if (Res != MatchOperand_Success)
4584 return Res;
4585 switch (LaneKind) {
4586 case NoLanes:
4587 Operands.push_back(ARMOperand::CreateVectorList(Reg, 1, false, S, E));
4588 break;
4589 case AllLanes:
4590 Operands.push_back(ARMOperand::CreateVectorListAllLanes(Reg, 1, false,
4591 S, E));
4592 break;
4593 case IndexedLane:
4594 Operands.push_back(ARMOperand::CreateVectorListIndexed(Reg, 1,
4595 LaneIndex,
4596 false, S, E));
4597 break;
4598 }
4599 return MatchOperand_Success;
4600 }
4601 if (ARMMCRegisterClasses[ARM::QPRRegClassID].contains(Reg)) {
4602 Reg = getDRegFromQReg(Reg);
4603 OperandMatchResultTy Res = parseVectorLane(LaneKind, LaneIndex, E);
4604 if (Res != MatchOperand_Success)
4605 return Res;
4606 switch (LaneKind) {
4607 case NoLanes:
4608 Reg = MRI->getMatchingSuperReg(Reg, ARM::dsub_0,
4609 &ARMMCRegisterClasses[ARM::DPairRegClassID]);
4610 Operands.push_back(ARMOperand::CreateVectorList(Reg, 2, false, S, E));
4611 break;
4612 case AllLanes:
4613 Reg = MRI->getMatchingSuperReg(Reg, ARM::dsub_0,
4614 &ARMMCRegisterClasses[ARM::DPairRegClassID]);
4615 Operands.push_back(ARMOperand::CreateVectorListAllLanes(Reg, 2, false,
4616 S, E));
4617 break;
4618 case IndexedLane:
4619 Operands.push_back(ARMOperand::CreateVectorListIndexed(Reg, 2,
4620 LaneIndex,
4621 false, S, E));
4622 break;
4623 }
4624 return MatchOperand_Success;
4625 }
4626 Error(S, "vector register expected");
4627 return MatchOperand_ParseFail;
4628 }
4629
4630 if (Parser.getTok().isNot(AsmToken::LCurly))
4631 return MatchOperand_NoMatch;
4632
4633 Parser.Lex(); // Eat '{' token.
4634 SMLoc RegLoc = Parser.getTok().getLoc();
4635
4636 int Reg = tryParseRegister();
4637 if (Reg == -1) {
4638 Error(RegLoc, "register expected");
4639 return MatchOperand_ParseFail;
4640 }
4641 unsigned Count = 1;
4642 int Spacing = 0;
4643 unsigned FirstReg = Reg;
4644
4645 if (hasMVE() && !ARMMCRegisterClasses[ARM::MQPRRegClassID].contains(Reg)) {
4646 Error(Parser.getTok().getLoc(), "vector register in range Q0-Q7 expected");
4647 return MatchOperand_ParseFail;
4648 }
4649 // The list is of D registers, but we also allow Q regs and just interpret
4650 // them as the two D sub-registers.
4651 else if (!hasMVE() && ARMMCRegisterClasses[ARM::QPRRegClassID].contains(Reg)) {
4652 FirstReg = Reg = getDRegFromQReg(Reg);
4653 Spacing = 1; // double-spacing requires explicit D registers, otherwise
4654 // it's ambiguous with four-register single spaced.
4655 ++Reg;
4656 ++Count;
4657 }
4658
4659 SMLoc E;
4660 if (parseVectorLane(LaneKind, LaneIndex, E) != MatchOperand_Success)
4661 return MatchOperand_ParseFail;
4662
4663 while (Parser.getTok().is(AsmToken::Comma) ||
4664 Parser.getTok().is(AsmToken::Minus)) {
4665 if (Parser.getTok().is(AsmToken::Minus)) {
4666 if (!Spacing)
4667 Spacing = 1; // Register range implies a single spaced list.
4668 else if (Spacing == 2) {
4669 Error(Parser.getTok().getLoc(),
4670 "sequential registers in double spaced list");
4671 return MatchOperand_ParseFail;
4672 }
4673 Parser.Lex(); // Eat the minus.
4674 SMLoc AfterMinusLoc = Parser.getTok().getLoc();
4675 int EndReg = tryParseRegister();
4676 if (EndReg == -1) {
4677 Error(AfterMinusLoc, "register expected");
4678 return MatchOperand_ParseFail;
4679 }
4680 // Allow Q regs and just interpret them as the two D sub-registers.
4681 if (!hasMVE() && ARMMCRegisterClasses[ARM::QPRRegClassID].contains(EndReg))
4682 EndReg = getDRegFromQReg(EndReg) + 1;
4683 // If the register is the same as the start reg, there's nothing
4684 // more to do.
4685 if (Reg == EndReg)
4686 continue;
4687 // The register must be in the same register class as the first.
4688 if ((hasMVE() &&
4689 !ARMMCRegisterClasses[ARM::MQPRRegClassID].contains(EndReg)) ||
4690 (!hasMVE() &&
4691 !ARMMCRegisterClasses[ARM::DPRRegClassID].contains(EndReg))) {
4692 Error(AfterMinusLoc, "invalid register in register list");
4693 return MatchOperand_ParseFail;
4694 }
4695 // Ranges must go from low to high.
4696 if (Reg > EndReg) {
4697 Error(AfterMinusLoc, "bad range in register list");
4698 return MatchOperand_ParseFail;
4699 }
4700 // Parse the lane specifier if present.
4701 VectorLaneTy NextLaneKind;
4702 unsigned NextLaneIndex;
4703 if (parseVectorLane(NextLaneKind, NextLaneIndex, E) !=
4704 MatchOperand_Success)
4705 return MatchOperand_ParseFail;
4706 if (NextLaneKind != LaneKind || LaneIndex != NextLaneIndex) {
4707 Error(AfterMinusLoc, "mismatched lane index in register list");
4708 return MatchOperand_ParseFail;
4709 }
4710
4711 // Add all the registers in the range to the register list.
4712 Count += EndReg - Reg;
4713 Reg = EndReg;
4714 continue;
4715 }
4716 Parser.Lex(); // Eat the comma.
4717 RegLoc = Parser.getTok().getLoc();
4718 int OldReg = Reg;
4719 Reg = tryParseRegister();
4720 if (Reg == -1) {
4721 Error(RegLoc, "register expected");
4722 return MatchOperand_ParseFail;
4723 }
4724
4725 if (hasMVE()) {
4726 if (!ARMMCRegisterClasses[ARM::MQPRRegClassID].contains(Reg)) {
4727 Error(RegLoc, "vector register in range Q0-Q7 expected");
4728 return MatchOperand_ParseFail;
4729 }
4730 Spacing = 1;
4731 }
4732 // Vector register lists must be contiguous.
4733 // It's OK to use the enumeration values directly here, as the
4734 // VFP register classes have the enum sorted properly.
4735 //
4736 // The list is of D registers, but we also allow Q regs and just interpret
4737 // them as the two D sub-registers.
4738 else if (ARMMCRegisterClasses[ARM::QPRRegClassID].contains(Reg)) {
4739 if (!Spacing)
4740 Spacing = 1; // Register range implies a single spaced list.
4741 else if (Spacing == 2) {
4742 Error(RegLoc,
4743 "invalid register in double-spaced list (must be 'D' register')");
4744 return MatchOperand_ParseFail;
4745 }
4746 Reg = getDRegFromQReg(Reg);
4747 if (Reg != OldReg + 1) {
4748 Error(RegLoc, "non-contiguous register range");
4749 return MatchOperand_ParseFail;
4750 }
4751 ++Reg;
4752 Count += 2;
4753 // Parse the lane specifier if present.
4754 VectorLaneTy NextLaneKind;
4755 unsigned NextLaneIndex;
4756 SMLoc LaneLoc = Parser.getTok().getLoc();
4757 if (parseVectorLane(NextLaneKind, NextLaneIndex, E) !=
4758 MatchOperand_Success)
4759 return MatchOperand_ParseFail;
4760 if (NextLaneKind != LaneKind || LaneIndex != NextLaneIndex) {
4761 Error(LaneLoc, "mismatched lane index in register list");
4762 return MatchOperand_ParseFail;
4763 }
4764 continue;
4765 }
4766 // Normal D register.
4767 // Figure out the register spacing (single or double) of the list if
4768 // we don't know it already.
4769 if (!Spacing)
4770 Spacing = 1 + (Reg == OldReg + 2);
4771
4772 // Just check that it's contiguous and keep going.
4773 if (Reg != OldReg + Spacing) {
4774 Error(RegLoc, "non-contiguous register range");
4775 return MatchOperand_ParseFail;
4776 }
4777 ++Count;
4778 // Parse the lane specifier if present.
4779 VectorLaneTy NextLaneKind;
4780 unsigned NextLaneIndex;
4781 SMLoc EndLoc = Parser.getTok().getLoc();
4782 if (parseVectorLane(NextLaneKind, NextLaneIndex, E) != MatchOperand_Success)
4783 return MatchOperand_ParseFail;
4784 if (NextLaneKind != LaneKind || LaneIndex != NextLaneIndex) {
4785 Error(EndLoc, "mismatched lane index in register list");
4786 return MatchOperand_ParseFail;
4787 }
4788 }
4789
4790 if (Parser.getTok().isNot(AsmToken::RCurly)) {
4791 Error(Parser.getTok().getLoc(), "'}' expected");
4792 return MatchOperand_ParseFail;
4793 }
4794 E = Parser.getTok().getEndLoc();
4795 Parser.Lex(); // Eat '}' token.
4796
4797 switch (LaneKind) {
4798 case NoLanes:
4799 case AllLanes: {
4800 // Two-register operands have been converted to the
4801 // composite register classes.
4802 if (Count == 2 && !hasMVE()) {
4803 const MCRegisterClass *RC = (Spacing == 1) ?
4804 &ARMMCRegisterClasses[ARM::DPairRegClassID] :
4805 &ARMMCRegisterClasses[ARM::DPairSpcRegClassID];
4806 FirstReg = MRI->getMatchingSuperReg(FirstReg, ARM::dsub_0, RC);
4807 }
4808 auto Create = (LaneKind == NoLanes ? ARMOperand::CreateVectorList :
4809 ARMOperand::CreateVectorListAllLanes);
4810 Operands.push_back(Create(FirstReg, Count, (Spacing == 2), S, E));
4811 break;
4812 }
4813 case IndexedLane:
4814 Operands.push_back(ARMOperand::CreateVectorListIndexed(FirstReg, Count,
4815 LaneIndex,
4816 (Spacing == 2),
4817 S, E));
4818 break;
4819 }
4820 return MatchOperand_Success;
4821}
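// Illustrative examples of vector list forms the routine above accepts
// (assuming standard NEON/MVE assembly; shown here only for orientation):
//   vld1.32 {d0, d1}, [r0]                       @ single-spaced, Count == 2
//   vld1.32 {d0-d3},  [r0]                       @ range form, single spacing
//   vld4.32 {d0[1], d2[1], d4[1], d6[1]}, [r0]   @ double-spaced, indexed lane
//   vld1.32 d0, [r0]                             @ gas extension: bare D reg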
4822
4823/// parseMemBarrierOptOperand - Try to parse DSB/DMB data barrier options.
4824OperandMatchResultTy
4825ARMAsmParser::parseMemBarrierOptOperand(OperandVector &Operands) {
4826 MCAsmParser &Parser = getParser();
4827 SMLoc S = Parser.getTok().getLoc();
4828 const AsmToken &Tok = Parser.getTok();
4829 unsigned Opt;
4830
4831 if (Tok.is(AsmToken::Identifier)) {
4832 StringRef OptStr = Tok.getString();
4833
4834 Opt = StringSwitch<unsigned>(OptStr.slice(0, OptStr.size()).lower())
4835 .Case("sy", ARM_MB::SY)
4836 .Case("st", ARM_MB::ST)
4837 .Case("ld", ARM_MB::LD)
4838 .Case("sh", ARM_MB::ISH)
4839 .Case("ish", ARM_MB::ISH)
4840 .Case("shst", ARM_MB::ISHST)
4841 .Case("ishst", ARM_MB::ISHST)
4842 .Case("ishld", ARM_MB::ISHLD)
4843 .Case("nsh", ARM_MB::NSH)
4844 .Case("un", ARM_MB::NSH)
4845 .Case("nshst", ARM_MB::NSHST)
4846 .Case("nshld", ARM_MB::NSHLD)
4847 .Case("unst", ARM_MB::NSHST)
4848 .Case("osh", ARM_MB::OSH)
4849 .Case("oshst", ARM_MB::OSHST)
4850 .Case("oshld", ARM_MB::OSHLD)
4851 .Default(~0U);
4852
4853 // ishld, oshld, nshld and ld are only available from ARMv8.
4854 if (!hasV8Ops() && (Opt == ARM_MB::ISHLD || Opt == ARM_MB::OSHLD ||
4855 Opt == ARM_MB::NSHLD || Opt == ARM_MB::LD))
4856 Opt = ~0U;
4857
4858 if (Opt == ~0U)
4859 return MatchOperand_NoMatch;
4860
4861 Parser.Lex(); // Eat identifier token.
4862 } else if (Tok.is(AsmToken::Hash) ||
4863 Tok.is(AsmToken::Dollar) ||
4864 Tok.is(AsmToken::Integer)) {
4865 if (Parser.getTok().isNot(AsmToken::Integer))
4866 Parser.Lex(); // Eat '#' or '$'.
4867 SMLoc Loc = Parser.getTok().getLoc();
4868
4869 const MCExpr *MemBarrierID;
4870 if (getParser().parseExpression(MemBarrierID)) {
4871 Error(Loc, "illegal expression");
4872 return MatchOperand_ParseFail;
4873 }
4874
4875 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(MemBarrierID);
4876 if (!CE) {
4877 Error(Loc, "constant expression expected");
4878 return MatchOperand_ParseFail;
4879 }
4880
4881 int Val = CE->getValue();
4882 if (Val & ~0xf) {
4883 Error(Loc, "immediate value out of range");
4884 return MatchOperand_ParseFail;
4885 }
4886
4887 Opt = ARM_MB::RESERVED_0 + Val;
4888 } else
4889 return MatchOperand_ParseFail;
4890
4891 Operands.push_back(ARMOperand::CreateMemBarrierOpt((ARM_MB::MemBOpt)Opt, S));
4892 return MatchOperand_Success;
4893}
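// Illustrative examples for the barrier-option parsing above (assuming
// standard ARM assembly): "dmb ish" and "dmb sy" use named options, a raw
// 4-bit value such as "dmb #11" is also accepted, and "dmb ld" is only
// recognized once ARMv8 operations are available.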
4894
4895OperandMatchResultTy
4896ARMAsmParser::parseTraceSyncBarrierOptOperand(OperandVector &Operands) {
4897 MCAsmParser &Parser = getParser();
4898 SMLoc S = Parser.getTok().getLoc();
4899 const AsmToken &Tok = Parser.getTok();
4900
4901 if (Tok.isNot(AsmToken::Identifier))
4902 return MatchOperand_NoMatch;
4903
4904 if (!Tok.getString().equals_lower("csync"))
4905 return MatchOperand_NoMatch;
4906
4907 Parser.Lex(); // Eat identifier token.
4908
4909 Operands.push_back(ARMOperand::CreateTraceSyncBarrierOpt(ARM_TSB::CSYNC, S));
4910 return MatchOperand_Success;
4911}
4912
4913/// parseInstSyncBarrierOptOperand - Try to parse ISB inst sync barrier options.
4914OperandMatchResultTy
4915ARMAsmParser::parseInstSyncBarrierOptOperand(OperandVector &Operands) {
4916 MCAsmParser &Parser = getParser();
4917 SMLoc S = Parser.getTok().getLoc();
4918 const AsmToken &Tok = Parser.getTok();
4919 unsigned Opt;
4920
4921 if (Tok.is(AsmToken::Identifier)) {
4922 StringRef OptStr = Tok.getString();
4923
4924 if (OptStr.equals_lower("sy"))
4925 Opt = ARM_ISB::SY;
4926 else
4927 return MatchOperand_NoMatch;
4928
4929 Parser.Lex(); // Eat identifier token.
4930 } else if (Tok.is(AsmToken::Hash) ||
4931 Tok.is(AsmToken::Dollar) ||
4932 Tok.is(AsmToken::Integer)) {
4933 if (Parser.getTok().isNot(AsmToken::Integer))
4934 Parser.Lex(); // Eat '#' or '$'.
4935 SMLoc Loc = Parser.getTok().getLoc();
4936
4937 const MCExpr *ISBarrierID;
4938 if (getParser().parseExpression(ISBarrierID)) {
4939 Error(Loc, "illegal expression");
4940 return MatchOperand_ParseFail;
4941 }
4942
4943 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(ISBarrierID);
4944 if (!CE) {
4945 Error(Loc, "constant expression expected");
4946 return MatchOperand_ParseFail;
4947 }
4948
4949 int Val = CE->getValue();
4950 if (Val & ~0xf) {
4951 Error(Loc, "immediate value out of range");
4952 return MatchOperand_ParseFail;
4953 }
4954
4955 Opt = ARM_ISB::RESERVED_0 + Val;
4956 } else
4957 return MatchOperand_ParseFail;
4958
4959 Operands.push_back(ARMOperand::CreateInstSyncBarrierOpt(
4960 (ARM_ISB::InstSyncBOpt)Opt, S));
4961 return MatchOperand_Success;
4962}
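// Illustrative examples for the ISB option parsing above (assuming standard
// ARM assembly): "isb sy" uses the only named option, while "isb #15"
// supplies the raw 4-bit option value directly.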
4963
4964
4965/// parseProcIFlagsOperand - Try to parse iflags from CPS instruction.
4966OperandMatchResultTy
4967ARMAsmParser::parseProcIFlagsOperand(OperandVector &Operands) {
4968 MCAsmParser &Parser = getParser();
4969 SMLoc S = Parser.getTok().getLoc();
4970 const AsmToken &Tok = Parser.getTok();
4971 if (!Tok.is(AsmToken::Identifier))
4972 return MatchOperand_NoMatch;
4973 StringRef IFlagsStr = Tok.getString();
4974
4975 // An iflags string of "none" is interpreted to mean that none of the AIF
4976 // bits are set. Not a terribly useful instruction, but a valid encoding.
4977 unsigned IFlags = 0;
4978 if (IFlagsStr != "none") {
4979 for (int i = 0, e = IFlagsStr.size(); i != e; ++i) {
4980 unsigned Flag = StringSwitch<unsigned>(IFlagsStr.substr(i, 1).lower())
4981 .Case("a", ARM_PROC::A)
4982 .Case("i", ARM_PROC::I)
4983 .Case("f", ARM_PROC::F)
4984 .Default(~0U);
4985
4986 // If some specific iflag is already set, it means that some letter is
4987 // present more than once; this is not acceptable.
4988 if (Flag == ~0U || (IFlags & Flag))
4989 return MatchOperand_NoMatch;
4990
4991 IFlags |= Flag;
4992 }
4993 }
4994
4995 Parser.Lex(); // Eat identifier token.
4996 Operands.push_back(ARMOperand::CreateProcIFlags((ARM_PROC::IFlags)IFlags, S));
4997 return MatchOperand_Success;
4998}
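// Illustrative examples (assuming standard ARM assembly): "cpsie aif" sets
// all three AIF bits, "cpsid i" masks IRQs only, and "cpsie none" is the
// valid-but-unusual encoding with no AIF bits set, as noted above.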
4999
5000/// parseMSRMaskOperand - Try to parse mask flags from MSR instruction.
5001OperandMatchResultTy
5002ARMAsmParser::parseMSRMaskOperand(OperandVector &Operands) {
5003 MCAsmParser &Parser = getParser();
5004 SMLoc S = Parser.getTok().getLoc();
5005 const AsmToken &Tok = Parser.getTok();
5006
5007 if (Tok.is(AsmToken::Integer)) {
5008 int64_t Val = Tok.getIntVal();
5009 if (Val > 255 || Val < 0) {
5010 return MatchOperand_NoMatch;
5011 }
5012 unsigned SYSmvalue = Val & 0xFF;
5013 Parser.Lex();
5014 Operands.push_back(ARMOperand::CreateMSRMask(SYSmvalue, S));
5015 return MatchOperand_Success;
5016 }
5017
5018 if (!Tok.is(AsmToken::Identifier))
5019 return MatchOperand_NoMatch;
5020 StringRef Mask = Tok.getString();
5021
5022 if (isMClass()) {
5023 auto TheReg = ARMSysReg::lookupMClassSysRegByName(Mask.lower());
5024 if (!TheReg || !TheReg->hasRequiredFeatures(getSTI().getFeatureBits()))
5025 return MatchOperand_NoMatch;
5026
5027 unsigned SYSmvalue = TheReg->Encoding & 0xFFF;
5028
5029 Parser.Lex(); // Eat identifier token.
5030 Operands.push_back(ARMOperand::CreateMSRMask(SYSmvalue, S));
5031 return MatchOperand_Success;
5032 }
5033
5034 // Split spec_reg from flag, example: CPSR_sxf => "CPSR" and "sxf"
5035 size_t Start = 0, Next = Mask.find('_');
5036 StringRef Flags = "";
5037 std::string SpecReg = Mask.slice(Start, Next).lower();
5038 if (Next != StringRef::npos)
5039 Flags = Mask.slice(Next+1, Mask.size());
5040
5041 // FlagsVal contains the complete mask:
5042 // 3-0: Mask
5043 // 4: Special Reg (cpsr, apsr => 0; spsr => 1)
5044 unsigned FlagsVal = 0;
5045
5046 if (SpecReg == "apsr") {
5047 FlagsVal = StringSwitch<unsigned>(Flags)
5048 .Case("nzcvq", 0x8) // same as CPSR_f
5049 .Case("g", 0x4) // same as CPSR_s
5050 .Case("nzcvqg", 0xc) // same as CPSR_fs
5051 .Default(~0U);
5052
5053 if (FlagsVal == ~0U) {
5054 if (!Flags.empty())
5055 return MatchOperand_NoMatch;
5056 else
5057 FlagsVal = 8; // No flag
5058 }
5059 } else if (SpecReg == "cpsr" || SpecReg == "spsr") {
5060 // cpsr_all is an alias for cpsr_fc, as is plain cpsr.
5061 if (Flags == "all" || Flags == "")
5062 Flags = "fc";
5063 for (int i = 0, e = Flags.size(); i != e; ++i) {
5064 unsigned Flag = StringSwitch<unsigned>(Flags.substr(i, 1))
5065 .Case("c", 1)
5066 .Case("x", 2)
5067 .Case("s", 4)
5068 .Case("f", 8)
5069 .Default(~0U);
5070
5071 // If some specific flag is already set, it means that some letter is
5072 // present more than once; this is not acceptable.
5073 if (Flag == ~0U || (FlagsVal & Flag))
5074 return MatchOperand_NoMatch;
5075 FlagsVal |= Flag;
5076 }
5077 } else // No match for special register.
5078 return MatchOperand_NoMatch;
5079
5080 // Special register without flags is NOT equivalent to "fc" flags.
5081 // NOTE: This is a divergence from gas' behavior. Uncommenting the following
5082 // two lines would enable gas compatibility at the expense of breaking
5083 // round-tripping.
5084 //
5085 // if (!FlagsVal)
5086 // FlagsVal = 0x9;
5087
5088 // Bit 4: Special Reg (cpsr, apsr => 0; spsr => 1)
5089 if (SpecReg == "spsr")
5090 FlagsVal |= 16;
5091
5092 Parser.Lex(); // Eat identifier token.
5093 Operands.push_back(ARMOperand::CreateMSRMask(FlagsVal, S));
5094 return MatchOperand_Success;
5095}
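// Illustrative examples of MSR mask spellings handled above (assuming
// standard ARM assembly; the encodings follow the comments in the code):
//   msr apsr_nzcvq, r0    @ APSR flags field (same as CPSR_f)
//   msr cpsr_fc, r0       @ 'f' and 'c' fields; "cpsr_all"/"cpsr" are aliases
//   msr spsr_fsxc, r0     @ SPSR form; bit 4 of the mask distinguishes SPSR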
5096
5097/// parseBankedRegOperand - Try to parse a banked register (e.g. "lr_irq") for
5098/// use in the MRS/MSR instructions added to support virtualization.
5099OperandMatchResultTy
5100ARMAsmParser::parseBankedRegOperand(OperandVector &Operands) {
5101 MCAsmParser &Parser = getParser();
5102 SMLoc S = Parser.getTok().getLoc();
5103 const AsmToken &Tok = Parser.getTok();
5104 if (!Tok.is(AsmToken::Identifier))
5105 return MatchOperand_NoMatch;
5106 StringRef RegName = Tok.getString();
5107
5108 auto TheReg = ARMBankedReg::lookupBankedRegByName(RegName.lower());
5109 if (!TheReg)
5110 return MatchOperand_NoMatch;
5111 unsigned Encoding = TheReg->Encoding;
5112
5113 Parser.Lex(); // Eat identifier token.
5114 Operands.push_back(ARMOperand::CreateBankedReg(Encoding, S));
5115 return MatchOperand_Success;
5116}
5117
5118OperandMatchResultTy
5119ARMAsmParser::parsePKHImm(OperandVector &Operands, StringRef Op, int Low,
5120 int High) {
5121 MCAsmParser &Parser = getParser();
5122 const AsmToken &Tok = Parser.getTok();
5123 if (Tok.isNot(AsmToken::Identifier)) {
5124 Error(Parser.getTok().getLoc(), Op + " operand expected.");
5125 return MatchOperand_ParseFail;
5126 }
5127 StringRef ShiftName = Tok.getString();
5128 std::string LowerOp = Op.lower();
5129 std::string UpperOp = Op.upper();
5130 if (ShiftName != LowerOp && ShiftName != UpperOp) {
5131 Error(Parser.getTok().getLoc(), Op + " operand expected.");
5132 return MatchOperand_ParseFail;
5133 }
5134 Parser.Lex(); // Eat shift type token.
5135
5136 // There must be a '#' and a shift amount.
5137 if (Parser.getTok().isNot(AsmToken::Hash) &&
5138 Parser.getTok().isNot(AsmToken::Dollar)) {
5139 Error(Parser.getTok().getLoc(), "'#' expected");
5140 return MatchOperand_ParseFail;
5141 }
5142 Parser.Lex(); // Eat hash token.
5143
5144 const MCExpr *ShiftAmount;
5145 SMLoc Loc = Parser.getTok().getLoc();
5146 SMLoc EndLoc;
5147 if (getParser().parseExpression(ShiftAmount, EndLoc)) {
5148 Error(Loc, "illegal expression");
5149 return MatchOperand_ParseFail;
5150 }
5151 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(ShiftAmount);
5152 if (!CE) {
5153 Error(Loc, "constant expression expected");
5154 return MatchOperand_ParseFail;
5155 }
5156 int Val = CE->getValue();
5157 if (Val < Low || Val > High) {
5158 Error(Loc, "immediate value out of range");
5159 return MatchOperand_ParseFail;
5160 }
5161
5162 Operands.push_back(ARMOperand::CreateImm(CE, Loc, EndLoc));
5163
5164 return MatchOperand_Success;
5165}
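// Illustrative examples (assuming standard ARM assembly): this routine parses
// the shift portion of PKH instructions, e.g. "pkhbt r0, r1, r2, lsl #8" or
// "pkhtb r0, r1, r2, asr #16", with the [Low, High] range supplied by the
// caller.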
5166
5167OperandMatchResultTy
5168ARMAsmParser::parseSetEndImm(OperandVector &Operands) {
5169 MCAsmParser &Parser = getParser();
5170 const AsmToken &Tok = Parser.getTok();
5171 SMLoc S = Tok.getLoc();
5172 if (Tok.isNot(AsmToken::Identifier)) {
5173 Error(S, "'be' or 'le' operand expected");
5174 return MatchOperand_ParseFail;
5175 }
5176 int Val = StringSwitch<int>(Tok.getString().lower())
5177 .Case("be", 1)
5178 .Case("le", 0)
5179 .Default(-1);
5180 Parser.Lex(); // Eat the token.
5181
5182 if (Val == -1) {
5183 Error(S, "'be' or 'le' operand expected");
5184 return MatchOperand_ParseFail;
5185 }
5186 Operands.push_back(ARMOperand::CreateImm(MCConstantExpr::create(Val,
5187 getContext()),
5188 S, Tok.getEndLoc()));
5189 return MatchOperand_Success;
5190}
5191
5192/// parseShifterImm - Parse the shifter immediate operand for SSAT/USAT
5193/// instructions. Legal values are:
5194/// lsl #n 'n' in [0,31]
5195/// asr #n 'n' in [1,32]
5196/// n == 32 encoded as n == 0.
5197OperandMatchResultTy
5198ARMAsmParser::parseShifterImm(OperandVector &Operands) {
5199 MCAsmParser &Parser = getParser();
5200 const AsmToken &Tok = Parser.getTok();
5201 SMLoc S = Tok.getLoc();
5202 if (Tok.isNot(AsmToken::Identifier)) {
5203 Error(S, "shift operator 'asr' or 'lsl' expected");
5204 return MatchOperand_ParseFail;
5205 }
5206 StringRef ShiftName = Tok.getString();
5207 bool isASR;
5208 if (ShiftName == "lsl" || ShiftName == "LSL")
5209 isASR = false;
5210 else if (ShiftName == "asr" || ShiftName == "ASR")
5211 isASR = true;
5212 else {
5213 Error(S, "shift operator 'asr' or 'lsl' expected");
5214 return MatchOperand_ParseFail;
5215 }
5216 Parser.Lex(); // Eat the operator.
5217
5218 // A '#' and a shift amount.
5219 if (Parser.getTok().isNot(AsmToken::Hash) &&
5220 Parser.getTok().isNot(AsmToken::Dollar)) {
5221 Error(Parser.getTok().getLoc(), "'#' expected");
5222 return MatchOperand_ParseFail;
5223 }
5224 Parser.Lex(); // Eat hash token.
5225 SMLoc ExLoc = Parser.getTok().getLoc();
5226
5227 const MCExpr *ShiftAmount;
5228 SMLoc EndLoc;
5229 if (getParser().parseExpression(ShiftAmount, EndLoc)) {
5230 Error(ExLoc, "malformed shift expression");
5231 return MatchOperand_ParseFail;
5232 }
5233 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(ShiftAmount);
5234 if (!CE) {
5235 Error(ExLoc, "shift amount must be an immediate");
5236 return MatchOperand_ParseFail;
5237 }
5238
5239 int64_t Val = CE->getValue();
5240 if (isASR) {
5241 // Shift amount must be in [1,32]
5242 if (Val < 1 || Val > 32) {
5243 Error(ExLoc, "'asr' shift amount must be in range [1,32]");
5244 return MatchOperand_ParseFail;
5245 }
5246 // asr #32 encoded as asr #0, but is not allowed in Thumb2 mode.
5247 if (isThumb() && Val == 32) {
5248 Error(ExLoc, "'asr #32' shift amount not allowed in Thumb mode");
5249 return MatchOperand_ParseFail;
5250 }
5251 if (Val == 32) Val = 0;
5252 } else {
5253 // Shift amount must be in [0,31]
5254 if (Val < 0 || Val > 31) {
5255 Error(ExLoc, "'lsl' shift amount must be in range [0,31]");
5256 return MatchOperand_ParseFail;
5257 }
5258 }
5259
5260 Operands.push_back(ARMOperand::CreateShifterImm(isASR, Val, S, EndLoc));
5261
5262 return MatchOperand_Success;
5263}
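// Illustrative examples (assuming standard ARM assembly): "ssat r0, #8, r1,
// lsl #4" and "usat r0, #7, r1, asr #32" both come through here; per the
// rules above, asr #32 is encoded as 0 and is rejected in Thumb mode.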
5264
5265/// parseRotImm - Parse the rotate immediate operand for the SXTB/UXTB family
5266/// of instructions. Legal values are:
5267/// ror #n 'n' in {0, 8, 16, 24}
5268OperandMatchResultTy
5269ARMAsmParser::parseRotImm(OperandVector &Operands) {
5270 MCAsmParser &Parser = getParser();
5271 const AsmToken &Tok = Parser.getTok();
5272 SMLoc S = Tok.getLoc();
5273 if (Tok.isNot(AsmToken::Identifier))
5274 return MatchOperand_NoMatch;
5275 StringRef ShiftName = Tok.getString();
5276 if (ShiftName != "ror" && ShiftName != "ROR")
5277 return MatchOperand_NoMatch;
5278 Parser.Lex(); // Eat the operator.
5279
5280 // A '#' and a rotate amount.
5281 if (Parser.getTok().isNot(AsmToken::Hash) &&
5282 Parser.getTok().isNot(AsmToken::Dollar)) {
5283 Error(Parser.getTok().getLoc(), "'#' expected");
5284 return MatchOperand_ParseFail;
5285 }
5286 Parser.Lex(); // Eat hash token.
5287 SMLoc ExLoc = Parser.getTok().getLoc();
5288
5289 const MCExpr *ShiftAmount;
5290 SMLoc EndLoc;
5291 if (getParser().parseExpression(ShiftAmount, EndLoc)) {
5292 Error(ExLoc, "malformed rotate expression");
5293 return MatchOperand_ParseFail;
5294 }
5295 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(ShiftAmount);
5296 if (!CE) {
5297 Error(ExLoc, "rotate amount must be an immediate");
5298 return MatchOperand_ParseFail;
5299 }
5300
5301 int64_t Val = CE->getValue();
5302 // Shift amount must be in {0, 8, 16, 24} (0 is an undocumented extension;
5303 // normally, zero is represented in asm by omitting the rotate operand
5304 // entirely).
5305 if (Val != 8 && Val != 16 && Val != 24 && Val != 0) {
5306 Error(ExLoc, "'ror' rotate amount must be 8, 16, or 24");
5307 return MatchOperand_ParseFail;
5308 }
5309
5310 Operands.push_back(ARMOperand::CreateRotImm(Val, S, EndLoc));
5311
5312 return MatchOperand_Success;
5313}
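// Illustrative example (assuming standard ARM assembly): "sxtb r0, r1, ror
// #16" sign-extends byte 2 of r1; a rotation of zero is normally written by
// omitting the rotate operand entirely, as the comment above notes.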
5314
5315OperandMatchResultTy
5316ARMAsmParser::parseModImm(OperandVector &Operands) {
5317 MCAsmParser &Parser = getParser();
5318 MCAsmLexer &Lexer = getLexer();
5319 int64_t Imm1, Imm2;
5320
5321 SMLoc S = Parser.getTok().getLoc();
5322
5323 // 1) A mod_imm operand can appear in the place of a register name:
5324 // add r0, #mod_imm
5325 // add r0, r0, #mod_imm
5326 // to correctly handle the latter, we bail out as soon as we see an
5327 // identifier.
5328 //
5329 // 2) Similarly, we do not want to parse into complex operands:
5330 // mov r0, #mod_imm
5331 // mov r0, :lower16:(_foo)
5332 if (Parser.getTok().is(AsmToken::Identifier) ||
5333 Parser.getTok().is(AsmToken::Colon))
5334 return MatchOperand_NoMatch;
5335
5336 // Hash (dollar) is optional as per the ARMARM
5337 if (Parser.getTok().is(AsmToken::Hash) ||
5338 Parser.getTok().is(AsmToken::Dollar)) {
5339 // Avoid parsing into complex operands (#:)
5340 if (Lexer.peekTok().is(AsmToken::Colon))
5341 return MatchOperand_NoMatch;
5342
5343 // Eat the hash (dollar)
5344 Parser.Lex();
5345 }
5346
5347 SMLoc Sx1, Ex1;
5348 Sx1 = Parser.getTok().getLoc();
5349 const MCExpr *Imm1Exp;
5350 if (getParser().parseExpression(Imm1Exp, Ex1)) {
5351 Error(Sx1, "malformed expression");
5352 return MatchOperand_ParseFail;
5353 }
5354
5355 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Imm1Exp);
5356
5357 if (CE) {
5358 // Immediate must fit within 32 bits
5359 Imm1 = CE->getValue();
5360 int Enc = ARM_AM::getSOImmVal(Imm1);
5361 if (Enc != -1 && Parser.getTok().is(AsmToken::EndOfStatement)) {
5362 // We have a match!
5363 Operands.push_back(ARMOperand::CreateModImm((Enc & 0xFF),
5364 (Enc & 0xF00) >> 7,
5365 Sx1, Ex1));
5366 return MatchOperand_Success;
5367 }
5368
5369 // We have parsed an immediate which is not for us; fall back to a plain
5370 // immediate. This can happen for instruction aliases. For an example,
5371 // ARMInstrInfo.td defines the alias [mov <-> mvn] which can transform
5372 // a mov (mvn) with a mod_imm_neg/mod_imm_not operand into the opposite
5373 // instruction with a mod_imm operand. The alias is defined such that the
5374 // parser method is shared, that's why we have to do this here.
5375 if (Parser.getTok().is(AsmToken::EndOfStatement)) {
5376 Operands.push_back(ARMOperand::CreateImm(Imm1Exp, Sx1, Ex1));
5377 return MatchOperand_Success;
5378 }
5379 } else {
5380 // Operands like #(l1 - l2) can only be evaluated at a later stage (via an
5381 // MCFixup). Fall back to a plain immediate.
5382 Operands.push_back(ARMOperand::CreateImm(Imm1Exp, Sx1, Ex1));
5383 return MatchOperand_Success;
5384 }
5385
5386 // From this point onward, we expect the input to be a (#bits, #rot) pair
5387 if (Parser.getTok().isNot(AsmToken::Comma)) {
5388 Error(Sx1, "expected modified immediate operand: #[0, 255], #even[0-30]");
5389 return MatchOperand_ParseFail;
5390 }
5391
5392 if (Imm1 & ~0xFF) {
5393 Error(Sx1, "immediate operand must be a number in the range [0, 255]");
5394 return MatchOperand_ParseFail;
5395 }
5396
5397 // Eat the comma
5398 Parser.Lex();
5399
5400 // Repeat for #rot
5401 SMLoc Sx2, Ex2;
5402 Sx2 = Parser.getTok().getLoc();
5403
5404 // Eat the optional hash (dollar)
5405 if (Parser.getTok().is(AsmToken::Hash) ||
5406 Parser.getTok().is(AsmToken::Dollar))
5407 Parser.Lex();
5408
5409 const MCExpr *Imm2Exp;
5410 if (getParser().parseExpression(Imm2Exp, Ex2)) {
5411 Error(Sx2, "malformed expression");
5412 return MatchOperand_ParseFail;
5413 }
5414
5415 CE = dyn_cast<MCConstantExpr>(Imm2Exp);
5416
5417 if (CE) {
5418 Imm2 = CE->getValue();
5419 if (!(Imm2 & ~0x1E)) {
5420 // We have a match!
5421 Operands.push_back(ARMOperand::CreateModImm(Imm1, Imm2, S, Ex2));
5422 return MatchOperand_Success;
5423 }
5424 Error(Sx2, "immediate operand must an even number in the range [0, 30]");
5425 return MatchOperand_ParseFail;
5426 } else {
5427 Error(Sx2, "constant expression expected");
5428 return MatchOperand_ParseFail;
5429 }
5430}
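// Worked example for the modified-immediate parsing above (illustrative, not
// part of the original source): for "mov r0, #0x1F00", getSOImmVal finds the
// encoding 0x1F rotated right by 24, so the operand becomes ModImm(bits=0x1F,
// rot=24). The explicit two-operand spelling "mov r0, #31, #24" instead takes
// the (#bits, #rot) path and produces the same operand.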
5431
5432OperandMatchResultTy
5433ARMAsmParser::parseBitfield(OperandVector &Operands) {
5434 MCAsmParser &Parser = getParser();
5435 SMLoc S = Parser.getTok().getLoc();
5436 // The bitfield descriptor is really two operands, the LSB and the width.
5437 if (Parser.getTok().isNot(AsmToken::Hash) &&
5438 Parser.getTok().isNot(AsmToken::Dollar)) {
5439 Error(Parser.getTok().getLoc(), "'#' expected");
5440 return MatchOperand_ParseFail;
5441 }
5442 Parser.Lex(); // Eat hash token.
5443
5444 const MCExpr *LSBExpr;
5445 SMLoc E = Parser.getTok().getLoc();
5446 if (getParser().parseExpression(LSBExpr)) {
5447 Error(E, "malformed immediate expression");
5448 return MatchOperand_ParseFail;
5449 }
5450 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(LSBExpr);
5451 if (!CE) {
5452 Error(E, "'lsb' operand must be an immediate");
5453 return MatchOperand_ParseFail;
5454 }
5455
5456 int64_t LSB = CE->getValue();
5457 // The LSB must be in the range [0,31]
5458 if (LSB < 0 || LSB > 31) {
5459 Error(E, "'lsb' operand must be in the range [0,31]");
5460 return MatchOperand_ParseFail;
5461 }
5462 E = Parser.getTok().getLoc();
5463
5464 // Expect another immediate operand.
5465 if (Parser.getTok().isNot(AsmToken::Comma)) {
5466 Error(Parser.getTok().getLoc(), "too few operands");
5467 return MatchOperand_ParseFail;
5468 }
5469 Parser.Lex(); // Eat comma token.
5470 if (Parser.getTok().isNot(AsmToken::Hash) &&
5471 Parser.getTok().isNot(AsmToken::Dollar)) {
5472 Error(Parser.getTok().getLoc(), "'#' expected");
5473 return MatchOperand_ParseFail;
5474 }
5475 Parser.Lex(); // Eat hash token.
5476
5477 const MCExpr *WidthExpr;
5478 SMLoc EndLoc;
5479 if (getParser().parseExpression(WidthExpr, EndLoc)) {
5480 Error(E, "malformed immediate expression");
5481 return MatchOperand_ParseFail;
5482 }
5483 CE = dyn_cast<MCConstantExpr>(WidthExpr);
5484 if (!CE) {
5485 Error(E, "'width' operand must be an immediate");
5486 return MatchOperand_ParseFail;
5487 }
5488
5489 int64_t Width = CE->getValue();
5490 // The width must be in the range [1,32-lsb]
5491 if (Width < 1 || Width > 32 - LSB) {
5492 Error(E, "'width' operand must be in the range [1,32-lsb]");
5493 return MatchOperand_ParseFail;
5494 }
5495
5496 Operands.push_back(ARMOperand::CreateBitfield(LSB, Width, S, EndLoc));
5497
5498 return MatchOperand_Success;
5499}
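// Worked example (illustrative): "bfi r0, r1, #8, #4" parses LSB == 8 and
// Width == 4; since the check above requires 1 <= Width <= 32 - LSB, a width
// of 28 with LSB 8 would be rejected with the range diagnostic.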
5500
5501OperandMatchResultTy
5502ARMAsmParser::parsePostIdxReg(OperandVector &Operands) {
5503 // Check for a post-index addressing register operand. Specifically:
5504 // postidx_reg := '+' register {, shift}
5505 // | '-' register {, shift}
5506 // | register {, shift}
5507
5508 // This method must return MatchOperand_NoMatch without consuming any tokens
5509 // in the case where there is no match, as other alternatives take other
5510 // parse methods.
5511 MCAsmParser &Parser = getParser();
5512 AsmToken Tok = Parser.getTok();
5513 SMLoc S = Tok.getLoc();
5514 bool haveEaten = false;
5515 bool isAdd = true;
5516 if (Tok.is(AsmToken::Plus)) {
5517 Parser.Lex(); // Eat the '+' token.
5518 haveEaten = true;
5519 } else if (Tok.is(AsmToken::Minus)) {
5520 Parser.Lex(); // Eat the '-' token.
5521 isAdd = false;
5522 haveEaten = true;
5523 }
5524
5525 SMLoc E = Parser.getTok().getEndLoc();
5526 int Reg = tryParseRegister();
5527 if (Reg == -1) {
5528 if (!haveEaten)
5529 return MatchOperand_NoMatch;
5530 Error(Parser.getTok().getLoc(), "register expected");
5531 return MatchOperand_ParseFail;
5532 }
5533
5534 ARM_AM::ShiftOpc ShiftTy = ARM_AM::no_shift;
5535 unsigned ShiftImm = 0;
5536 if (Parser.getTok().is(AsmToken::Comma)) {
5537 Parser.Lex(); // Eat the ','.
5538 if (parseMemRegOffsetShift(ShiftTy, ShiftImm))
5539 return MatchOperand_ParseFail;
5540
5541 // FIXME: Only approximates end...may include intervening whitespace.
5542 E = Parser.getTok().getLoc();
5543 }
5544
5545 Operands.push_back(ARMOperand::CreatePostIdxReg(Reg, isAdd, ShiftTy,
5546 ShiftImm, S, E));
5547
5548 return MatchOperand_Success;
5549}
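// Illustrative examples (assuming standard ARM assembly): post-indexed
// register offsets such as "ldr r0, [r1], r2", "ldr r0, [r1], -r2" and
// "ldr r0, [r1], r2, lsl #2" are handled by the routine above once the
// closing ']' of the memory operand has been consumed.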
5550
5551OperandMatchResultTy
5552ARMAsmParser::parseAM3Offset(OperandVector &Operands) {
5553 // Check for a post-index addressing register operand. Specifically:
5554 // am3offset := '+' register
5555 // | '-' register
5556 // | register
5557 // | # imm
5558 // | # + imm
5559 // | # - imm
5560
5561 // This method must return MatchOperand_NoMatch without consuming any tokens
5562 // in the case where there is no match, as other alternatives take other
5563 // parse methods.
5564 MCAsmParser &Parser = getParser();
5565 AsmToken Tok = Parser.getTok();
5566 SMLoc S = Tok.getLoc();
5567
5568 // Do immediates first, as we always parse those if we have a '#'.
5569 if (Parser.getTok().is(AsmToken::Hash) ||
5570 Parser.getTok().is(AsmToken::Dollar)) {
5571 Parser.Lex(); // Eat '#' or '$'.
5572 // Explicitly look for a '-', as we need to encode negative zero
5573 // differently.
5574 bool isNegative = Parser.getTok().is(AsmToken::Minus);
5575 const MCExpr *Offset;
5576 SMLoc E;
5577 if (getParser().parseExpression(Offset, E))
5578 return MatchOperand_ParseFail;
5579 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Offset);
5580 if (!CE) {
5581 Error(S, "constant expression expected");
5582 return MatchOperand_ParseFail;
5583 }
5584 // Negative zero is encoded as the flag value
5585 // std::numeric_limits<int32_t>::min().
5586 int32_t Val = CE->getValue();
5587 if (isNegative && Val == 0)
5588 Val = std::numeric_limits<int32_t>::min();
5589
5590 Operands.push_back(
5591 ARMOperand::CreateImm(MCConstantExpr::create(Val, getContext()), S, E));
5592
5593 return MatchOperand_Success;
5594 }
5595
5596 bool haveEaten = false;
5597 bool isAdd = true;
5598 if (Tok.is(AsmToken::Plus)) {
5599 Parser.Lex(); // Eat the '+' token.
5600 haveEaten = true;
5601 } else if (Tok.is(AsmToken::Minus)) {
5602 Parser.Lex(); // Eat the '-' token.
5603 isAdd = false;
5604 haveEaten = true;
5605 }
5606
5607 Tok = Parser.getTok();
5608 int Reg = tryParseRegister();
5609 if (Reg == -1) {
5610 if (!haveEaten)
5611 return MatchOperand_NoMatch;
5612 Error(Tok.getLoc(), "register expected");
5613 return MatchOperand_ParseFail;
5614 }
5615
5616 Operands.push_back(ARMOperand::CreatePostIdxReg(Reg, isAdd, ARM_AM::no_shift,
5617 0, S, Tok.getEndLoc()));
5618
5619 return MatchOperand_Success;
5620}
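// Illustrative examples (assuming standard ARM assembly): addressing-mode-3
// post-index offsets such as "ldrh r0, [r1], #8", "ldrh r0, [r1], #-8" and
// "ldrh r0, [r1], -r2" come through here; "#-0" is distinguished from "#0"
// via the INT32_MIN flag value noted above.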
5621
5622/// Convert parsed operands to MCInst. Needed here because this instruction
5623/// only has two register operands, but multiplication is commutative so
5624/// assemblers should accept both "mul rD, rN, rD" and "mul rD, rD, rN".
5625void ARMAsmParser::cvtThumbMultiply(MCInst &Inst,
5626 const OperandVector &Operands) {
5627 ((ARMOperand &)*Operands[3]).addRegOperands(Inst, 1);
5628 ((ARMOperand &)*Operands[1]).addCCOutOperands(Inst, 1);
5629 // If we have a three-operand form, make sure to set Rn to be the operand
5630 // that isn't the same as Rd.
5631 unsigned RegOp = 4;
5632 if (Operands.size() == 6 &&
5633 ((ARMOperand &)*Operands[4]).getReg() ==
5634 ((ARMOperand &)*Operands[3]).getReg())
5635 RegOp = 5;
5636 ((ARMOperand &)*Operands[RegOp]).addRegOperands(Inst, 1);
5637 Inst.addOperand(Inst.getOperand(0));
5638 ((ARMOperand &)*Operands[2]).addCondCodeOperands(Inst, 2);
5639}
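// Illustrative example (assuming standard Thumb assembly): because MUL is
// commutative, both "muls r0, r1, r0" and "muls r0, r0, r1" assemble to the
// same two-register encoding; the RegOp selection above picks whichever
// source register is not Rd.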
5640
5641void ARMAsmParser::cvtThumbBranches(MCInst &Inst,
5642 const OperandVector &Operands) {
5643 int CondOp = -1, ImmOp = -1;
5644 switch(Inst.getOpcode()) {
5645 case ARM::tB:
5646 case ARM::tBcc: CondOp = 1; ImmOp = 2; break;
5647
5648 case ARM::t2B:
5649 case ARM::t2Bcc: CondOp = 1; ImmOp = 3; break;
5650
5651 default: llvm_unreachable("Unexpected instruction in cvtThumbBranches");
5652 }
5653 // First decide whether or not the branch should be conditional
5654 // by looking at its location relative to an IT block.
5655 if(inITBlock()) {
5656 // Inside an IT block we cannot have any conditional branches; any
5657 // such instruction needs to be converted to unconditional form.
5658 switch(Inst.getOpcode()) {
5659 case ARM::tBcc: Inst.setOpcode(ARM::tB); break;
5660 case ARM::t2Bcc: Inst.setOpcode(ARM::t2B); break;
5661 }
5662 } else {
5663 // outside IT blocks we can only have unconditional branches with AL
5664 // condition code or conditional branches with non-AL condition code
5665 unsigned Cond = static_cast<ARMOperand &>(*Operands[CondOp]).getCondCode();
5666 switch(Inst.getOpcode()) {
5667 case ARM::tB:
5668 case ARM::tBcc:
5669 Inst.setOpcode(Cond == ARMCC::AL ? ARM::tB : ARM::tBcc);
5670 break;
5671 case ARM::t2B:
5672 case ARM::t2Bcc:
5673 Inst.setOpcode(Cond == ARMCC::AL ? ARM::t2B : ARM::t2Bcc);
5674 break;
5675 }
5676 }
5677
5678 // now decide on encoding size based on branch target range
5679 switch(Inst.getOpcode()) {
5680 // classify tB as either t2B or t1B based on range of immediate operand
5681 case ARM::tB: {
5682 ARMOperand &op = static_cast<ARMOperand &>(*Operands[ImmOp]);
5683 if (!op.isSignedOffset<11, 1>() && isThumb() && hasV8MBaseline())
5684 Inst.setOpcode(ARM::t2B);
5685 break;
5686 }
5687 // classify tBcc as either t2Bcc or t1Bcc based on range of immediate operand
5688 case ARM::tBcc: {
5689 ARMOperand &op = static_cast<ARMOperand &>(*Operands[ImmOp]);
5690 if (!op.isSignedOffset<8, 1>() && isThumb() && hasV8MBaseline())
5691 Inst.setOpcode(ARM::t2Bcc);
5692 break;
5693 }
5694 }
5695 ((ARMOperand &)*Operands[ImmOp]).addImmOperands(Inst, 1);
5696 ((ARMOperand &)*Operands[CondOp]).addCondCodeOperands(Inst, 2);
5697}
5698
5699void ARMAsmParser::cvtMVEVMOVQtoDReg(
5700 MCInst &Inst, const OperandVector &Operands) {
5701
5702 // mnemonic, condition code, Rt, Rt2, Qd, idx, Qd again, idx2
5703 assert(Operands.size() == 8);
5704
5705 ((ARMOperand &)*Operands[2]).addRegOperands(Inst, 1); // Rt
5706 ((ARMOperand &)*Operands[3]).addRegOperands(Inst, 1); // Rt2
5707 ((ARMOperand &)*Operands[4]).addRegOperands(Inst, 1); // Qd
5708 ((ARMOperand &)*Operands[5]).addMVEPairVectorIndexOperands(Inst, 1); // idx
5709 // skip second copy of Qd in Operands[6]
5710 ((ARMOperand &)*Operands[7]).addMVEPairVectorIndexOperands(Inst, 1); // idx2
5711 ((ARMOperand &)*Operands[1]).addCondCodeOperands(Inst, 2); // condition code
5712}
5713
5714/// Parse an ARM memory expression; return false if successful, otherwise
5715/// report an error and return true. The first token must be a '[' when called.
5716bool ARMAsmParser::parseMemory(OperandVector &Operands) {
5717 MCAsmParser &Parser = getParser();
5718 SMLoc S, E;
5719 if (Parser.getTok().isNot(AsmToken::LBrac))
5720 return TokError("Token is not a Left Bracket");
5721 S = Parser.getTok().getLoc();
5722 Parser.Lex(); // Eat left bracket token.
5723
5724 const AsmToken &BaseRegTok = Parser.getTok();
5725 int BaseRegNum = tryParseRegister();
5726 if (BaseRegNum == -1)
5727 return Error(BaseRegTok.getLoc(), "register expected");
5728
5729 // The next token must either be a comma, a colon or a closing bracket.
5730 const AsmToken &Tok = Parser.getTok();
5731 if (!Tok.is(AsmToken::Colon) && !Tok.is(AsmToken::Comma) &&
5732 !Tok.is(AsmToken::RBrac))
5733 return Error(Tok.getLoc(), "malformed memory operand");
5734
5735 if (Tok.is(AsmToken::RBrac)) {
5736 E = Tok.getEndLoc();
5737 Parser.Lex(); // Eat right bracket token.
5738
5739 Operands.push_back(ARMOperand::CreateMem(BaseRegNum, nullptr, 0,
5740 ARM_AM::no_shift, 0, 0, false,
5741 S, E));
5742
5743 // If there's a pre-indexing writeback marker, '!', just add it as a token
5744 // operand. It's rather odd, but syntactically valid.
5745 if (Parser.getTok().is(AsmToken::Exclaim)) {
5746 Operands.push_back(ARMOperand::CreateToken("!",Parser.getTok().getLoc()));
5747 Parser.Lex(); // Eat the '!'.
5748 }
5749
5750 return false;
5751 }
5752
5753 assert((Tok.is(AsmToken::Colon) || Tok.is(AsmToken::Comma)) &&
5754 "Lost colon or comma in memory operand?!");
5755 if (Tok.is(AsmToken::Comma)) {
5756 Parser.Lex(); // Eat the comma.
5757 }
5758
5759 // If we have a ':', it's an alignment specifier.
5760 if (Parser.getTok().is(AsmToken::Colon)) {
5761 Parser.Lex(); // Eat the ':'.
5762 E = Parser.getTok().getLoc();
5763 SMLoc AlignmentLoc = Tok.getLoc();
5764
5765 const MCExpr *Expr;
5766 if (getParser().parseExpression(Expr))
5767 return true;
5768
5769 // The expression has to be a constant. Memory references with relocations
5770 // don't come through here, as they use the <label> forms of the relevant
5771 // instructions.
5772 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Expr);
5773 if (!CE)
5774 return Error (E, "constant expression expected");
5775
5776 unsigned Align = 0;
5777 switch (CE->getValue()) {
5778 default:
5779 return Error(E,
5780 "alignment specifier must be 16, 32, 64, 128, or 256 bits");
5781 case 16: Align = 2; break;
5782 case 32: Align = 4; break;
5783 case 64: Align = 8; break;
5784 case 128: Align = 16; break;
5785 case 256: Align = 32; break;
5786 }
5787
5788 // Now we should have the closing ']'
5789 if (Parser.getTok().isNot(AsmToken::RBrac))
5790 return Error(Parser.getTok().getLoc(), "']' expected");
5791 E = Parser.getTok().getEndLoc();
5792 Parser.Lex(); // Eat right bracket token.
5793
5794 // Don't worry about range checking the value here. That's handled by
5795 // the is*() predicates.
5796 Operands.push_back(ARMOperand::CreateMem(BaseRegNum, nullptr, 0,
5797 ARM_AM::no_shift, 0, Align,
5798 false, S, E, AlignmentLoc));
5799
5800 // If there's a pre-indexing writeback marker, '!', just add it as a token
5801 // operand.
5802 if (Parser.getTok().is(AsmToken::Exclaim)) {
5803 Operands.push_back(ARMOperand::CreateToken("!",Parser.getTok().getLoc()));
5804 Parser.Lex(); // Eat the '!'.
5805 }
5806
5807 return false;
5808 }
5809
5810 // If we have a '#' or '$', it's an immediate offset, else assume it's a
5811 // register offset. Be friendly and also accept a plain integer or expression
5812 // (without a leading hash) for gas compatibility.
5813 if (Parser.getTok().is(AsmToken::Hash) ||
5814 Parser.getTok().is(AsmToken::Dollar) ||
5815 Parser.getTok().is(AsmToken::LParen) ||
5816 Parser.getTok().is(AsmToken::Integer)) {
5817 if (Parser.getTok().is(AsmToken::Hash) ||
5818 Parser.getTok().is(AsmToken::Dollar))
5819 Parser.Lex(); // Eat '#' or '$'
5820 E = Parser.getTok().getLoc();
5821
5822 bool isNegative = getParser().getTok().is(AsmToken::Minus);
5823 const MCExpr *Offset;
5824 if (getParser().parseExpression(Offset))
5825 return true;
5826
5827 // The expression has to be a constant. Memory references with relocations
5828 // don't come through here, as they use the <label> forms of the relevant
5829 // instructions.
5830 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Offset);
5831 if (!CE)
5832 return Error (E, "constant expression expected");
5833
5834 // If the constant was #-0, represent it as
5835 // std::numeric_limits<int32_t>::min().
5836 int32_t Val = CE->getValue();
5837 if (isNegative && Val == 0)
5838 CE = MCConstantExpr::create(std::numeric_limits<int32_t>::min(),
5839 getContext());
5840
5841 // Now we should have the closing ']'
5842 if (Parser.getTok().isNot(AsmToken::RBrac))
5843 return Error(Parser.getTok().getLoc(), "']' expected");
5844 E = Parser.getTok().getEndLoc();
5845 Parser.Lex(); // Eat right bracket token.
5846
5847 // Don't worry about range checking the value here. That's handled by
5848 // the is*() predicates.
5849 Operands.push_back(ARMOperand::CreateMem(BaseRegNum, CE, 0,
5850 ARM_AM::no_shift, 0, 0,
5851 false, S, E));
5852
5853 // If there's a pre-indexing writeback marker, '!', just add it as a token
5854 // operand.
5855 if (Parser.getTok().is(AsmToken::Exclaim)) {
5856 Operands.push_back(ARMOperand::CreateToken("!",Parser.getTok().getLoc()));
5857 Parser.Lex(); // Eat the '!'.
5858 }
5859
5860 return false;
5861 }
5862
5863 // The register offset is optionally preceded by a '+' or '-'
5864 bool isNegative = false;
5865 if (Parser.getTok().is(AsmToken::Minus)) {
5866 isNegative = true;
5867 Parser.Lex(); // Eat the '-'.
5868 } else if (Parser.getTok().is(AsmToken::Plus)) {
5869 // Nothing to do.
5870 Parser.Lex(); // Eat the '+'.
5871 }
5872
5873 E = Parser.getTok().getLoc();
5874 int OffsetRegNum = tryParseRegister();
5875 if (OffsetRegNum == -1)
5876 return Error(E, "register expected");
5877
5878 // If there's a shift operator, handle it.
5879 ARM_AM::ShiftOpc ShiftType = ARM_AM::no_shift;
5880 unsigned ShiftImm = 0;
5881 if (Parser.getTok().is(AsmToken::Comma)) {
5882 Parser.Lex(); // Eat the ','.
5883 if (parseMemRegOffsetShift(ShiftType, ShiftImm))
5884 return true;
5885 }
5886
5887 // Now we should have the closing ']'
5888 if (Parser.getTok().isNot(AsmToken::RBrac))
5889 return Error(Parser.getTok().getLoc(), "']' expected");
5890 E = Parser.getTok().getEndLoc();
5891 Parser.Lex(); // Eat right bracket token.
5892
5893 Operands.push_back(ARMOperand::CreateMem(BaseRegNum, nullptr, OffsetRegNum,
5894 ShiftType, ShiftImm, 0, isNegative,
5895 S, E));
5896
5897 // If there's a pre-indexing writeback marker, '!', just add it as a token
5898 // operand.
5899 if (Parser.getTok().is(AsmToken::Exclaim)) {
5900 Operands.push_back(ARMOperand::CreateToken("!",Parser.getTok().getLoc()));
5901 Parser.Lex(); // Eat the '!'.
5902 }
5903
5904 return false;
5905}
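// Illustrative examples of memory operand forms recognized above (assuming
// standard ARM assembly; shown only for orientation):
//   [r0]                @ base register only
//   [r0, #16]           @ immediate offset ("#-0" is kept distinct from "#0")
//   [r0, r1, lsl #2]    @ register offset with optional shift
//   [r0:128]            @ alignment specifier (16/32/64/128/256 bits)
//   [r0, #4]!           @ trailing '!' writeback marker kept as a token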
5906
5907/// parseMemRegOffsetShift - parse one of these two forms:
5908/// ( lsl | lsr | asr | ror ) , # shift_amount
5909/// rrx
5910/// Returns false on success; reports an error and returns true otherwise.
5911bool ARMAsmParser::parseMemRegOffsetShift(ARM_AM::ShiftOpc &St,
5912 unsigned &Amount) {
5913 MCAsmParser &Parser = getParser();
5914 SMLoc Loc = Parser.getTok().getLoc();
5915 const AsmToken &Tok = Parser.getTok();
5916 if (Tok.isNot(AsmToken::Identifier))
5917 return Error(Loc, "illegal shift operator");
5918 StringRef ShiftName = Tok.getString();
5919 if (ShiftName == "lsl" || ShiftName == "LSL" ||
5920 ShiftName == "asl" || ShiftName == "ASL")
5921 St = ARM_AM::lsl;
5922 else if (ShiftName == "lsr" || ShiftName == "LSR")
5923 St = ARM_AM::lsr;
5924 else if (ShiftName == "asr" || ShiftName == "ASR")
5925 St = ARM_AM::asr;
5926 else if (ShiftName == "ror" || ShiftName == "ROR")
5927 St = ARM_AM::ror;
5928 else if (ShiftName == "rrx" || ShiftName == "RRX")
5929 St = ARM_AM::rrx;
5930 else if (ShiftName == "uxtw" || ShiftName == "UXTW")
5931 St = ARM_AM::uxtw;
5932 else
5933 return Error(Loc, "illegal shift operator");
5934 Parser.Lex(); // Eat shift type token.
5935
5936 // rrx stands alone.
5937 Amount = 0;
5938 if (St != ARM_AM::rrx) {
5939 Loc = Parser.getTok().getLoc();
5940 // A '#' and a shift amount.
5941 const AsmToken &HashTok = Parser.getTok();
5942 if (HashTok.isNot(AsmToken::Hash) &&
5943 HashTok.isNot(AsmToken::Dollar))
5944 return Error(HashTok.getLoc(), "'#' expected");
5945 Parser.Lex(); // Eat hash token.
5946
5947 const MCExpr *Expr;
5948 if (getParser().parseExpression(Expr))
5949 return true;
5950 // Range check the immediate.
5951 // lsl, ror: 0 <= imm <= 31
5952 // lsr, asr: 0 <= imm <= 32
5953 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Expr);
5954 if (!CE)
5955 return Error(Loc, "shift amount must be an immediate");
5956 int64_t Imm = CE->getValue();
5957 if (Imm < 0 ||
5958 ((St == ARM_AM::lsl || St == ARM_AM::ror) && Imm > 31) ||
5959 ((St == ARM_AM::lsr || St == ARM_AM::asr) && Imm > 32))
5960 return Error(Loc, "immediate shift value out of range");
5961 // <ShiftTy> #0 is canonicalized to lsl #0, which represents no shift here.
5962 if (Imm == 0)
5963 St = ARM_AM::lsl;
5964 // For consistency, treat lsr #32 and asr #32 as having immediate value 0.
5965 if (Imm == 32)
5966 Imm = 0;
5967 Amount = Imm;
5968 }
5969
5970 return false;
5971}
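// Illustrative examples (assuming standard ARM assembly): "[r0, r1, lsl #2]"
// yields St == lsl with Amount == 2, "[r0, r1, rrx]" takes the stand-alone
// rrx path, and "lsr #32" / "asr #32" are treated as immediate value 0, as
// noted above.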
5972
5973/// parseFPImm - A floating point immediate expression operand.
5974OperandMatchResultTy
5975ARMAsmParser::parseFPImm(OperandVector &Operands) {
5976 MCAsmParser &Parser = getParser();
5977 // Anything that can accept a floating point constant as an operand
5978 // needs to go through here, as the regular parseExpression is
5979 // integer only.
5980 //
5981 // This routine still creates a generic Immediate operand, containing
5982 // a bitcast of the 64-bit floating point value. The various operands
5983 // that accept floats can check whether the value is valid for them
5984 // via the standard is*() predicates.
5985
5986 SMLoc S = Parser.getTok().getLoc();
5987
5988 if (Parser.getTok().isNot(AsmToken::Hash) &&
5989 Parser.getTok().isNot(AsmToken::Dollar))
5990 return MatchOperand_NoMatch;
5991
5992 // Disambiguate the VMOV forms that can accept an FP immediate.
5993 // vmov.f32 <sreg>, #imm
5994 // vmov.f64 <dreg>, #imm
5995 // vmov.f32 <dreg>, #imm @ vector f32x2
5996 // vmov.f32 <qreg>, #imm @ vector f32x4
5997 //
5998 // There are also the NEON VMOV instructions which expect an
5999 // integer constant. Make sure we don't try to parse an FPImm
6000 // for these:
6001 // vmov.i{8|16|32|64} <dreg|qreg>, #imm
6002 ARMOperand &TyOp = static_cast<ARMOperand &>(*Operands[2]);
6003 bool isVmovf = TyOp.isToken() &&
6004 (TyOp.getToken() == ".f32" || TyOp.getToken() == ".f64" ||
6005 TyOp.getToken() == ".f16");
6006 ARMOperand &Mnemonic = static_cast<ARMOperand &>(*Operands[0]);
6007 bool isFconst = Mnemonic.isToken() && (Mnemonic.getToken() == "fconstd" ||
6008 Mnemonic.getToken() == "fconsts");
6009 if (!(isVmovf || isFconst))
6010 return MatchOperand_NoMatch;
6011
6012 Parser.Lex(); // Eat '#' or '$'.
6013
6014 // Handle negation, as that still comes through as a separate token.
6015 bool isNegative = false;
6016 if (Parser.getTok().is(AsmToken::Minus)) {
6017 isNegative = true;
6018 Parser.Lex();
6019 }
6020 const AsmToken &Tok = Parser.getTok();
6021 SMLoc Loc = Tok.getLoc();
6022 if (Tok.is(AsmToken::Real) && isVmovf) {
6023 APFloat RealVal(APFloat::IEEEsingle(), Tok.getString());
6024 uint64_t IntVal = RealVal.bitcastToAPInt().getZExtValue();
6025 // If we had a '-' in front, toggle the sign bit.
6026 IntVal ^= (uint64_t)isNegative << 31;
6027 Parser.Lex(); // Eat the token.
6028 Operands.push_back(ARMOperand::CreateImm(
6029 MCConstantExpr::create(IntVal, getContext()),
6030 S, Parser.getTok().getLoc()));
6031 return MatchOperand_Success;
6032 }
6033 // Also handle plain integers. Instructions which allow floating point
6034 // immediates also allow a raw encoded 8-bit value.
6035 if (Tok.is(AsmToken::Integer) && isFconst) {
6036 int64_t Val = Tok.getIntVal();
6037 Parser.Lex(); // Eat the token.
6038 if (Val > 255 || Val < 0) {
6039 Error(Loc, "encoded floating point value out of range");
6040 return MatchOperand_ParseFail;
6041 }
6042 float RealVal = ARM_AM::getFPImmFloat(Val);
6043 Val = APFloat(RealVal).bitcastToAPInt().getZExtValue();
6044
6045 Operands.push_back(ARMOperand::CreateImm(
6046 MCConstantExpr::create(Val, getContext()), S,
6047 Parser.getTok().getLoc()));
6048 return MatchOperand_Success;
6049 }
6050
6051 Error(Loc, "invalid floating point immediate");
6052 return MatchOperand_ParseFail;
6053}
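// Illustrative examples (assuming standard ARM assembly): "vmov.f32 s0, #1.0"
// reaches the Real-token path and stores the bit pattern of 1.0f, while the
// legacy "fconsts s0, #112" form supplies a raw 8-bit encoded value handled
// by the Integer-token path.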
6054
6055/// Parse an ARM instruction operand. For now this parses the operand regardless
6056/// of the mnemonic.
6057bool ARMAsmParser::parseOperand(OperandVector &Operands, StringRef Mnemonic) {
6058 MCAsmParser &Parser = getParser();
6059 SMLoc S, E;
6060
6061 // Check if the current operand has a custom associated parser, if so, try to
6062 // custom parse the operand, or fallback to the general approach.
6063 OperandMatchResultTy ResTy = MatchOperandParserImpl(Operands, Mnemonic);
6064 if (ResTy == MatchOperand_Success)
6065 return false;
6066 // If there wasn't a custom match, try the generic matcher below. Otherwise,
6067 // there was a match, but an error occurred, in which case, just return that
6068 // the operand parsing failed.
6069 if (ResTy == MatchOperand_ParseFail)
6070 return true;
6071
6072 switch (getLexer().getKind()) {
6073 default:
6074 Error(Parser.getTok().getLoc(), "unexpected token in operand");
6075 return true;
6076 case AsmToken::Identifier: {
6077 // If we've seen a branch mnemonic, the next operand must be a label. This
6078 // is true even if the label is a register name. So "b r1" means branch to
6079 // label "r1".
6080 bool ExpectLabel = Mnemonic == "b" || Mnemonic == "bl";
6081 if (!ExpectLabel) {
6082 if (!tryParseRegisterWithWriteBack(Operands))
6083 return false;
6084 int Res = tryParseShiftRegister(Operands);
6085 if (Res == 0) // success
6086 return false;
6087 else if (Res == -1) // irrecoverable error
6088 return true;
6089 // If this is VMRS, check for the apsr_nzcv operand.
6090 if (Mnemonic == "vmrs" &&
6091 Parser.getTok().getString().equals_lower("apsr_nzcv")) {
6092 S = Parser.getTok().getLoc();
6093 Parser.Lex();
6094 Operands.push_back(ARMOperand::CreateToken("APSR_nzcv", S));
6095 return false;
6096 }
6097 }
6098
6099 // Fall through for the Identifier case that is not a register or a
6100 // special name.
6101 LLVM_FALLTHROUGH;
6102 }
6103 case AsmToken::LParen: // parenthesized expressions like (_strcmp-4)
6104 case AsmToken::Integer: // things like 1f and 2b as a branch targets
6105 case AsmToken::String: // quoted label names.
6106 case AsmToken::Dot: { // . as a branch target
6107 // This was not a register so parse other operands that start with an
6108 // identifier (like labels) as expressions and create them as immediates.
6109 const MCExpr *IdVal;
6110 S = Parser.getTok().getLoc();
6111 if (getParser().parseExpression(IdVal))
6112 return true;
6113 E = SMLoc::getFromPointer(Parser.getTok().getLoc().getPointer() - 1);
6114 Operands.push_back(ARMOperand::CreateImm(IdVal, S, E));
6115 return false;
6116 }
6117 case AsmToken::LBrac:
6118 return parseMemory(Operands);
6119 case AsmToken::LCurly:
6120 return parseRegisterList(Operands, !Mnemonic.startswith("clr"));
6121 case AsmToken::Dollar:
6122 case AsmToken::Hash: {
6123 // #42 -> immediate
6124 // $ 42 -> immediate
6125 // $foo -> symbol name
6126 // $42 -> symbol name
6127 S = Parser.getTok().getLoc();
6128
6129 // Favor the interpretation of $-prefixed operands as symbol names.
6130 // Cases where immediates are explicitly expected are handled by their
6131 // specific ParseMethod implementations.
6132 auto AdjacentToken = getLexer().peekTok(/*ShouldSkipSpace=*/false);
6133 bool ExpectIdentifier = Parser.getTok().is(AsmToken::Dollar) &&
6134 (AdjacentToken.is(AsmToken::Identifier) ||
6135 AdjacentToken.is(AsmToken::Integer));
6136 if (!ExpectIdentifier) {
6137 // Token is not part of identifier. Drop leading $ or # before parsing
6138 // expression.
6139 Parser.Lex();
6140 }
6141
6142 if (Parser.getTok().isNot(AsmToken::Colon)) {
6143 bool IsNegative = Parser.getTok().is(AsmToken::Minus);
6144 const MCExpr *ImmVal;
6145 if (getParser().parseExpression(ImmVal))
6146 return true;
6147 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(ImmVal);
6148 if (CE) {
6149 int32_t Val = CE->getValue();
6150 if (IsNegative && Val == 0)
6151 ImmVal = MCConstantExpr::create(std::numeric_limits<int32_t>::min(),
6152 getContext());
6153 }
6154 E = SMLoc::getFromPointer(Parser.getTok().getLoc().getPointer() - 1);
6155 Operands.push_back(ARMOperand::CreateImm(ImmVal, S, E));
6156
6157 // There can be a trailing '!' on operands that we want as a separate
6158 // '!' Token operand. Handle that here. For example, the compatibility
6159 // alias for 'srsdb sp!, #imm' is 'srsdb #imm!'.
6160 if (Parser.getTok().is(AsmToken::Exclaim)) {
6161 Operands.push_back(ARMOperand::CreateToken(Parser.getTok().getString(),
6162 Parser.getTok().getLoc()));
6163 Parser.Lex(); // Eat exclaim token
6164 }
6165 return false;
6166 }
6167 // w/ a ':' after the '#', it's just like a plain ':'.
6168 LLVM_FALLTHROUGH;
6169 }
6170 case AsmToken::Colon: {
6171 S = Parser.getTok().getLoc();
6172 // ":lower16:" and ":upper16:" expression prefixes
6173 // FIXME: Check it's an expression prefix,
6174 // e.g. (FOO - :lower16:BAR) isn't legal.
6175 ARMMCExpr::VariantKind RefKind;
6176 if (parsePrefix(RefKind))
6177 return true;
6178
6179 const MCExpr *SubExprVal;
6180 if (getParser().parseExpression(SubExprVal))
6181 return true;
6182
6183 const MCExpr *ExprVal = ARMMCExpr::create(RefKind, SubExprVal,
6184 getContext());
6185 E = SMLoc::getFromPointer(Parser.getTok().getLoc().getPointer() - 1);
6186 Operands.push_back(ARMOperand::CreateImm(ExprVal, S, E));
6187 return false;
6188 }
6189 case AsmToken::Equal: {
6190 S = Parser.getTok().getLoc();
6191 if (Mnemonic != "ldr") // only parse for ldr pseudo (e.g. ldr r0, =val)
6192 return Error(S, "unexpected token in operand");
6193 Parser.Lex(); // Eat '='
6194 const MCExpr *SubExprVal;
6195 if (getParser().parseExpression(SubExprVal))
6196 return true;
6197 E = SMLoc::getFromPointer(Parser.getTok().getLoc().getPointer() - 1);
6198
6199 // execute-only: we assume that assembly programmers know what they are
6200 // doing and allow literal pool creation here
6201 Operands.push_back(ARMOperand::CreateConstantPoolImm(SubExprVal, S, E));
6202 return false;
6203 }
6204 }
6205}
6206
6207// parsePrefix - Parse ARM 16-bit relocations expression prefix, i.e.
6208// :lower16: and :upper16:.
6209bool ARMAsmParser::parsePrefix(ARMMCExpr::VariantKind &RefKind) {
6210 MCAsmParser &Parser = getParser();
6211 RefKind = ARMMCExpr::VK_ARM_None;
6212
6213 // consume an optional '#' (GNU compatibility)
6214 if (getLexer().is(AsmToken::Hash))
6215 Parser.Lex();
6216
6217 // :lower16: and :upper16: modifiers
6218 assert(getLexer().is(AsmToken::Colon) && "expected a :");
6219 Parser.Lex(); // Eat ':'
6220
6221 if (getLexer().isNot(AsmToken::Identifier)) {
6222 Error(Parser.getTok().getLoc(), "expected prefix identifier in operand");
6223 return true;
6224 }
6225
6226 enum {
6227 COFF = (1 << MCObjectFileInfo::IsCOFF),
6228 ELF = (1 << MCObjectFileInfo::IsELF),
6229 MACHO = (1 << MCObjectFileInfo::IsMachO),
6230 WASM = (1 << MCObjectFileInfo::IsWasm),
6231 };
6232 static const struct PrefixEntry {
6233 const char *Spelling;
6234 ARMMCExpr::VariantKind VariantKind;
6235 uint8_t SupportedFormats;
6236 } PrefixEntries[] = {
6237 { "lower16", ARMMCExpr::VK_ARM_LO16, COFF | ELF | MACHO },
6238 { "upper16", ARMMCExpr::VK_ARM_HI16, COFF | ELF | MACHO },
6239 };
6240
6241 StringRef IDVal = Parser.getTok().getIdentifier();
6242
6243 const auto &Prefix =
6244 std::find_if(std::begin(PrefixEntries), std::end(PrefixEntries),
6245 [&IDVal](const PrefixEntry &PE) {
6246 return PE.Spelling == IDVal;
6247 });
6248 if (Prefix == std::end(PrefixEntries)) {
6249 Error(Parser.getTok().getLoc(), "unexpected prefix in operand");
6250 return true;
6251 }
6252
6253 uint8_t CurrentFormat;
6254 switch (getContext().getObjectFileInfo()->getObjectFileType()) {
6255 case MCObjectFileInfo::IsMachO:
6256 CurrentFormat = MACHO;
6257 break;
6258 case MCObjectFileInfo::IsELF:
6259 CurrentFormat = ELF;
6260 break;
6261 case MCObjectFileInfo::IsCOFF:
6262 CurrentFormat = COFF;
6263 break;
6264 case MCObjectFileInfo::IsWasm:
6265 CurrentFormat = WASM;
6266 break;
6267 case MCObjectFileInfo::IsXCOFF:
6268 llvm_unreachable("unexpected object format");
6269 break;
6270 }
6271
6272 if (~Prefix->SupportedFormats & CurrentFormat) {
6273 Error(Parser.getTok().getLoc(),
6274 "cannot represent relocation in the current file format");
6275 return true;
6276 }
6277
6278 RefKind = Prefix->VariantKind;
6279 Parser.Lex();
6280
6281 if (getLexer().isNot(AsmToken::Colon)) {
6282 Error(Parser.getTok().getLoc(), "unexpected token after prefix");
6283 return true;
6284 }
6285 Parser.Lex(); // Eat the last ':'
6286
6287 return false;
6288}
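// Editor-added sketch: ":lower16:" / ":upper16:" select the low / high
// halfword of a 32-bit value, typically paired as
//   movw r0, #:lower16:sym
//   movt r0, #:upper16:sym
#include <cassert>
#include <cstdint>

static uint16_t lower16(uint32_t V) { return V & 0xFFFFu; }
static uint16_t upper16(uint32_t V) { return V >> 16; }

int main() {
  assert(lower16(0x12345678u) == 0x5678u);
  assert(upper16(0x12345678u) == 0x1234u);
}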
6289
6290/// Given a mnemonic, split out possible predication code and carry
6291/// setting letters to form a canonical mnemonic and flags.
6292//
6293// FIXME: Would be nice to autogen this.
6294// FIXME: This is a bit of a maze of special cases.
6295StringRef ARMAsmParser::splitMnemonic(StringRef Mnemonic,
6296 StringRef ExtraToken,
6297 unsigned &PredicationCode,
6298 unsigned &VPTPredicationCode,
6299 bool &CarrySetting,
6300 unsigned &ProcessorIMod,
6301 StringRef &ITMask) {
6302 PredicationCode = ARMCC::AL;
6303 VPTPredicationCode = ARMVCC::None;
6304 CarrySetting = false;
6305 ProcessorIMod = 0;
6306
6307 // Ignore some mnemonics we know aren't predicated forms.
6308 //
6309 // FIXME: Would be nice to autogen this.
6310 if ((Mnemonic == "movs" && isThumb()) ||
6311 Mnemonic == "teq" || Mnemonic == "vceq" || Mnemonic == "svc" ||
6312 Mnemonic == "mls" || Mnemonic == "smmls" || Mnemonic == "vcls" ||
6313 Mnemonic == "vmls" || Mnemonic == "vnmls" || Mnemonic == "vacge" ||
6314 Mnemonic == "vcge" || Mnemonic == "vclt" || Mnemonic == "vacgt" ||
6315 Mnemonic == "vaclt" || Mnemonic == "vacle" || Mnemonic == "hlt" ||
6316 Mnemonic == "vcgt" || Mnemonic == "vcle" || Mnemonic == "smlal" ||
6317 Mnemonic == "umaal" || Mnemonic == "umlal" || Mnemonic == "vabal" ||
6318 Mnemonic == "vmlal" || Mnemonic == "vpadal" || Mnemonic == "vqdmlal" ||
6319 Mnemonic == "fmuls" || Mnemonic == "vmaxnm" || Mnemonic == "vminnm" ||
6320 Mnemonic == "vcvta" || Mnemonic == "vcvtn" || Mnemonic == "vcvtp" ||
6321 Mnemonic == "vcvtm" || Mnemonic == "vrinta" || Mnemonic == "vrintn" ||
6322 Mnemonic == "vrintp" || Mnemonic == "vrintm" || Mnemonic == "hvc" ||
6323 Mnemonic.startswith("vsel") || Mnemonic == "vins" || Mnemonic == "vmovx" ||
6324 Mnemonic == "bxns" || Mnemonic == "blxns" ||
6325 Mnemonic == "vudot" || Mnemonic == "vsdot" ||
6326 Mnemonic == "vcmla" || Mnemonic == "vcadd" ||
6327 Mnemonic == "vfmal" || Mnemonic == "vfmsl" ||
6328 Mnemonic == "wls" || Mnemonic == "le" || Mnemonic == "dls" ||
6329 Mnemonic == "csel" || Mnemonic == "csinc" ||
6330 Mnemonic == "csinv" || Mnemonic == "csneg" || Mnemonic == "cinc" ||
6331 Mnemonic == "cinv" || Mnemonic == "cneg" || Mnemonic == "cset" ||
6332 Mnemonic == "csetm")
6333 return Mnemonic;
6334
6335 // First, split out any predication code. Ignore mnemonics we know aren't
6336 // predicated but do have a carry-set and so weren't caught above.
6337 if (Mnemonic != "adcs" && Mnemonic != "bics" && Mnemonic != "movs" &&
6338 Mnemonic != "muls" && Mnemonic != "smlals" && Mnemonic != "smulls" &&
6339 Mnemonic != "umlals" && Mnemonic != "umulls" && Mnemonic != "lsls" &&
6340 Mnemonic != "sbcs" && Mnemonic != "rscs" &&
6341 !(hasMVE() &&
6342 (Mnemonic == "vmine" ||
6343 Mnemonic == "vshle" || Mnemonic == "vshlt" || Mnemonic == "vshllt" ||
6344 Mnemonic == "vrshle" || Mnemonic == "vrshlt" ||
6345 Mnemonic == "vmvne" || Mnemonic == "vorne" ||
6346 Mnemonic == "vnege" || Mnemonic == "vnegt" ||
6347 Mnemonic == "vmule" || Mnemonic == "vmult" ||
6348 Mnemonic == "vrintne" ||
6349 Mnemonic == "vcmult" || Mnemonic == "vcmule" ||
6350 Mnemonic == "vpsele" || Mnemonic == "vpselt" ||
6351 Mnemonic.startswith("vq")))) {
6352 unsigned CC = ARMCondCodeFromString(Mnemonic.substr(Mnemonic.size()-2));
6353 if (CC != ~0U) {
6354 Mnemonic = Mnemonic.slice(0, Mnemonic.size() - 2);
6355 PredicationCode = CC;
6356 }
6357 }
6358
6359 // Next, determine if we have a carry setting bit. We explicitly ignore all
6360 // the instructions we know end in 's'.
6361 if (Mnemonic.endswith("s") &&
6362 !(Mnemonic == "cps" || Mnemonic == "mls" ||
6363 Mnemonic == "mrs" || Mnemonic == "smmls" || Mnemonic == "vabs" ||
6364 Mnemonic == "vcls" || Mnemonic == "vmls" || Mnemonic == "vmrs" ||
6365 Mnemonic == "vnmls" || Mnemonic == "vqabs" || Mnemonic == "vrecps" ||
6366 Mnemonic == "vrsqrts" || Mnemonic == "srs" || Mnemonic == "flds" ||
6367 Mnemonic == "fmrs" || Mnemonic == "fsqrts" || Mnemonic == "fsubs" ||
6368 Mnemonic == "fsts" || Mnemonic == "fcpys" || Mnemonic == "fdivs" ||
6369 Mnemonic == "fmuls" || Mnemonic == "fcmps" || Mnemonic == "fcmpzs" ||
6370 Mnemonic == "vfms" || Mnemonic == "vfnms" || Mnemonic == "fconsts" ||
6371 Mnemonic == "bxns" || Mnemonic == "blxns" || Mnemonic == "vfmas" ||
6372 Mnemonic == "vmlas" ||
6373 (Mnemonic == "movs" && isThumb()))) {
6374 Mnemonic = Mnemonic.slice(0, Mnemonic.size() - 1);
6375 CarrySetting = true;
6376 }
6377
6378 // The "cps" instruction can have a interrupt mode operand which is glued into
6379 // the mnemonic. Check if this is the case, split it and parse the imod op
6380 if (Mnemonic.startswith("cps")) {
6381 // Split out any imod code.
6382 unsigned IMod =
6383 StringSwitch<unsigned>(Mnemonic.substr(Mnemonic.size()-2, 2))
6384 .Case("ie", ARM_PROC::IE)
6385 .Case("id", ARM_PROC::ID)
6386 .Default(~0U);
6387 if (IMod != ~0U) {
6388 Mnemonic = Mnemonic.slice(0, Mnemonic.size()-2);
6389 ProcessorIMod = IMod;
6390 }
6391 }
6392
6393 if (isMnemonicVPTPredicable(Mnemonic, ExtraToken) && Mnemonic != "vmovlt" &&
6394 Mnemonic != "vshllt" && Mnemonic != "vrshrnt" && Mnemonic != "vshrnt" &&
6395 Mnemonic != "vqrshrunt" && Mnemonic != "vqshrunt" &&
6396 Mnemonic != "vqrshrnt" && Mnemonic != "vqshrnt" && Mnemonic != "vmullt" &&
6397 Mnemonic != "vqmovnt" && Mnemonic != "vqmovunt" &&
6398 Mnemonic != "vqmovnt" && Mnemonic != "vmovnt" && Mnemonic != "vqdmullt" &&
6399 Mnemonic != "vpnot" && Mnemonic != "vcvtt" && Mnemonic != "vcvt") {
6400 unsigned CC = ARMVectorCondCodeFromString(Mnemonic.substr(Mnemonic.size()-1));
6401 if (CC != ~0U) {
6402 Mnemonic = Mnemonic.slice(0, Mnemonic.size()-1);
6403 VPTPredicationCode = CC;
6404 }
6405 return Mnemonic;
6406 }
6407
6408 // The "it" instruction has the condition mask on the end of the mnemonic.
6409 if (Mnemonic.startswith("it")) {
6410 ITMask = Mnemonic.slice(2, Mnemonic.size());
6411 Mnemonic = Mnemonic.slice(0, 2);
6412 }
6413
6414 if (Mnemonic.startswith("vpst")) {
6415 ITMask = Mnemonic.slice(4, Mnemonic.size());
6416 Mnemonic = Mnemonic.slice(0, 4);
6417 }
6418 else if (Mnemonic.startswith("vpt")) {
6419 ITMask = Mnemonic.slice(3, Mnemonic.size());
6420 Mnemonic = Mnemonic.slice(0, 3);
6421 }
6422
6423 return Mnemonic;
6424}
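// Editor-added sketch of the two stripping steps above (condition code first,
// then the carry-setting 's'), using plain std::string instead of StringRef
// and ARMCondCodeFromString: "addseq" -> mnemonic "add", cond "eq", S set.
#include <cassert>
#include <string>

struct Split { std::string Mnemonic, Cond; bool CarrySetting; };

static Split splitLikeAbove(std::string M) {
  Split R{M, "al", false};
  static const char *Conds[] = {"eq", "ne", "cs", "cc", "mi", "pl", "vs",
                                "vc", "hi", "ls", "ge", "lt", "gt", "le"};
  if (R.Mnemonic.size() > 2) {
    std::string Tail = R.Mnemonic.substr(R.Mnemonic.size() - 2);
    for (const char *C : Conds)
      if (Tail == C) { // split out the predication code
        R.Cond = Tail;
        R.Mnemonic.resize(R.Mnemonic.size() - 2);
        break;
      }
  }
  if (!R.Mnemonic.empty() && R.Mnemonic.back() == 's') { // carry-setting bit
    R.CarrySetting = true;
    R.Mnemonic.pop_back();
  }
  return R;
}

int main() {
  Split R = splitLikeAbove("addseq");
  assert(R.Mnemonic == "add" && R.Cond == "eq" && R.CarrySetting);
}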
6425
6426/// Given a canonical mnemonic, determine if the instruction ever allows
6427/// inclusion of carry set or predication code operands.
6428//
6429// FIXME: It would be nice to autogen this.
6430void ARMAsmParser::getMnemonicAcceptInfo(StringRef Mnemonic,
6431 StringRef ExtraToken,
6432 StringRef FullInst,
6433 bool &CanAcceptCarrySet,
6434 bool &CanAcceptPredicationCode,
6435 bool &CanAcceptVPTPredicationCode) {
6436 CanAcceptVPTPredicationCode = isMnemonicVPTPredicable(Mnemonic, ExtraToken);
6437
6438 CanAcceptCarrySet =
6439 Mnemonic == "and" || Mnemonic == "lsl" || Mnemonic == "lsr" ||
6440 Mnemonic == "rrx" || Mnemonic == "ror" || Mnemonic == "sub" ||
6441 Mnemonic == "add" || Mnemonic == "adc" || Mnemonic == "mul" ||
6442 Mnemonic == "bic" || Mnemonic == "asr" || Mnemonic == "orr" ||
6443 Mnemonic == "mvn" || Mnemonic == "rsb" || Mnemonic == "rsc" ||
6444 Mnemonic == "orn" || Mnemonic == "sbc" || Mnemonic == "eor" ||
6445 Mnemonic == "neg" || Mnemonic == "vfm" || Mnemonic == "vfnm" ||
6446 (!isThumb() &&
6447 (Mnemonic == "smull" || Mnemonic == "mov" || Mnemonic == "mla" ||
6448 Mnemonic == "smlal" || Mnemonic == "umlal" || Mnemonic == "umull"));
6449
6450 if (Mnemonic == "bkpt" || Mnemonic == "cbnz" || Mnemonic == "setend" ||
6451 Mnemonic == "cps" || Mnemonic == "it" || Mnemonic == "cbz" ||
6452 Mnemonic == "trap" || Mnemonic == "hlt" || Mnemonic == "udf" ||
6453 Mnemonic.startswith("crc32") || Mnemonic.startswith("cps") ||
6454 Mnemonic.startswith("vsel") || Mnemonic == "vmaxnm" ||
6455 Mnemonic == "vminnm" || Mnemonic == "vcvta" || Mnemonic == "vcvtn" ||
6456 Mnemonic == "vcvtp" || Mnemonic == "vcvtm" || Mnemonic == "vrinta" ||
6457 Mnemonic == "vrintn" || Mnemonic == "vrintp" || Mnemonic == "vrintm" ||
6458 Mnemonic.startswith("aes") || Mnemonic == "hvc" || Mnemonic == "setpan" ||
6459 Mnemonic.startswith("sha1") || Mnemonic.startswith("sha256") ||
6460 (FullInst.startswith("vmull") && FullInst.endswith(".p64")) ||
6461 Mnemonic == "vmovx" || Mnemonic == "vins" ||
6462 Mnemonic == "vudot" || Mnemonic == "vsdot" ||
6463 Mnemonic == "vcmla" || Mnemonic == "vcadd" ||
6464 Mnemonic == "vfmal" || Mnemonic == "vfmsl" ||
6465 Mnemonic == "sb" || Mnemonic == "ssbb" ||
6466 Mnemonic == "pssbb" ||
6467 Mnemonic == "bfcsel" || Mnemonic == "wls" ||
6468 Mnemonic == "dls" || Mnemonic == "le" || Mnemonic == "csel" ||
6469 Mnemonic == "csinc" || Mnemonic == "csinv" || Mnemonic == "csneg" ||
6470 Mnemonic == "cinc" || Mnemonic == "cinv" || Mnemonic == "cneg" ||
6471 Mnemonic == "cset" || Mnemonic == "csetm" ||
6472 Mnemonic.startswith("vpt") || Mnemonic.startswith("vpst") ||
6473 (hasCDE() && MS.isCDEInstr(Mnemonic) &&
6474 !MS.isITPredicableCDEInstr(Mnemonic)) ||
6475 (hasMVE() &&
6476 (Mnemonic.startswith("vst2") || Mnemonic.startswith("vld2") ||
6477 Mnemonic.startswith("vst4") || Mnemonic.startswith("vld4") ||
6478 Mnemonic.startswith("wlstp") || Mnemonic.startswith("dlstp") ||
6479 Mnemonic.startswith("letp")))) {
6480 // These mnemonics are never predicable
6481 CanAcceptPredicationCode = false;
6482 } else if (!isThumb()) {
6483 // Some instructions are only predicable in Thumb mode
6484 CanAcceptPredicationCode =
6485 Mnemonic != "cdp2" && Mnemonic != "clrex" && Mnemonic != "mcr2" &&
6486 Mnemonic != "mcrr2" && Mnemonic != "mrc2" && Mnemonic != "mrrc2" &&
6487 Mnemonic != "dmb" && Mnemonic != "dfb" && Mnemonic != "dsb" &&
6488 Mnemonic != "isb" && Mnemonic != "pld" && Mnemonic != "pli" &&
6489 Mnemonic != "pldw" && Mnemonic != "ldc2" && Mnemonic != "ldc2l" &&
6490 Mnemonic != "stc2" && Mnemonic != "stc2l" &&
6491 Mnemonic != "tsb" &&
6492 !Mnemonic.startswith("rfe") && !Mnemonic.startswith("srs");
6493 } else if (isThumbOne()) {
6494 if (hasV6MOps())
6495 CanAcceptPredicationCode = Mnemonic != "movs";
6496 else
6497 CanAcceptPredicationCode = Mnemonic != "nop" && Mnemonic != "movs";
6498 } else
6499 CanAcceptPredicationCode = true;
6500}
6501
6502// Some Thumb instructions have two-operand forms that are not
6503// available as three-operand forms; convert to the two-operand form if possible.
6504//
6505// FIXME: We would really like to be able to tablegen'erate this.
6506void ARMAsmParser::tryConvertingToTwoOperandForm(StringRef Mnemonic,
6507 bool CarrySetting,
6508 OperandVector &Operands) {
6509 if (Operands.size() != 6)
6510 return;
6511
6512 const auto &Op3 = static_cast<ARMOperand &>(*Operands[3]);
6513 auto &Op4 = static_cast<ARMOperand &>(*Operands[4]);
6514 if (!Op3.isReg() || !Op4.isReg())
6515 return;
6516
6517 auto Op3Reg = Op3.getReg();
6518 auto Op4Reg = Op4.getReg();
6519
6520 // For most Thumb2 cases we just generate the 3 operand form and reduce
6521 // it in processInstruction(), but the 3 operand form of ADD (t2ADDrr)
6522 // won't accept SP or PC so we do the transformation here taking care
6523 // with immediate range in the 'add sp, sp #imm' case.
6524 auto &Op5 = static_cast<ARMOperand &>(*Operands[5]);
6525 if (isThumbTwo()) {
6526 if (Mnemonic != "add")
6527 return;
6528 bool TryTransform = Op3Reg == ARM::PC || Op4Reg == ARM::PC ||
6529 (Op5.isReg() && Op5.getReg() == ARM::PC);
6530 if (!TryTransform) {
6531 TryTransform = (Op3Reg == ARM::SP || Op4Reg == ARM::SP ||
6532 (Op5.isReg() && Op5.getReg() == ARM::SP)) &&
6533 !(Op3Reg == ARM::SP && Op4Reg == ARM::SP &&
6534 Op5.isImm() && !Op5.isImm0_508s4());
6535 }
6536 if (!TryTransform)
6537 return;
6538 } else if (!isThumbOne())
6539 return;
6540
6541 if (!(Mnemonic == "add" || Mnemonic == "sub" || Mnemonic == "and" ||
6542 Mnemonic == "eor" || Mnemonic == "lsl" || Mnemonic == "lsr" ||
6543 Mnemonic == "asr" || Mnemonic == "adc" || Mnemonic == "sbc" ||
6544 Mnemonic == "ror" || Mnemonic == "orr" || Mnemonic == "bic"))
6545 return;
6546
6547 // If the first two operands of a three-operand instruction are the same,
6548 // then transform to the two-operand version of the same instruction,
6549 // e.g. 'adds r0, r0, #1' transforms to 'adds r0, #1'.
6550 bool Transform = Op3Reg == Op4Reg;
6551
6552 // For commutative operations, we might be able to transform if we swap
6553 // Op4 and Op5. The 'ADD Rdm, SP, Rdm' form is already handled specially
6554 // as tADDrsp.
6555 const ARMOperand *LastOp = &Op5;
6556 bool Swap = false;
6557 if (!Transform && Op5.isReg() && Op3Reg == Op5.getReg() &&
6558 ((Mnemonic == "add" && Op4Reg != ARM::SP) ||
6559 Mnemonic == "and" || Mnemonic == "eor" ||
6560 Mnemonic == "adc" || Mnemonic == "orr")) {
6561 Swap = true;
6562 LastOp = &Op4;
6563 Transform = true;
6564 }
6565
6566 // If both registers are the same then remove one of them from
6567 // the operand list, with certain exceptions.
6568 if (Transform) {
6569 // Don't transform 'adds Rd, Rd, Rm' or 'sub{s} Rd, Rd, Rm' because the
6570 // 2 operand forms don't exist.
6571 if (((Mnemonic == "add" && CarrySetting) || Mnemonic == "sub") &&
6572 LastOp->isReg())
6573 Transform = false;
6574
6575 // Don't transform 'add/sub{s} Rd, Rd, #imm' if the immediate fits into
6576 // 3-bits because the ARMARM says not to.
6577 if ((Mnemonic == "add" || Mnemonic == "sub") && LastOp->isImm0_7())
6578 Transform = false;
6579 }
6580
6581 if (Transform) {
6582 if (Swap)
6583 std::swap(Op4, Op5);
6584 Operands.erase(Operands.begin() + 3);
6585 }
6586}
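// Editor-added sketch of the reduction above at the operand level. For
// "adds r0, r0, #1" the vector is roughly [mnemonic, cc_out, cond, Rd, Rn,
// #imm]; when Op3 == Op4 the duplicate register is erased, leaving the
// two-operand form "adds r0, #1".
#include <cassert>
#include <string>
#include <vector>

int main() {
  std::vector<std::string> Ops = {"adds", "cc_out", "al", "r0", "r0", "#1"};
  if (Ops.size() == 6 && Ops[3] == Ops[4])
    Ops.erase(Ops.begin() + 3); // same index the parser erases
  assert((Ops == std::vector<std::string>{"adds", "cc_out", "al", "r0", "#1"}));
}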
6587
6588bool ARMAsmParser::shouldOmitCCOutOperand(StringRef Mnemonic,
6589 OperandVector &Operands) {
6590 // FIXME: This is all horribly hacky. We really need a better way to deal
6591 // with optional operands like this in the matcher table.
6592
6593 // The 'mov' mnemonic is special. One variant has a cc_out operand, while
6594 // another does not. Specifically, the MOVW instruction does not. So we
6595 // special case it here and remove the defaulted (non-setting) cc_out
6596 // operand if that's the instruction we're trying to match.
6597 //
6598 // We do this as post-processing of the explicit operands rather than just
6599 // conditionally adding the cc_out in the first place because we need
6600 // to check the type of the parsed immediate operand.
6601 if (Mnemonic == "mov" && Operands.size() > 4 && !isThumb() &&
6602 !static_cast<ARMOperand &>(*Operands[4]).isModImm() &&
6603 static_cast<ARMOperand &>(*Operands[4]).isImm0_65535Expr() &&
6604 static_cast<ARMOperand &>(*Operands[1]).getReg() == 0)
6605 return true;
6606
6607 // Register-register 'add' for thumb does not have a cc_out operand
6608 // when there are only two register operands.
6609 if (isThumb() && Mnemonic == "add" && Operands.size() == 5 &&
6610 static_cast<ARMOperand &>(*Operands[3]).isReg() &&
6611 static_cast<ARMOperand &>(*Operands[4]).isReg() &&
6612 static_cast<ARMOperand &>(*Operands[1]).getReg() == 0)
6613 return true;
6614 // Register-register 'add' for thumb does not have a cc_out operand
6615 // when it's an ADD Rdm, SP, {Rdm|#imm0_255} instruction. We do
6616 // have to check the immediate range here since Thumb2 has a variant
6617 // that can handle a different range and has a cc_out operand.
6618 if (((isThumb() && Mnemonic == "add") ||
6619 (isThumbTwo() && Mnemonic == "sub")) &&
6620 Operands.size() == 6 && static_cast<ARMOperand &>(*Operands[3]).isReg() &&
6621 static_cast<ARMOperand &>(*Operands[4]).isReg() &&
6622 static_cast<ARMOperand &>(*Operands[4]).getReg() == ARM::SP &&
6623 static_cast<ARMOperand &>(*Operands[1]).getReg() == 0 &&
6624 ((Mnemonic == "add" && static_cast<ARMOperand &>(*Operands[5]).isReg()) ||
6625 static_cast<ARMOperand &>(*Operands[5]).isImm0_1020s4()))
6626 return true;
6627 // For Thumb2, add/sub immediate does not have a cc_out operand for the
6628 // imm0_4095 variant. That's the least-preferred variant when
6629 // selecting via the generic "add" mnemonic, so to know that we
6630 // should remove the cc_out operand, we have to explicitly check that
6631 // it's not one of the other variants. Ugh.
6632 if (isThumbTwo() && (Mnemonic == "add" || Mnemonic == "sub") &&
6633 Operands.size() == 6 && static_cast<ARMOperand &>(*Operands[3]).isReg() &&
6634 static_cast<ARMOperand &>(*Operands[4]).isReg() &&
6635 static_cast<ARMOperand &>(*Operands[5]).isImm()) {
6636 // Nest conditions rather than one big 'if' statement for readability.
6637 //
6638 // If both registers are low, we're in an IT block, and the immediate is
6639 // in range, we should use encoding T1 instead, which has a cc_out.
6640 if (inITBlock() &&
6641 isARMLowRegister(static_cast<ARMOperand &>(*Operands[3]).getReg()) &&
6642 isARMLowRegister(static_cast<ARMOperand &>(*Operands[4]).getReg()) &&
6643 static_cast<ARMOperand &>(*Operands[5]).isImm0_7())
6644 return false;
6645 // Check against T3. If the second register is the PC, this is an
6646 // alternate form of ADR, which uses encoding T4, so check for that too.
6647 if (static_cast<ARMOperand &>(*Operands[4]).getReg() != ARM::PC &&
6648 (static_cast<ARMOperand &>(*Operands[5]).isT2SOImm() ||
6649 static_cast<ARMOperand &>(*Operands[5]).isT2SOImmNeg()))
6650 return false;
6651
6652 // Otherwise, we use encoding T4, which does not have a cc_out
6653 // operand.
6654 return true;
6655 }
6656
6657 // The thumb2 multiply instruction doesn't have a CCOut register, so
6658 // if we have a "mul" mnemonic in Thumb mode, check if we'll be able to
6659 // use the 16-bit encoding or not.
6660 if (isThumbTwo() && Mnemonic == "mul" && Operands.size() == 6 &&
6661 static_cast<ARMOperand &>(*Operands[1]).getReg() == 0 &&
6662 static_cast<ARMOperand &>(*Operands[3]).isReg() &&
6663 static_cast<ARMOperand &>(*Operands[4]).isReg() &&
6664 static_cast<ARMOperand &>(*Operands[5]).isReg() &&
6665 // If the registers aren't low regs, the destination reg isn't the
6666 // same as one of the source regs, or the cc_out operand is zero
6667 // outside of an IT block, we have to use the 32-bit encoding, so
6668 // remove the cc_out operand.
6669 (!isARMLowRegister(static_cast<ARMOperand &>(*Operands[3]).getReg()) ||
6670 !isARMLowRegister(static_cast<ARMOperand &>(*Operands[4]).getReg()) ||
6671 !isARMLowRegister(static_cast<ARMOperand &>(*Operands[5]).getReg()) ||
6672 !inITBlock() || (static_cast<ARMOperand &>(*Operands[3]).getReg() !=
6673 static_cast<ARMOperand &>(*Operands[5]).getReg() &&
6674 static_cast<ARMOperand &>(*Operands[3]).getReg() !=
6675 static_cast<ARMOperand &>(*Operands[4]).getReg())))
6676 return true;
6677
6678 // Also check the 'mul' syntax variant that doesn't specify an explicit
6679 // destination register.
6680 if (isThumbTwo() && Mnemonic == "mul" && Operands.size() == 5 &&
6681 static_cast<ARMOperand &>(*Operands[1]).getReg() == 0 &&
6682 static_cast<ARMOperand &>(*Operands[3]).isReg() &&
6683 static_cast<ARMOperand &>(*Operands[4]).isReg() &&
6684 // If the registers aren't low regs or the cc_out operand is zero
6685 // outside of an IT block, we have to use the 32-bit encoding, so
6686 // remove the cc_out operand.
6687 (!isARMLowRegister(static_cast<ARMOperand &>(*Operands[3]).getReg()) ||
6688 !isARMLowRegister(static_cast<ARMOperand &>(*Operands[4]).getReg()) ||
6689 !inITBlock()))
6690 return true;
6691
6692 // Register-register 'add/sub' for thumb does not have a cc_out operand
6693 // when it's an ADD/SUB SP, #imm. Be lenient on count since there's also
6694 // the "add/sub SP, SP, #imm" version. If the follow-up operands aren't
6695 // right, this will result in better diagnostics (which operand is off)
6696 // anyway.
6697 if (isThumb() && (Mnemonic == "add" || Mnemonic == "sub") &&
6698 (Operands.size() == 5 || Operands.size() == 6) &&
6699 static_cast<ARMOperand &>(*Operands[3]).isReg() &&
6700 static_cast<ARMOperand &>(*Operands[3]).getReg() == ARM::SP &&
6701 static_cast<ARMOperand &>(*Operands[1]).getReg() == 0 &&
6702 (static_cast<ARMOperand &>(*Operands[4]).isImm() ||
6703 (Operands.size() == 6 &&
6704 static_cast<ARMOperand &>(*Operands[5]).isImm()))) {
6705 // Thumb2 (add|sub){s}{p}.w GPRnopc, sp, #{T2SOImm} has cc_out
6706 return (!(isThumbTwo() &&
6707 (static_cast<ARMOperand &>(*Operands[4]).isT2SOImm() ||
6708 static_cast<ARMOperand &>(*Operands[4]).isT2SOImmNeg())));
6709 }
6710 // FIXME: Should join all the thumb+thumb2 (add|sub) in a single if case
6711 // Thumb2 ADD r0, #4095 -> ADDW r0, r0, #4095 (T4)
6712 // Thumb2 SUB r0, #4095 -> SUBW r0, r0, #4095
6713 if (isThumbTwo() && (Mnemonic == "add" || Mnemonic == "sub") &&
6714 (Operands.size() == 5) &&
6715 static_cast<ARMOperand &>(*Operands[3]).isReg() &&
6716 static_cast<ARMOperand &>(*Operands[3]).getReg() != ARM::SP &&
6717 static_cast<ARMOperand &>(*Operands[3]).getReg() != ARM::PC &&
6718 static_cast<ARMOperand &>(*Operands[1]).getReg() == 0 &&
6719 static_cast<ARMOperand &>(*Operands[4]).isImm()) {
6720 const ARMOperand &IMM = static_cast<ARMOperand &>(*Operands[4]);
6721 if (IMM.isT2SOImm() || IMM.isT2SOImmNeg())
6722 return false; // add.w / sub.w
6723 if (const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(IMM.getImm())) {
6724 const int64_t Value = CE->getValue();
6725 // Thumb1 imm8 sub / add
6726 if ((Value < ((1 << 7) - 1) << 2) && inITBlock() && (!(Value & 3)) &&
6727 isARMLowRegister(static_cast<ARMOperand &>(*Operands[3]).getReg()))
6728 return false;
6729 return true; // Thumb2 T4 addw / subw
6730 }
6731 }
6732 return false;
6733}
6734
6735bool ARMAsmParser::shouldOmitPredicateOperand(StringRef Mnemonic,
6736 OperandVector &Operands) {
6737 // VRINT{Z, X} have a predicate operand in VFP, but not in NEON
6738 unsigned RegIdx = 3;
6739 if ((((Mnemonic == "vrintz" || Mnemonic == "vrintx") && !hasMVE()) ||
6740 Mnemonic == "vrintr") &&
6741 (static_cast<ARMOperand &>(*Operands[2]).getToken() == ".f32" ||
6742 static_cast<ARMOperand &>(*Operands[2]).getToken() == ".f16")) {
6743 if (static_cast<ARMOperand &>(*Operands[3]).isToken() &&
6744 (static_cast<ARMOperand &>(*Operands[3]).getToken() == ".f32" ||
6745 static_cast<ARMOperand &>(*Operands[3]).getToken() == ".f16"))
6746 RegIdx = 4;
6747
6748 if (static_cast<ARMOperand &>(*Operands[RegIdx]).isReg() &&
6749 (ARMMCRegisterClasses[ARM::DPRRegClassID].contains(
6750 static_cast<ARMOperand &>(*Operands[RegIdx]).getReg()) ||
6751 ARMMCRegisterClasses[ARM::QPRRegClassID].contains(
6752 static_cast<ARMOperand &>(*Operands[RegIdx]).getReg())))
6753 return true;
6754 }
6755 return false;
6756}
6757
6758bool ARMAsmParser::shouldOmitVectorPredicateOperand(StringRef Mnemonic,
6759 OperandVector &Operands) {
6760 if (!hasMVE() || Operands.size() < 3)
6761 return true;
6762
6763 if (Mnemonic.startswith("vld2") || Mnemonic.startswith("vld4") ||
6764 Mnemonic.startswith("vst2") || Mnemonic.startswith("vst4"))
6765 return true;
6766
6767 if (Mnemonic.startswith("vctp") || Mnemonic.startswith("vpnot"))
6768 return false;
6769
6770 if (Mnemonic.startswith("vmov") &&
6771 !(Mnemonic.startswith("vmovl") || Mnemonic.startswith("vmovn") ||
6772 Mnemonic.startswith("vmovx"))) {
6773 for (auto &Operand : Operands) {
6774 if (static_cast<ARMOperand &>(*Operand).isVectorIndex() ||
6775 ((*Operand).isReg() &&
6776 (ARMMCRegisterClasses[ARM::SPRRegClassID].contains(
6777 (*Operand).getReg()) ||
6778 ARMMCRegisterClasses[ARM::DPRRegClassID].contains(
6779 (*Operand).getReg())))) {
6780 return true;
6781 }
6782 }
6783 return false;
6784 } else {
6785 for (auto &Operand : Operands) {
6786 // We check the larger class QPR instead of just the legal class
6787 // MQPR, to more accurately report errors when using Q registers
6788 // outside of the allowed range.
6789 if (static_cast<ARMOperand &>(*Operand).isVectorIndex() ||
6790 (Operand->isReg() &&
6791 (ARMMCRegisterClasses[ARM::QPRRegClassID].contains(
6792 Operand->getReg()))))
6793 return false;
6794 }
6795 return true;
6796 }
6797}
6798
6799static bool isDataTypeToken(StringRef Tok) {
6800 return Tok == ".8" || Tok == ".16" || Tok == ".32" || Tok == ".64" ||
6801 Tok == ".i8" || Tok == ".i16" || Tok == ".i32" || Tok == ".i64" ||
6802 Tok == ".u8" || Tok == ".u16" || Tok == ".u32" || Tok == ".u64" ||
6803 Tok == ".s8" || Tok == ".s16" || Tok == ".s32" || Tok == ".s64" ||
6804 Tok == ".p8" || Tok == ".p16" || Tok == ".f32" || Tok == ".f64" ||
6805 Tok == ".f" || Tok == ".d";
6806}
6807
6808// FIXME: This bit should probably be handled via an explicit match class
6809// in the .td files that matches the suffix instead of having it be
6810// a literal string token the way it is now.
6811static bool doesIgnoreDataTypeSuffix(StringRef Mnemonic, StringRef DT) {
6812 return Mnemonic.startswith("vldm") || Mnemonic.startswith("vstm");
6813}
6814
6815static void applyMnemonicAliases(StringRef &Mnemonic,
6816 const FeatureBitset &Features,
6817 unsigned VariantID);
6818
6819// The GNU assembler has aliases of ldrd and strd with the second register
6820// omitted. We don't have a way to do that in tablegen, so fix it up here.
6821//
6822// We have to be careful to not emit an invalid Rt2 here, because the rest of
6823 // the assembly parser could then generate confusing diagnostics referring to
6824// it. If we do find anything that prevents us from doing the transformation we
6825// bail out, and let the assembly parser report an error on the instruction as
6826// it is written.
6827void ARMAsmParser::fixupGNULDRDAlias(StringRef Mnemonic,
6828 OperandVector &Operands) {
6829 if (Mnemonic != "ldrd" && Mnemonic != "strd")
6830 return;
6831 if (Operands.size() < 4)
6832 return;
6833
6834 ARMOperand &Op2 = static_cast<ARMOperand &>(*Operands[2]);
6835 ARMOperand &Op3 = static_cast<ARMOperand &>(*Operands[3]);
6836
6837 if (!Op2.isReg())
6838 return;
6839 if (!Op3.isGPRMem())
6840 return;
6841
6842 const MCRegisterClass &GPR = MRI->getRegClass(ARM::GPRRegClassID);
6843 if (!GPR.contains(Op2.getReg()))
6844 return;
6845
6846 unsigned RtEncoding = MRI->getEncodingValue(Op2.getReg());
6847 if (!isThumb() && (RtEncoding & 1)) {
6848 // In ARM mode, the registers must be from an aligned pair, this
6849 // restriction does not apply in Thumb mode.
6850 return;
6851 }
6852 if (Op2.getReg() == ARM::PC)
6853 return;
6854 unsigned PairedReg = GPR.getRegister(RtEncoding + 1);
6855 if (!PairedReg || PairedReg == ARM::PC ||
6856 (PairedReg == ARM::SP && !hasV8Ops()))
6857 return;
6858
6859 Operands.insert(
6860 Operands.begin() + 3,
6861 ARMOperand::CreateReg(PairedReg, Op2.getStartLoc(), Op2.getEndLoc()));
6862}
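// Editor-added sketch: the GNU-style "ldrd r0, [r2]" spelling gets the
// implicit second register Rt2 = Rt + 1 inserted (becoming "ldrd r0, r1,
// [r2]"); in ARM mode an odd Rt leaves the instruction untouched so the
// normal diagnostics fire instead.
#include <cassert>
#include <string>

static std::string impliedSecondReg(unsigned RtEncoding, bool IsThumb) {
  if (!IsThumb && (RtEncoding & 1))
    return ""; // not transformed in ARM mode
  return "r" + std::to_string(RtEncoding + 1);
}

int main() {
  assert(impliedSecondReg(0, /*IsThumb=*/false) == "r1");
  assert(impliedSecondReg(3, /*IsThumb=*/false).empty());
}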
6863
6864// Dual-register instructions have the following syntax:
6865// <mnemonic> <predicate>? <coproc>, <Rdest>, <Rdest+1>, <Rsrc>, ..., #imm
6866// This function tries to remove <Rdest+1> and replace <Rdest> with a pair
6867// operand. If the conversion fails an error is diagnosed, and the function
6868// returns true.
6869bool ARMAsmParser::CDEConvertDualRegOperand(StringRef Mnemonic,
6870 OperandVector &Operands) {
6871 assert(MS.isCDEDualRegInstr(Mnemonic));
6872 bool isPredicable =
6873 Mnemonic == "cx1da" || Mnemonic == "cx2da" || Mnemonic == "cx3da";
6874 size_t NumPredOps = isPredicable ? 1 : 0;
6875
6876 if (Operands.size() <= 3 + NumPredOps)
6877 return false;
6878
6879 StringRef Op2Diag(
6880 "operand must be an even-numbered register in the range [r0, r10]");
6881
6882 const MCParsedAsmOperand &Op2 = *Operands[2 + NumPredOps];
6883 if (!Op2.isReg())
6884 return Error(Op2.getStartLoc(), Op2Diag);
6885
6886 unsigned RNext;
6887 unsigned RPair;
6888 switch (Op2.getReg()) {
6889 default:
6890 return Error(Op2.getStartLoc(), Op2Diag);
6891 case ARM::R0:
6892 RNext = ARM::R1;
6893 RPair = ARM::R0_R1;
6894 break;
6895 case ARM::R2:
6896 RNext = ARM::R3;
6897 RPair = ARM::R2_R3;
6898 break;
6899 case ARM::R4:
6900 RNext = ARM::R5;
6901 RPair = ARM::R4_R5;
6902 break;
6903 case ARM::R6:
6904 RNext = ARM::R7;
6905 RPair = ARM::R6_R7;
6906 break;
6907 case ARM::R8:
6908 RNext = ARM::R9;
6909 RPair = ARM::R8_R9;
6910 break;
6911 case ARM::R10:
6912 RNext = ARM::R11;
6913 RPair = ARM::R10_R11;
6914 break;
6915 }
6916
6917 const MCParsedAsmOperand &Op3 = *Operands[3 + NumPredOps];
6918 if (!Op3.isReg() || Op3.getReg() != RNext)
6919 return Error(Op3.getStartLoc(), "operand must be a consecutive register");
6920
6921 Operands.erase(Operands.begin() + 3 + NumPredOps);
6922 Operands[2 + NumPredOps] =
6923 ARMOperand::CreateReg(RPair, Op2.getStartLoc(), Op2.getEndLoc());
6924 return false;
6925}
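// Editor-added sketch (mnemonic spelling assumed): for a dual-register CDE
// instruction such as "cx1d p0, r2, r3, #0", the r2/r3 operands above
// collapse into the single register-pair operand r2_r3; only even registers
// up to r10 are accepted.
#include <cassert>
#include <string>

static std::string cdePairFor(unsigned RegNo) {
  if (RegNo > 10 || (RegNo & 1))
    return ""; // diagnosed as an error by the function above
  return "r" + std::to_string(RegNo) + "_r" + std::to_string(RegNo + 1);
}

int main() {
  assert(cdePairFor(2) == "r2_r3");
  assert(cdePairFor(5).empty());
}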
6926
6927/// Parse an arm instruction mnemonic followed by its operands.
6928bool ARMAsmParser::ParseInstruction(ParseInstructionInfo &Info, StringRef Name,
6929 SMLoc NameLoc, OperandVector &Operands) {
6930 MCAsmParser &Parser = getParser();
6931
6932 // Apply mnemonic aliases before doing anything else, as the destination
6933 // mnemonic may include suffixes and we want to handle them normally.
6934 // The generic tblgen'erated code does this later, at the start of
6935 // MatchInstructionImpl(), but that's too late for aliases that include
6936 // any sort of suffix.
6937 const FeatureBitset &AvailableFeatures = getAvailableFeatures();
6938 unsigned AssemblerDialect = getParser().getAssemblerDialect();
6939 applyMnemonicAliases(Name, AvailableFeatures, AssemblerDialect);
6940
6941 // First check for the ARM-specific .req directive.
6942 if (Parser.getTok().is(AsmToken::Identifier) &&
6943 Parser.getTok().getIdentifier().lower() == ".req") {
6944 parseDirectiveReq(Name, NameLoc);
6945 // We always return 'error' for this, as we're done with this
6946 // statement and don't need to match the instruction.
6947 return true;
6948 }
6949
6950 // Create the leading tokens for the mnemonic, split by '.' characters.
6951 size_t Start = 0, Next = Name.find('.');
6952 StringRef Mnemonic = Name.slice(Start, Next);
6953 StringRef ExtraToken = Name.slice(Next, Name.find(' ', Next + 1));
6954
6955 // Split out the predication code and carry setting flag from the mnemonic.
6956 unsigned PredicationCode;
6957 unsigned VPTPredicationCode;
6958 unsigned ProcessorIMod;
6959 bool CarrySetting;
6960 StringRef ITMask;
6961 Mnemonic = splitMnemonic(Mnemonic, ExtraToken, PredicationCode, VPTPredicationCode,
6962 CarrySetting, ProcessorIMod, ITMask);
6963
6964 // In Thumb1, only the branch (B) instruction can be predicated.
6965 if (isThumbOne() && PredicationCode != ARMCC::AL && Mnemonic != "b") {
6966 return Error(NameLoc, "conditional execution not supported in Thumb1");
6967 }
6968
6969 Operands.push_back(ARMOperand::CreateToken(Mnemonic, NameLoc));
6970
6971 // Handle the mask for IT and VPT instructions. In ARMOperand and
6972 // MCOperand, this is stored in a format independent of the
6973 // condition code: the lowest set bit indicates the end of the
6974 // encoding, and above that, a 1 bit indicates 'else', and a 0
6975 // indicates 'then'. E.g.
6976 // IT -> 1000
6977 // ITx -> x100 (ITT -> 0100, ITE -> 1100)
6978 // ITxy -> xy10 (e.g. ITET -> 1010)
6979 // ITxyz -> xyz1 (e.g. ITEET -> 1101)
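 // Worked example (editor-added): "itete" yields ITMask == "ete"; the loop
 // below visits the mask right to left, so Mask goes 8 -> 12 ('e') -> 6 ('t')
 // -> 11 ('e'), i.e. 0b1011, matching ITxyz -> xyz1 with x=1, y=0, z=1.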
6980 if (Mnemonic == "it" || Mnemonic.startswith("vpt") ||
6981 Mnemonic.startswith("vpst")) {
6982 SMLoc Loc = Mnemonic == "it" ? SMLoc::getFromPointer(NameLoc.getPointer() + 2) :
6983 Mnemonic == "vpt" ? SMLoc::getFromPointer(NameLoc.getPointer() + 3) :
6984 SMLoc::getFromPointer(NameLoc.getPointer() + 4);
6985 if (ITMask.size() > 3) {
6986 if (Mnemonic == "it")
6987 return Error(Loc, "too many conditions on IT instruction");
6988 return Error(Loc, "too many conditions on VPT instruction");
6989 }
6990 unsigned Mask = 8;
6991 for (unsigned i = ITMask.size(); i != 0; --i) {
6992 char pos = ITMask[i - 1];
6993 if (pos != 't' && pos != 'e') {
6994 return Error(Loc, "illegal IT block condition mask '" + ITMask + "'");
6995 }
6996 Mask >>= 1;
6997 if (ITMask[i - 1] == 'e')
6998 Mask |= 8;
6999 }
7000 Operands.push_back(ARMOperand::CreateITMask(Mask, Loc));
7001 }
7002
7003 // FIXME: This is all a pretty gross hack. We should automatically handle
7004 // optional operands like this via tblgen.
7005
7006 // Next, add the CCOut and ConditionCode operands, if needed.
7007 //
7008 // For mnemonics which can ever incorporate a carry setting bit or predication
7009 // code, our matching model involves us always generating CCOut and
7010 // ConditionCode operands to match the mnemonic "as written" and then we let
7011 // the matcher deal with finding the right instruction or generating an
7012 // appropriate error.
7013 bool CanAcceptCarrySet, CanAcceptPredicationCode, CanAcceptVPTPredicationCode;
7014 getMnemonicAcceptInfo(Mnemonic, ExtraToken, Name, CanAcceptCarrySet,
7015 CanAcceptPredicationCode, CanAcceptVPTPredicationCode);
7016
7017 // If we had a carry-set on an instruction that can't do that, issue an
7018 // error.
7019 if (!CanAcceptCarrySet && CarrySetting) {
7020 return Error(NameLoc, "instruction '" + Mnemonic +
7021 "' can not set flags, but 's' suffix specified");
7022 }
7023 // If we had a predication code on an instruction that can't do that, issue an
7024 // error.
7025 if (!CanAcceptPredicationCode && PredicationCode != ARMCC::AL) {
7026 return Error(NameLoc, "instruction '" + Mnemonic +
7027 "' is not predicable, but condition code specified");
7028 }
7029
7030 // If we had a VPT predication code on an instruction that can't do that, issue an
7031 // error.
7032 if (!CanAcceptVPTPredicationCode && VPTPredicationCode != ARMVCC::None) {
7033 return Error(NameLoc, "instruction '" + Mnemonic +
7034 "' is not VPT predicable, but VPT code T/E is specified");
7035 }
7036
7037 // Add the carry setting operand, if necessary.
7038 if (CanAcceptCarrySet) {
7039 SMLoc Loc = SMLoc::getFromPointer(NameLoc.getPointer() + Mnemonic.size());
7040 Operands.push_back(ARMOperand::CreateCCOut(CarrySetting ? ARM::CPSR : 0,
7041 Loc));
7042 }
7043
7044 // Add the predication code operand, if necessary.
7045 if (CanAcceptPredicationCode) {
7046 SMLoc Loc = SMLoc::getFromPointer(NameLoc.getPointer() + Mnemonic.size() +
7047 CarrySetting);
7048 Operands.push_back(ARMOperand::CreateCondCode(
7049 ARMCC::CondCodes(PredicationCode), Loc));
7050 }
7051
7052 // Add the VPT predication code operand, if necessary.
7053 // FIXME: We don't add them for the instructions filtered below as these can
7054 // have custom operands which need special parsing. This parsing requires
7055 // the operand to be in the same place in the OperandVector as its
7056 // definition in tblgen. Since these instructions may also have the
7057 // scalar predication operand, we do not add the vector one and leave it
7058 // until now to fix up.
7059 if (CanAcceptVPTPredicationCode && Mnemonic != "vmov" &&
7060 !Mnemonic.startswith("vcmp") &&
7061 !(Mnemonic.startswith("vcvt") && Mnemonic != "vcvta" &&
7062 Mnemonic != "vcvtn" && Mnemonic != "vcvtp" && Mnemonic != "vcvtm")) {
7063 SMLoc Loc = SMLoc::getFromPointer(NameLoc.getPointer() + Mnemonic.size() +
7064 CarrySetting);
7065 Operands.push_back(ARMOperand::CreateVPTPred(
7066 ARMVCC::VPTCodes(VPTPredicationCode), Loc));
7067 }
7068
7069 // Add the processor imod operand, if necessary.
7070 if (ProcessorIMod) {
7071 Operands.push_back(ARMOperand::CreateImm(
7072 MCConstantExpr::create(ProcessorIMod, getContext()),
7073 NameLoc, NameLoc));
7074 } else if (Mnemonic == "cps" && isMClass()) {
7075 return Error(NameLoc, "instruction 'cps' requires effect for M-class");
7076 }
7077
7078 // Add the remaining tokens in the mnemonic.
7079 while (Next != StringRef::npos) {
7080 Start = Next;
7081 Next = Name.find('.', Start + 1);
7082 ExtraToken = Name.slice(Start, Next);
7083
7084 // Some NEON instructions have an optional datatype suffix that is
7085 // completely ignored. Check for that.
7086 if (isDataTypeToken(ExtraToken) &&
7087 doesIgnoreDataTypeSuffix(Mnemonic, ExtraToken))
7088 continue;
7089
7090 // For ARM mode, generate an error if the .n qualifier is used.
7091 if (ExtraToken == ".n" && !isThumb()) {
7092 SMLoc Loc = SMLoc::getFromPointer(NameLoc.getPointer() + Start);
7093 return Error(Loc, "instruction with .n (narrow) qualifier not allowed in "
7094 "arm mode");
7095 }
7096
7097 // The .n qualifier is always discarded as that is what the tables
7098 // and matcher expect. In ARM mode the .w qualifier has no effect,
7099 // so discard it to avoid errors that can be caused by the matcher.
7100 if (ExtraToken != ".n" && (isThumb() || ExtraToken != ".w")) {
7101 SMLoc Loc = SMLoc::getFromPointer(NameLoc.getPointer() + Start);
7102 Operands.push_back(ARMOperand::CreateToken(ExtraToken, Loc));
7103 }
7104 }
7105
7106 // Read the remaining operands.
7107 if (getLexer().isNot(AsmToken::EndOfStatement)) {
7108 // Read the first operand.
7109 if (parseOperand(Operands, Mnemonic)) {
7110 return true;
7111 }
7112
7113 while (parseOptionalToken(AsmToken::Comma)) {
7114 // Parse and remember the operand.
7115 if (parseOperand(Operands, Mnemonic)) {
7116 return true;
7117 }
7118 }
7119 }
7120
7121 if (parseToken(AsmToken::EndOfStatement, "unexpected token in argument list"))
7122 return true;
7123
7124 tryConvertingToTwoOperandForm(Mnemonic, CarrySetting, Operands);
7125
7126 if (hasCDE() && MS.isCDEInstr(Mnemonic)) {
7127 // Dual-register instructions use even-odd register pairs as their
7128 // destination operand; in assembly such a pair is spelled as two
7129 // consecutive registers, without any special syntax. CDEConvertDualRegOperand
7130 // tries to convert such an operand into a register pair, e.g. r2, r3 -> r2_r3.
7131 // It returns true, if an error message has been emitted. If the function
7132 // returns false, the function either succeeded or an error (e.g. missing
7133 // operand) will be diagnosed elsewhere.
7134 if (MS.isCDEDualRegInstr(Mnemonic)) {
7135 bool GotError = CDEConvertDualRegOperand(Mnemonic, Operands);
7136 if (GotError)
7137 return GotError;
7138 }
7139 }
7140
7141 // Some instructions, mostly Thumb, have forms for the same mnemonic that
7142 // do and don't have a cc_out optional-def operand. With some spot-checks
7143 // of the operand list, we can figure out which variant we're trying to
7144 // parse and adjust accordingly before actually matching. We shouldn't ever
7145 // try to remove a cc_out operand that was explicitly set on the
7146 // mnemonic, of course (CarrySetting == true). Reason number #317 why the
7147 // table-driven matcher doesn't fit well with the ARM instruction set.
7148 if (!CarrySetting && shouldOmitCCOutOperand(Mnemonic, Operands))
7149 Operands.erase(Operands.begin() + 1);
7150
7151 // Some instructions have the same mnemonic, but don't always
7152 // have a predicate. Distinguish them here and delete the
7153 // appropriate predicate if needed. This could be either the scalar
7154 // predication code or the vector predication code.
7155 if (PredicationCode == ARMCC::AL &&
7156 shouldOmitPredicateOperand(Mnemonic, Operands))
7157 Operands.erase(Operands.begin() + 1);
7158
7159
7160 if (hasMVE()) {
7161 if (!shouldOmitVectorPredicateOperand(Mnemonic, Operands) &&
7162 Mnemonic == "vmov" && PredicationCode == ARMCC::LT) {
7163 // Very nasty hack to deal with the ambiguity between the vector
7164 // predicated vmovlt and the scalar predicated vmov with condition 'lt'.
7165 // We cannot tell them apart until we have parsed their operands.
7166 Operands.erase(Operands.begin() + 1);
7167 Operands.erase(Operands.begin());
7168 SMLoc MLoc = SMLoc::getFromPointer(NameLoc.getPointer());
7169 SMLoc PLoc = SMLoc::getFromPointer(NameLoc.getPointer() +
7170 Mnemonic.size() - 1 + CarrySetting);
7171 Operands.insert(Operands.begin(),
7172 ARMOperand::CreateVPTPred(ARMVCC::None, PLoc));
7173 Operands.insert(Operands.begin(),
7174 ARMOperand::CreateToken(StringRef("vmovlt"), MLoc));
7175 } else if (Mnemonic == "vcvt" && PredicationCode == ARMCC::NE &&
7176 !shouldOmitVectorPredicateOperand(Mnemonic, Operands)) {
7177 // Another nasty hack to deal with the ambiguity between vcvt with scalar
7178 // predication 'ne' and vcvtn with vector predication 'e'. As above we
7179 // can only distinguish between the two after we have parsed their
7180 // operands.
7181 Operands.erase(Operands.begin() + 1);
7182 Operands.erase(Operands.begin());
7183 SMLoc MLoc = SMLoc::getFromPointer(NameLoc.getPointer());
7184 SMLoc PLoc = SMLoc::getFromPointer(NameLoc.getPointer() +
7185 Mnemonic.size() - 1 + CarrySetting);
7186 Operands.insert(Operands.begin(),
7187 ARMOperand::CreateVPTPred(ARMVCC::Else, PLoc));
7188 Operands.insert(Operands.begin(),
7189 ARMOperand::CreateToken(StringRef("vcvtn"), MLoc));
7190 } else if (Mnemonic == "vmul" && PredicationCode == ARMCC::LT &&
7191 !shouldOmitVectorPredicateOperand(Mnemonic, Operands)) {
7192 // Another hack, this time to distinguish between scalar predicated vmul
7193 // with 'lt' predication code and the vector instruction vmullt with
7194 // vector predication code "none"
7195 Operands.erase(Operands.begin() + 1);
7196 Operands.erase(Operands.begin());
7197 SMLoc MLoc = SMLoc::getFromPointer(NameLoc.getPointer());
7198 Operands.insert(Operands.begin(),
7199 ARMOperand::CreateToken(StringRef("vmullt"), MLoc));
7200 }
7201 // For vmov and vcmp, as mentioned earlier, we did not add the vector
7202 // predication code, since these may contain operands that require
7203 // special parsing. So now we have to see if they require vector
7204 // predication and replace the scalar one with the vector predication
7205 // operand if that is the case.
7206 else if (Mnemonic == "vmov" || Mnemonic.startswith("vcmp") ||
7207 (Mnemonic.startswith("vcvt") && !Mnemonic.startswith("vcvta") &&
7208 !Mnemonic.startswith("vcvtn") && !Mnemonic.startswith("vcvtp") &&
7209 !Mnemonic.startswith("vcvtm"))) {
7210 if (!shouldOmitVectorPredicateOperand(Mnemonic, Operands)) {
7211 // We could not split the vector predicate off vcvt because it might
7212 // have been the scalar vcvtt instruction. Now we know it's a vector
7213 // instruction, we still need to check whether it's the vector
7214 // predicated vcvt with 'Then' predication or the vector vcvtt. We can
7215 // distinguish the two based on the suffixes, if it is any of
7216 // ".f16.f32", ".f32.f16", ".f16.f64" or ".f64.f16" then it is the vcvtt.
7217 if (Mnemonic.startswith("vcvtt") && Operands.size() >= 4) {
7218 auto Sz1 = static_cast<ARMOperand &>(*Operands[2]);
7219 auto Sz2 = static_cast<ARMOperand &>(*Operands[3]);
7220 if (!(Sz1.isToken() && Sz1.getToken().startswith(".f") &&
7221 Sz2.isToken() && Sz2.getToken().startswith(".f"))) {
7222 Operands.erase(Operands.begin());
7223 SMLoc MLoc = SMLoc::getFromPointer(NameLoc.getPointer());
7224 VPTPredicationCode = ARMVCC::Then;
7225
7226 Mnemonic = Mnemonic.substr(0, 4);
7227 Operands.insert(Operands.begin(),
7228 ARMOperand::CreateToken(Mnemonic, MLoc));
7229 }
7230 }
7231 Operands.erase(Operands.begin() + 1);
7232 SMLoc PLoc = SMLoc::getFromPointer(NameLoc.getPointer() +
7233 Mnemonic.size() + CarrySetting);
7234 Operands.insert(Operands.begin() + 1,
7235 ARMOperand::CreateVPTPred(
7236 ARMVCC::VPTCodes(VPTPredicationCode), PLoc));
7237 }
7238 } else if (CanAcceptVPTPredicationCode) {
7239 // For all other instructions, make sure only one of the two
7240 // predication operands is left behind, depending on whether we should
7241 // use the vector predication.
7242 if (shouldOmitVectorPredicateOperand(Mnemonic, Operands)) {
7243 if (CanAcceptPredicationCode)
7244 Operands.erase(Operands.begin() + 2);
7245 else
7246 Operands.erase(Operands.begin() + 1);
7247 } else if (CanAcceptPredicationCode && PredicationCode == ARMCC::AL) {
7248 Operands.erase(Operands.begin() + 1);
7249 }
7250 }
7251 }
7252
7253 if (VPTPredicationCode != ARMVCC::None) {
7254 bool usedVPTPredicationCode = false;
7255 for (unsigned I = 1; I < Operands.size(); ++I)
7256 if (static_cast<ARMOperand &>(*Operands[I]).isVPTPred())
7257 usedVPTPredicationCode = true;
7258 if (!usedVPTPredicationCode) {
7259 // If we have a VPT predication code and we haven't just turned it
7260 // into an operand, then it was a mistake for splitMnemonic to
7261 // separate it from the rest of the mnemonic in the first place,
7262 // and this may lead to wrong disassembly (e.g. scalar floating
7263 // point VCMPE is actually a different instruction from VCMP, so
7264 // we mustn't treat them the same). In that situation, glue it
7265 // back on.
7266 Mnemonic = Name.slice(0, Mnemonic.size() + 1);
7267 Operands.erase(Operands.begin());
7268 Operands.insert(Operands.begin(),
7269 ARMOperand::CreateToken(Mnemonic, NameLoc));
7270 }
7271 }
7272
7273 // ARM mode 'blx' needs special handling, as the register operand version
7274 // is predicable, but the label operand version is not. So, we can't rely
7275 // on the Mnemonic based checking to correctly figure out when to put
7276 // a k_CondCode operand in the list. If we're trying to match the label
7277 // version, remove the k_CondCode operand here.
7278 if (!isThumb() && Mnemonic == "blx" && Operands.size() == 3 &&
7279 static_cast<ARMOperand &>(*Operands[2]).isImm())
7280 Operands.erase(Operands.begin() + 1);
7281
7282 // Adjust operands of ldrexd/strexd to MCK_GPRPair.
7283 // ldrexd/strexd require even/odd GPR pair. To enforce this constraint,
7284 // a single GPRPair reg operand is used in the .td file to replace the two
7285 // GPRs. However, when parsing from asm, the two GPRs cannot be
7286 // automatically
7287 // expressed as a GPRPair, so we have to manually merge them.
7288 // FIXME: We would really like to be able to tablegen'erate this.
7289 if (!isThumb() && Operands.size() > 4 &&
7290 (Mnemonic == "ldrexd" || Mnemonic == "strexd" || Mnemonic == "ldaexd" ||
7291 Mnemonic == "stlexd")) {
7292 bool isLoad = (Mnemonic == "ldrexd" || Mnemonic == "ldaexd");
7293 unsigned Idx = isLoad ? 2 : 3;
7294 ARMOperand &Op1 = static_cast<ARMOperand &>(*Operands[Idx]);
7295 ARMOperand &Op2 = static_cast<ARMOperand &>(*Operands[Idx + 1]);
7296
7297 const MCRegisterClass &MRC = MRI->getRegClass(ARM::GPRRegClassID);
7298 // Adjust only if Op1 and Op2 are GPRs.
7299 if (Op1.isReg() && Op2.isReg() && MRC.contains(Op1.getReg()) &&
7300 MRC.contains(Op2.getReg())) {
7301 unsigned Reg1 = Op1.getReg();
7302 unsigned Reg2 = Op2.getReg();
7303 unsigned Rt = MRI->getEncodingValue(Reg1);
7304 unsigned Rt2 = MRI->getEncodingValue(Reg2);
7305
7306 // Rt2 must be Rt + 1 and Rt must be even.
7307 if (Rt + 1 != Rt2 || (Rt & 1)) {
7308 return Error(Op2.getStartLoc(),
7309 isLoad ? "destination operands must be sequential"
7310 : "source operands must be sequential");
7311 }
7312 unsigned NewReg = MRI->getMatchingSuperReg(
7313 Reg1, ARM::gsub_0, &(MRI->getRegClass(ARM::GPRPairRegClassID)));
7314 Operands[Idx] =
7315 ARMOperand::CreateReg(NewReg, Op1.getStartLoc(), Op2.getEndLoc());
7316 Operands.erase(Operands.begin() + Idx + 1);
7317 }
7318 }
7319
7320 // GNU Assembler extension (compatibility).
7321 fixupGNULDRDAlias(Mnemonic, Operands);
7322
7323 // FIXME: As said above, this is all a pretty gross hack. This instruction
7324 // does not fit with other "subs" and tblgen.
7325 // Adjust operands of B9.3.19 SUBS PC, LR, #imm (Thumb2) system instruction
7326 // so the Mnemonic is the original name "subs" and delete the predicate
7327 // operand so it will match the table entry.
7328 if (isThumbTwo() && Mnemonic == "sub" && Operands.size() == 6 &&
7329 static_cast<ARMOperand &>(*Operands[3]).isReg() &&
7330 static_cast<ARMOperand &>(*Operands[3]).getReg() == ARM::PC &&
7331 static_cast<ARMOperand &>(*Operands[4]).isReg() &&
7332 static_cast<ARMOperand &>(*Operands[4]).getReg() == ARM::LR &&
7333 static_cast<ARMOperand &>(*Operands[5]).isImm()) {
7334 Operands.front() = ARMOperand::CreateToken(Name, NameLoc);
7335 Operands.erase(Operands.begin() + 1);
7336 }
7337 return false;
7338}
7339
7340// Validate context-sensitive operand constraints.
7341
7342// return 'true' if register list contains non-low GPR registers,
7343// 'false' otherwise. If Reg is in the register list or is HiReg, set
7344// 'containsReg' to true.
7345static bool checkLowRegisterList(const MCInst &Inst, unsigned OpNo,
7346 unsigned Reg, unsigned HiReg,
7347 bool &containsReg) {
7348 containsReg = false;
7349 for (unsigned i = OpNo; i < Inst.getNumOperands(); ++i) {
7350 unsigned OpReg = Inst.getOperand(i).getReg();
7351 if (OpReg == Reg)
7352 containsReg = true;
7353 // Anything other than a low register isn't legal here.
7354 if (!isARMLowRegister(OpReg) && (!HiReg || OpReg != HiReg))
7355 return true;
7356 }
7357 return false;
7358}
7359
7360// Check if the specified register is in the register list of the inst,
7361// starting at the indicated operand number.
7362static bool listContainsReg(const MCInst &Inst, unsigned OpNo, unsigned Reg) {
7363 for (unsigned i = OpNo, e = Inst.getNumOperands(); i < e; ++i) {
7364 unsigned OpReg = Inst.getOperand(i).getReg();
7365 if (OpReg == Reg)
7366 return true;
7367 }
7368 return false;
7369}
7370
7371// Return true if instruction has the interesting property of being
7372// allowed in IT blocks, but not being predicable.
7373static bool instIsBreakpoint(const MCInst &Inst) {
7374 return Inst.getOpcode() == ARM::tBKPT ||
7375 Inst.getOpcode() == ARM::BKPT ||
7376 Inst.getOpcode() == ARM::tHLT ||
7377 Inst.getOpcode() == ARM::HLT;
7378}
7379
7380bool ARMAsmParser::validatetLDMRegList(const MCInst &Inst,
7381 const OperandVector &Operands,
7382 unsigned ListNo, bool IsARPop) {
7383 const ARMOperand &Op = static_cast<const ARMOperand &>(*Operands[ListNo]);
7384 bool HasWritebackToken = Op.isToken() && Op.getToken() == "!";
7385
7386 bool ListContainsSP = listContainsReg(Inst, ListNo, ARM::SP);
7387 bool ListContainsLR = listContainsReg(Inst, ListNo, ARM::LR);
7388 bool ListContainsPC = listContainsReg(Inst, ListNo, ARM::PC);
7389
7390 if (!IsARPop && ListContainsSP)
7391 return Error(Operands[ListNo + HasWritebackToken]->getStartLoc(),
7392 "SP may not be in the register list");
7393 else if (ListContainsPC && ListContainsLR)
7394 return Error(Operands[ListNo + HasWritebackToken]->getStartLoc(),
7395 "PC and LR may not be in the register list simultaneously");
7396 return false;
7397}
7398
7399bool ARMAsmParser::validatetSTMRegList(const MCInst &Inst,
7400 const OperandVector &Operands,
7401 unsigned ListNo) {
7402 const ARMOperand &Op = static_cast<const ARMOperand &>(*Operands[ListNo]);
7403 bool HasWritebackToken = Op.isToken() && Op.getToken() == "!";
7404
7405 bool ListContainsSP = listContainsReg(Inst, ListNo, ARM::SP);
7406 bool ListContainsPC = listContainsReg(Inst, ListNo, ARM::PC);
7407
7408 if (ListContainsSP && ListContainsPC)
7409 return Error(Operands[ListNo + HasWritebackToken]->getStartLoc(),
7410 "SP and PC may not be in the register list");
7411 else if (ListContainsSP)
7412 return Error(Operands[ListNo + HasWritebackToken]->getStartLoc(),
7413 "SP may not be in the register list");
7414 else if (ListContainsPC)
7415 return Error(Operands[ListNo + HasWritebackToken]->getStartLoc(),
7416 "PC may not be in the register list");
7417 return false;
7418}
7419
7420bool ARMAsmParser::validateLDRDSTRD(MCInst &Inst,
7421 const OperandVector &Operands,
7422 bool Load, bool ARMMode, bool Writeback) {
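  // Editorial note (not part of the original source): for stores with
  // writeback, the MCInst lists the updated base register first, so Rt starts
  // at operand index 1; in every other form Rt is operand 0.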
7423 unsigned RtIndex = Load || !Writeback ? 0 : 1;
7424 unsigned Rt = MRI->getEncodingValue(Inst.getOperand(RtIndex).getReg());
7425 unsigned Rt2 = MRI->getEncodingValue(Inst.getOperand(RtIndex + 1).getReg());
7426
7427 if (ARMMode) {
7428 // Rt can't be R14.
7429 if (Rt == 14)
7430 return Error(Operands[3]->getStartLoc(),
7431 "Rt can't be R14");
7432
7433 // Rt must be even-numbered.
7434 if ((Rt & 1) == 1)
7435 return Error(Operands[3]->getStartLoc(),
7436 "Rt must be even-numbered");
7437
7438 // Rt2 must be Rt + 1.
7439 if (Rt2 != Rt + 1) {
7440 if (Load)
7441 return Error(Operands[3]->getStartLoc(),
7442 "destination operands must be sequential");
7443 else
7444 return Error(Operands[3]->getStartLoc(),
7445 "source operands must be sequential");
7446 }
7447
7448 // FIXME: Diagnose m == 15
7449 // FIXME: Diagnose ldrd with m == t || m == t2.
7450 }
7451
7452 if (!ARMMode && Load) {
7453 if (Rt2 == Rt)
7454 return Error(Operands[3]->getStartLoc(),
7455 "destination operands can't be identical");
7456 }
7457
7458 if (Writeback) {
7459 unsigned Rn = MRI->getEncodingValue(Inst.getOperand(3).getReg());
7460
7461 if (Rn == Rt || Rn == Rt2) {
7462 if (Load)
7463 return Error(Operands[3]->getStartLoc(),
7464 "base register needs to be different from destination "
7465 "registers");
7466 else
7467 return Error(Operands[3]->getStartLoc(),
7468 "source register and base register can't be identical");
7469 }
7470
7471 // FIXME: Diagnose ldrd/strd with writeback and n == 15.
7472 // (Except the immediate form of ldrd?)
7473 }
7474
7475 return false;
7476}
7477
7478static int findFirstVectorPredOperandIdx(const MCInstrDesc &MCID) {
7479 for (unsigned i = 0; i < MCID.NumOperands; ++i) {
7480 if (ARM::isVpred(MCID.OpInfo[i].OperandType))
7481 return i;
7482 }
7483 return -1;
7484}
7485
7486static bool isVectorPredicable(const MCInstrDesc &MCID) {
7487 return findFirstVectorPredOperandIdx(MCID) != -1;
7488}
7489
7490// FIXME: We would really like to be able to tablegen'erate this.
7491bool ARMAsmParser::validateInstruction(MCInst &Inst,
7492 const OperandVector &Operands) {
7493 const MCInstrDesc &MCID = MII.get(Inst.getOpcode());
7494 SMLoc Loc = Operands[0]->getStartLoc();
7495
7496 // Check the IT block state first.
7497 // NOTE: BKPT and HLT instructions have the interesting property of being
7498 // allowed in IT blocks, but not being predicable. They just always execute.
7499 if (inITBlock() && !instIsBreakpoint(Inst)) {
7500 // The instruction must be predicable.
7501 if (!MCID.isPredicable())
7502 return Error(Loc, "instructions in IT block must be predicable");
7503 ARMCC::CondCodes Cond = ARMCC::CondCodes(
7504 Inst.getOperand(MCID.findFirstPredOperandIdx()).getImm());
7505 if (Cond != currentITCond()) {
7506 // Find the condition code Operand to get its SMLoc information.
7507 SMLoc CondLoc;
7508 for (unsigned I = 1; I < Operands.size(); ++I)
7509 if (static_cast<ARMOperand &>(*Operands[I]).isCondCode())
7510 CondLoc = Operands[I]->getStartLoc();
7511 return Error(CondLoc, "incorrect condition in IT block; got '" +
7512 StringRef(ARMCondCodeToString(Cond)) +
7513 "', but expected '" +
7514 ARMCondCodeToString(currentITCond()) + "'");
7515 }
7516 // Check for non-'al' condition codes outside of the IT block.
7517 } else if (isThumbTwo() && MCID.isPredicable() &&
7518 Inst.getOperand(MCID.findFirstPredOperandIdx()).getImm() !=
7519 ARMCC::AL && Inst.getOpcode() != ARM::tBcc &&
7520 Inst.getOpcode() != ARM::t2Bcc &&
7521 Inst.getOpcode() != ARM::t2BFic) {
7522 return Error(Loc, "predicated instructions must be in IT block");
7523 } else if (!isThumb() && !useImplicitITARM() && MCID.isPredicable() &&
7524 Inst.getOperand(MCID.findFirstPredOperandIdx()).getImm() !=
7525 ARMCC::AL) {
7526 return Warning(Loc, "predicated instructions should be in IT block");
7527 } else if (!MCID.isPredicable()) {
7528 // Check the instruction doesn't have a predicate operand anyway
7529 // that it's not allowed to use. Sometimes this happens in order
7530 // to keep instructions the same shape even though one cannot
7531 // legally be predicated, e.g. vmul.f16 vs vmul.f32.
7532 for (unsigned i = 0, e = MCID.getNumOperands(); i != e; ++i) {
7533 if (MCID.OpInfo[i].isPredicate()) {
7534 if (Inst.getOperand(i).getImm() != ARMCC::AL)
7535 return Error(Loc, "instruction is not predicable");
7536 break;
7537 }
7538 }
7539 }
7540
7541 // PC-setting instructions in an IT block, but not the last instruction of
7542 // the block, are UNPREDICTABLE.
7543 if (inExplicitITBlock() && !lastInITBlock() && isITBlockTerminator(Inst)) {
7544 return Error(Loc, "instruction must be outside of IT block or the last instruction in an IT block");
7545 }
7546
7547 if (inVPTBlock() && !instIsBreakpoint(Inst)) {
7548 unsigned Bit = extractITMaskBit(VPTState.Mask, VPTState.CurPosition);
7549 if (!isVectorPredicable(MCID))
7550 return Error(Loc, "instruction in VPT block must be predicable");
7551 unsigned Pred = Inst.getOperand(findFirstVectorPredOperandIdx(MCID)).getImm();
7552 unsigned VPTPred = Bit ? ARMVCC::Else : ARMVCC::Then;
7553 if (Pred != VPTPred) {
7554 SMLoc PredLoc;
7555 for (unsigned I = 1; I < Operands.size(); ++I)
7556 if (static_cast<ARMOperand &>(*Operands[I]).isVPTPred())
7557 PredLoc = Operands[I]->getStartLoc();
7558 return Error(PredLoc, "incorrect predication in VPT block; got '" +
7559 StringRef(ARMVPTPredToString(ARMVCC::VPTCodes(Pred))) +
7560 "', but expected '" +
7561 ARMVPTPredToString(ARMVCC::VPTCodes(VPTPred)) + "'");
7562 }
7563 }
7564 else if (isVectorPredicable(MCID) &&
7565 Inst.getOperand(findFirstVectorPredOperandIdx(MCID)).getImm() !=
7566 ARMVCC::None)
7567 return Error(Loc, "VPT predicated instructions must be in VPT block");
7568
7569 const unsigned Opcode = Inst.getOpcode();
7570 switch (Opcode) {
7571 case ARM::t2IT: {
7572 // Encoding is unpredictable if it ever results in a notional 'NV'
7573 // predicate. Since we don't parse 'NV' directly, this means an 'AL'
7574 // predicate with an "else" mask bit.
7575 unsigned Cond = Inst.getOperand(0).getImm();
7576 unsigned Mask = Inst.getOperand(1).getImm();
7577
7578 // Conditions only allowing a 't' are those with no set bit except
7579 // the lowest-order one that indicates the end of the sequence. In
7580 // other words, powers of 2.
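    // Editorial illustration (not part of the original source): "it al" and
    // "itt al" leave the mask a power of two and are accepted, whereas
    // "ite al" sets an extra 'else' bit, which would need the inverse of AL
    // (the notional 'NV' predicate) and is rejected below.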
7581 if (Cond == ARMCC::AL && countPopulation(Mask) != 1)
7582 return Error(Loc, "unpredictable IT predicate sequence");
7583 break;
7584 }
7585 case ARM::LDRD:
7586 if (validateLDRDSTRD(Inst, Operands, /*Load*/true, /*ARMMode*/true,
7587 /*Writeback*/false))
7588 return true;
7589 break;
7590 case ARM::LDRD_PRE:
7591 case ARM::LDRD_POST:
7592 if (validateLDRDSTRD(Inst, Operands, /*Load*/true, /*ARMMode*/true,
7593 /*Writeback*/true))
7594 return true;
7595 break;
7596 case ARM::t2LDRDi8:
7597 if (validateLDRDSTRD(Inst, Operands, /*Load*/true, /*ARMMode*/false,
7598 /*Writeback*/false))
7599 return true;
7600 break;
7601 case ARM::t2LDRD_PRE:
7602 case ARM::t2LDRD_POST:
7603 if (validateLDRDSTRD(Inst, Operands, /*Load*/true, /*ARMMode*/false,
7604 /*Writeback*/true))
7605 return true;
7606 break;
7607 case ARM::t2BXJ: {
7608 const unsigned RmReg = Inst.getOperand(0).getReg();
7609 // Rm = SP is no longer unpredictable in v8-A
7610 if (RmReg == ARM::SP && !hasV8Ops())
7611 return Error(Operands[2]->getStartLoc(),
7612 "r13 (SP) is an unpredictable operand to BXJ");
7613 return false;
7614 }
7615 case ARM::STRD:
7616 if (validateLDRDSTRD(Inst, Operands, /*Load*/false, /*ARMMode*/true,
7617 /*Writeback*/false))
7618 return true;
7619 break;
7620 case ARM::STRD_PRE:
7621 case ARM::STRD_POST:
7622 if (validateLDRDSTRD(Inst, Operands, /*Load*/false, /*ARMMode*/true,
7623 /*Writeback*/true))
7624 return true;
7625 break;
7626 case ARM::t2STRD_PRE:
7627 case ARM::t2STRD_POST:
7628 if (validateLDRDSTRD(Inst, Operands, /*Load*/false, /*ARMMode*/false,
7629 /*Writeback*/true))
7630 return true;
7631 break;
7632 case ARM::STR_PRE_IMM:
7633 case ARM::STR_PRE_REG:
7634 case ARM::t2STR_PRE:
7635 case ARM::STR_POST_IMM:
7636 case ARM::STR_POST_REG:
7637 case ARM::t2STR_POST:
7638 case ARM::STRH_PRE:
7639 case ARM::t2STRH_PRE:
7640 case ARM::STRH_POST:
7641 case ARM::t2STRH_POST:
7642 case ARM::STRB_PRE_IMM:
7643 case ARM::STRB_PRE_REG:
7644 case ARM::t2STRB_PRE:
7645 case ARM::STRB_POST_IMM:
7646 case ARM::STRB_POST_REG:
7647 case ARM::t2STRB_POST: {
7648 // Rt must be different from Rn.
7649 const unsigned Rt = MRI->getEncodingValue(Inst.getOperand(1).getReg());
7650 const unsigned Rn = MRI->getEncodingValue(Inst.getOperand(2).getReg());
7651
7652 if (Rt == Rn)
7653 return Error(Operands[3]->getStartLoc(),
7654 "source register and base register can't be identical");
7655 return false;
7656 }
7657 case ARM::LDR_PRE_IMM:
7658 case ARM::LDR_PRE_REG:
7659 case ARM::t2LDR_PRE:
7660 case ARM::LDR_POST_IMM:
7661 case ARM::LDR_POST_REG:
7662 case ARM::t2LDR_POST:
7663 case ARM::LDRH_PRE:
7664 case ARM::t2LDRH_PRE:
7665 case ARM::LDRH_POST:
7666 case ARM::t2LDRH_POST:
7667 case ARM::LDRSH_PRE:
7668 case ARM::t2LDRSH_PRE:
7669 case ARM::LDRSH_POST:
7670 case ARM::t2LDRSH_POST:
7671 case ARM::LDRB_PRE_IMM:
7672 case ARM::LDRB_PRE_REG:
7673 case ARM::t2LDRB_PRE:
7674 case ARM::LDRB_POST_IMM:
7675 case ARM::LDRB_POST_REG:
7676 case ARM::t2LDRB_POST:
7677 case ARM::LDRSB_PRE:
7678 case ARM::t2LDRSB_PRE:
7679 case ARM::LDRSB_POST:
7680 case ARM::t2LDRSB_POST: {
7681 // Rt must be different from Rn.
7682 const unsigned Rt = MRI->getEncodingValue(Inst.getOperand(0).getReg());
7683 const unsigned Rn = MRI->getEncodingValue(Inst.getOperand(2).getReg());
7684
7685 if (Rt == Rn)
7686 return Error(Operands[3]->getStartLoc(),
7687 "destination register and base register can't be identical");
7688 return false;
7689 }
7690
7691 case ARM::MVE_VLDRBU8_rq:
7692 case ARM::MVE_VLDRBU16_rq:
7693 case ARM::MVE_VLDRBS16_rq:
7694 case ARM::MVE_VLDRBU32_rq:
7695 case ARM::MVE_VLDRBS32_rq:
7696 case ARM::MVE_VLDRHU16_rq:
7697 case ARM::MVE_VLDRHU16_rq_u:
7698 case ARM::MVE_VLDRHU32_rq:
7699 case ARM::MVE_VLDRHU32_rq_u:
7700 case ARM::MVE_VLDRHS32_rq:
7701 case ARM::MVE_VLDRHS32_rq_u:
7702 case ARM::MVE_VLDRWU32_rq:
7703 case ARM::MVE_VLDRWU32_rq_u:
7704 case ARM::MVE_VLDRDU64_rq:
7705 case ARM::MVE_VLDRDU64_rq_u:
7706 case ARM::MVE_VLDRWU32_qi:
7707 case ARM::MVE_VLDRWU32_qi_pre:
7708 case ARM::MVE_VLDRDU64_qi:
7709 case ARM::MVE_VLDRDU64_qi_pre: {
7710 // Qd must be different from Qm.
7711 unsigned QdIdx = 0, QmIdx = 2;
7712 bool QmIsPointer = false;
7713 switch (Opcode) {
7714 case ARM::MVE_VLDRWU32_qi:
7715 case ARM::MVE_VLDRDU64_qi:
7716 QmIdx = 1;
7717 QmIsPointer = true;
7718 break;
7719 case ARM::MVE_VLDRWU32_qi_pre:
7720 case ARM::MVE_VLDRDU64_qi_pre:
7721 QdIdx = 1;
7722 QmIsPointer = true;
7723 break;
7724 }
7725
7726 const unsigned Qd = MRI->getEncodingValue(Inst.getOperand(QdIdx).getReg());
7727 const unsigned Qm = MRI->getEncodingValue(Inst.getOperand(QmIdx).getReg());
7728
7729 if (Qd == Qm) {
7730 return Error(Operands[3]->getStartLoc(),
7731 Twine("destination vector register and vector ") +
7732 (QmIsPointer ? "pointer" : "offset") +
7733 " register can't be identical");
7734 }
7735 return false;
7736 }
7737
7738 case ARM::SBFX:
7739 case ARM::t2SBFX:
7740 case ARM::UBFX:
7741 case ARM::t2UBFX: {
7742 // Width must be in range [1, 32-lsb].
7743 unsigned LSB = Inst.getOperand(2).getImm();
7744 unsigned Widthm1 = Inst.getOperand(3).getImm();
7745 if (Widthm1 >= 32 - LSB)
7746 return Error(Operands[5]->getStartLoc(),
7747 "bitfield width must be in range [1,32-lsb]");
7748 return false;
7749 }
7750 // Notionally handles ARM::tLDMIA_UPD too.
7751 case ARM::tLDMIA: {
7752 // If we're parsing Thumb2, the .w variant is available and handles
7753 // most cases that are normally illegal for a Thumb1 LDM instruction.
7754 // We'll make the transformation in processInstruction() if necessary.
7755 //
7756 // Thumb LDM instructions are writeback iff the base register is not
7757 // in the register list.
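    // Editorial illustration (not part of the original source):
    // "ldmia r4!, {r0-r3}" needs the '!' because r4 is not in the list, while
    // "ldmia r4, {r3, r4}" must not have one because the base register
    // appears in the list.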
7758 unsigned Rn = Inst.getOperand(0).getReg();
7759 bool HasWritebackToken =
7760 (static_cast<ARMOperand &>(*Operands[3]).isToken() &&
7761 static_cast<ARMOperand &>(*Operands[3]).getToken() == "!");
7762 bool ListContainsBase;
7763 if (checkLowRegisterList(Inst, 3, Rn, 0, ListContainsBase) && !isThumbTwo())
7764 return Error(Operands[3 + HasWritebackToken]->getStartLoc(),
7765 "registers must be in range r0-r7");
7766 // If we should have writeback, then there should be a '!' token.
7767 if (!ListContainsBase && !HasWritebackToken && !isThumbTwo())
7768 return Error(Operands[2]->getStartLoc(),
7769 "writeback operator '!' expected");
7770 // If we should not have writeback, there must not be a '!'. This is
7771 // true even for the 32-bit wide encodings.
7772 if (ListContainsBase && HasWritebackToken)
7773 return Error(Operands[3]->getStartLoc(),
7774 "writeback operator '!' not allowed when base register "
7775 "in register list");
7776
7777 if (validatetLDMRegList(Inst, Operands, 3))
7778 return true;
7779 break;
7780 }
7781 case ARM::LDMIA_UPD:
7782 case ARM::LDMDB_UPD:
7783 case ARM::LDMIB_UPD:
7784 case ARM::LDMDA_UPD:
7785 // ARM variants loading and updating the same register are only officially
7786 // UNPREDICTABLE on v7 upwards. Goodness knows what they did before.
7787 if (!hasV7Ops())
7788 break;
7789 if (listContainsReg(Inst, 3, Inst.getOperand(0).getReg()))
7790 return Error(Operands.back()->getStartLoc(),
7791 "writeback register not allowed in register list");
7792 break;
7793 case ARM::t2LDMIA:
7794 case ARM::t2LDMDB:
7795 if (validatetLDMRegList(Inst, Operands, 3))
7796 return true;
7797 break;
7798 case ARM::t2STMIA:
7799 case ARM::t2STMDB:
7800 if (validatetSTMRegList(Inst, Operands, 3))
7801 return true;
7802 break;
7803 case ARM::t2LDMIA_UPD:
7804 case ARM::t2LDMDB_UPD:
7805 case ARM::t2STMIA_UPD:
7806 case ARM::t2STMDB_UPD:
7807 if (listContainsReg(Inst, 3, Inst.getOperand(0).getReg()))
7808 return Error(Operands.back()->getStartLoc(),
7809 "writeback register not allowed in register list");
7810
7811 if (Opcode == ARM::t2LDMIA_UPD || Opcode == ARM::t2LDMDB_UPD) {
7812 if (validatetLDMRegList(Inst, Operands, 3))
7813 return true;
7814 } else {
7815 if (validatetSTMRegList(Inst, Operands, 3))
7816 return true;
7817 }
7818 break;
7819
7820 case ARM::sysLDMIA_UPD:
7821 case ARM::sysLDMDA_UPD:
7822 case ARM::sysLDMDB_UPD:
7823 case ARM::sysLDMIB_UPD:
7824 if (!listContainsReg(Inst, 3, ARM::PC))
7825 return Error(Operands[4]->getStartLoc(),
7826 "writeback register only allowed on system LDM "
7827 "if PC in register-list");
7828 break;
7829 case ARM::sysSTMIA_UPD:
7830 case ARM::sysSTMDA_UPD:
7831 case ARM::sysSTMDB_UPD:
7832 case ARM::sysSTMIB_UPD:
7833 return Error(Operands[2]->getStartLoc(),
7834 "system STM cannot have writeback register");
7835 case ARM::tMUL:
7836 // The second source operand must be the same register as the destination
7837 // operand.
7838 //
7839 // In this case, we must directly check the parsed operands because the
7840 // cvtThumbMultiply() function is written in such a way that it guarantees
7841 // this first statement is always true for the new Inst. Essentially, the
7842 // destination is unconditionally copied into the second source operand
7843 // without checking to see if it matches what we actually parsed.
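    // Editorial illustration (not part of the original source): in the 16-bit
    // encoding "muls r0, r1, r0" is accepted (the destination matches a
    // source), while "muls r0, r1, r2" is diagnosed here, since
    // cvtThumbMultiply() would otherwise silently overwrite the second source
    // with r0.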
7844 if (Operands.size() == 6 && (((ARMOperand &)*Operands[3]).getReg() !=
7845 ((ARMOperand &)*Operands[5]).getReg()) &&
7846 (((ARMOperand &)*Operands[3]).getReg() !=
7847 ((ARMOperand &)*Operands[4]).getReg())) {
7848 return Error(Operands[3]->getStartLoc(),
7849 "destination register must match source register");
7850 }
7851 break;
7852
7853 // Like for ldm/stm, push and pop have a hi-reg handling version in Thumb2,
7854 // so only issue a diagnostic for thumb1. The instructions will be
7855 // switched to the t2 encodings in processInstruction() if necessary.
7856 case ARM::tPOP: {
7857 bool ListContainsBase;
7858 if (checkLowRegisterList(Inst, 2, 0, ARM::PC, ListContainsBase) &&
7859 !isThumbTwo())
7860 return Error(Operands[2]->getStartLoc(),
7861 "registers must be in range r0-r7 or pc");
7862 if (validatetLDMRegList(Inst, Operands, 2, !isMClass()))
7863 return true;
7864 break;
7865 }
7866 case ARM::tPUSH: {
7867 bool ListContainsBase;
7868 if (checkLowRegisterList(Inst, 2, 0, ARM::LR, ListContainsBase) &&
7869 !isThumbTwo())
7870 return Error(Operands[2]->getStartLoc(),
7871 "registers must be in range r0-r7 or lr");
7872 if (validatetSTMRegList(Inst, Operands, 2))
7873 return true;
7874 break;
7875 }
7876 case ARM::tSTMIA_UPD: {
7877 bool ListContainsBase, InvalidLowList;
7878 InvalidLowList = checkLowRegisterList(Inst, 4, Inst.getOperand(0).getReg(),
7879 0, ListContainsBase);
7880 if (InvalidLowList && !isThumbTwo())
7881 return Error(Operands[4]->getStartLoc(),
7882 "registers must be in range r0-r7");
7883
7884 // This would be converted to a 32-bit stm, but that's not valid if the
7885 // writeback register is in the list.
7886 if (InvalidLowList && ListContainsBase)
7887 return Error(Operands[4]->getStartLoc(),
7888 "writeback operator '!' not allowed when base register "
7889 "in register list");
7890
7891 if (validatetSTMRegList(Inst, Operands, 4))
7892 return true;
7893 break;
7894 }
7895 case ARM::tADDrSP:
7896 // If the non-SP source operand and the destination operand are not the
7897 // same, we need thumb2 (for the wide encoding), or we have an error.
7898 if (!isThumbTwo() &&
7899 Inst.getOperand(0).getReg() != Inst.getOperand(2).getReg()) {
7900 return Error(Operands[4]->getStartLoc(),
7901 "source register must be the same as destination");
7902 }
7903 break;
7904
7905 case ARM::t2ADDrr:
7906 case ARM::t2ADDrs:
7907 case ARM::t2SUBrr:
7908 case ARM::t2SUBrs:
7909 if (Inst.getOperand(0).getReg() == ARM::SP &&
7910 Inst.getOperand(1).getReg() != ARM::SP)
7911 return Error(Operands[4]->getStartLoc(),
7912 "source register must be sp if destination is sp");
7913 break;
7914
7915 // Final range checking for Thumb unconditional branch instructions.
7916 case ARM::tB:
7917 if (!(static_cast<ARMOperand &>(*Operands[2])).isSignedOffset<11, 1>())
7918 return Error(Operands[2]->getStartLoc(), "branch target out of range");
7919 break;
7920 case ARM::t2B: {
7921 int op = (Operands[2]->isImm()) ? 2 : 3;
7922 if (!static_cast<ARMOperand &>(*Operands[op]).isSignedOffset<24, 1>())
7923 return Error(Operands[op]->getStartLoc(), "branch target out of range");
7924 break;
7925 }
7926 // Final range checking for Thumb conditional branch instructions.
7927 case ARM::tBcc:
7928 if (!static_cast<ARMOperand &>(*Operands[2]).isSignedOffset<8, 1>())
7929 return Error(Operands[2]->getStartLoc(), "branch target out of range");
7930 break;
7931 case ARM::t2Bcc: {
7932 int Op = (Operands[2]->isImm()) ? 2 : 3;
7933 if (!static_cast<ARMOperand &>(*Operands[Op]).isSignedOffset<20, 1>())
7934 return Error(Operands[Op]->getStartLoc(), "branch target out of range");
7935 break;
7936 }
7937 case ARM::tCBZ:
7938 case ARM::tCBNZ: {
7939 if (!static_cast<ARMOperand &>(*Operands[2]).isUnsignedOffset<6, 1>())
7940 return Error(Operands[2]->getStartLoc(), "branch target out of range");
7941 break;
7942 }
7943 case ARM::MOVi16:
7944 case ARM::MOVTi16:
7945 case ARM::t2MOVi16:
7946 case ARM::t2MOVTi16:
7947 {
7948 // We want to avoid misleadingly allowing something like "mov r0, <symbol>"
7949 // especially when we turn it into a movw and the expression <symbol> does
7950 // not have a :lower16: or :upper16: as part of the expression. We don't
7951 // want the behavior of silently truncating, which can be unexpected and
7952 // lead to bugs that are difficult to find since this is an easy mistake
7953 // to make.
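    // Editorial illustration (not part of the original source):
    // "movw r0, #:lower16:foo" carries an explicit relocation specifier and is
    // accepted, while a bare "movw r0, foo" is rejected below rather than
    // silently truncated.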
7954 int i = (Operands[3]->isImm()) ? 3 : 4;
7955 ARMOperand &Op = static_cast<ARMOperand &>(*Operands[i]);
7956 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Op.getImm());
7957 if (CE) break;
7958 const MCExpr *E = dyn_cast<MCExpr>(Op.getImm());
7959 if (!E) break;
7960 const ARMMCExpr *ARM16Expr = dyn_cast<ARMMCExpr>(E);
7961 if (!ARM16Expr || (ARM16Expr->getKind() != ARMMCExpr::VK_ARM_HI16 &&
7962 ARM16Expr->getKind() != ARMMCExpr::VK_ARM_LO16))
7963 return Error(
7964 Op.getStartLoc(),
7965 "immediate expression for mov requires :lower16: or :upper16");
7966 break;
7967 }
7968 case ARM::HINT:
7969 case ARM::t2HINT: {
7970 unsigned Imm8 = Inst.getOperand(0).getImm();
7971 unsigned Pred = Inst.getOperand(1).getImm();
7972 // ESB is not predicable (pred must be AL). Without the RAS extension, this
7973 // behaves as any other unallocated hint.
7974 if (Imm8 == 0x10 && Pred != ARMCC::AL && hasRAS())
7975 return Error(Operands[1]->getStartLoc(), "instruction 'esb' is not "
7976 "predicable, but condition "
7977 "code specified");
7978 if (Imm8 == 0x14 && Pred != ARMCC::AL)
7979 return Error(Operands[1]->getStartLoc(), "instruction 'csdb' is not "
7980 "predicable, but condition "
7981 "code specified");
7982 break;
7983 }
7984 case ARM::t2BFi:
7985 case ARM::t2BFr:
7986 case ARM::t2BFLi:
7987 case ARM::t2BFLr: {
7988 if (!static_cast<ARMOperand &>(*Operands[2]).isUnsignedOffset<4, 1>() ||
7989 (Inst.getOperand(0).isImm() && Inst.getOperand(0).getImm() == 0))
7990 return Error(Operands[2]->getStartLoc(),
7991 "branch location out of range or not a multiple of 2");
7992
7993 if (Opcode == ARM::t2BFi) {
7994 if (!static_cast<ARMOperand &>(*Operands[3]).isSignedOffset<16, 1>())
7995 return Error(Operands[3]->getStartLoc(),
7996 "branch target out of range or not a multiple of 2");
7997 } else if (Opcode == ARM::t2BFLi) {
7998 if (!static_cast<ARMOperand &>(*Operands[3]).isSignedOffset<18, 1>())
7999 return Error(Operands[3]->getStartLoc(),
8000 "branch target out of range or not a multiple of 2");
8001 }
8002 break;
8003 }
8004 case ARM::t2BFic: {
8005 if (!static_cast<ARMOperand &>(*Operands[1]).isUnsignedOffset<4, 1>() ||
8006 (Inst.getOperand(0).isImm() && Inst.getOperand(0).getImm() == 0))
8007 return Error(Operands[1]->getStartLoc(),
8008 "branch location out of range or not a multiple of 2");
8009
8010 if (!static_cast<ARMOperand &>(*Operands[2]).isSignedOffset<16, 1>())
8011 return Error(Operands[2]->getStartLoc(),
8012 "branch target out of range or not a multiple of 2");
8013
8014    assert(Inst.getOperand(0).isImm() == Inst.getOperand(2).isImm() &&
8015           "branch location and else branch target should either both be "
8016           "immediates or both labels");
8017
8018 if (Inst.getOperand(0).isImm() && Inst.getOperand(2).isImm()) {
8019 int Diff = Inst.getOperand(2).getImm() - Inst.getOperand(0).getImm();
8020 if (Diff != 4 && Diff != 2)
8021 return Error(
8022 Operands[3]->getStartLoc(),
8023 "else branch target must be 2 or 4 greater than the branch location");
8024 }
8025 break;
8026 }
8027 case ARM::t2CLRM: {
8028 for (unsigned i = 2; i < Inst.getNumOperands(); i++) {
8029 if (Inst.getOperand(i).isReg() &&
8030 !ARMMCRegisterClasses[ARM::GPRwithAPSRnospRegClassID].contains(
8031 Inst.getOperand(i).getReg())) {
8032 return Error(Operands[2]->getStartLoc(),
8033 "invalid register in register list. Valid registers are "
8034 "r0-r12, lr/r14 and APSR.");
8035 }
8036 }
8037 break;
8038 }
8039 case ARM::DSB:
8040 case ARM::t2DSB: {
8041
8042 if (Inst.getNumOperands() < 2)
8043 break;
8044
8045 unsigned Option = Inst.getOperand(0).getImm();
8046 unsigned Pred = Inst.getOperand(1).getImm();
8047
8048 // SSBB and PSSBB (DSB #0|#4) are not predicable (pred must be AL).
8049 if (Option == 0 && Pred != ARMCC::AL)
8050 return Error(Operands[1]->getStartLoc(),
8051 "instruction 'ssbb' is not predicable, but condition code "
8052 "specified");
8053 if (Option == 4 && Pred != ARMCC::AL)
8054 return Error(Operands[1]->getStartLoc(),
8055 "instruction 'pssbb' is not predicable, but condition code "
8056 "specified");
8057 break;
8058 }
8059 case ARM::VMOVRRS: {
8060 // Source registers must be sequential.
8061 const unsigned Sm = MRI->getEncodingValue(Inst.getOperand(2).getReg());
8062 const unsigned Sm1 = MRI->getEncodingValue(Inst.getOperand(3).getReg());
8063 if (Sm1 != Sm + 1)
8064 return Error(Operands[5]->getStartLoc(),
8065 "source operands must be sequential");
8066 break;
8067 }
8068 case ARM::VMOVSRR: {
8069 // Destination registers must be sequential.
8070 const unsigned Sm = MRI->getEncodingValue(Inst.getOperand(0).getReg());
8071 const unsigned Sm1 = MRI->getEncodingValue(Inst.getOperand(1).getReg());
8072 if (Sm1 != Sm + 1)
8073 return Error(Operands[3]->getStartLoc(),
8074 "destination operands must be sequential");
8075 break;
8076 }
8077 case ARM::VLDMDIA:
8078 case ARM::VSTMDIA: {
8079 ARMOperand &Op = static_cast<ARMOperand&>(*Operands[3]);
8080 auto &RegList = Op.getRegList();
8081 if (RegList.size() < 1 || RegList.size() > 16)
8082 return Error(Operands[3]->getStartLoc(),
8083 "list of registers must be at least 1 and at most 16");
8084 break;
8085 }
8086 case ARM::MVE_VQDMULLs32bh:
8087 case ARM::MVE_VQDMULLs32th:
8088 case ARM::MVE_VCMULf32:
8089 case ARM::MVE_VMULLBs32:
8090 case ARM::MVE_VMULLTs32:
8091 case ARM::MVE_VMULLBu32:
8092 case ARM::MVE_VMULLTu32: {
8093 if (Operands[3]->getReg() == Operands[4]->getReg()) {
8094 return Error (Operands[3]->getStartLoc(),
8095 "Qd register and Qn register can't be identical");
8096 }
8097 if (Operands[3]->getReg() == Operands[5]->getReg()) {
8098 return Error (Operands[3]->getStartLoc(),
8099 "Qd register and Qm register can't be identical");
8100 }
8101 break;
8102 }
8103 case ARM::MVE_VMOV_rr_q: {
8104 if (Operands[4]->getReg() != Operands[6]->getReg())
8105 return Error (Operands[4]->getStartLoc(), "Q-registers must be the same");
8106 if (static_cast<ARMOperand &>(*Operands[5]).getVectorIndex() !=
8107 static_cast<ARMOperand &>(*Operands[7]).getVectorIndex() + 2)
8108 return Error (Operands[5]->getStartLoc(), "Q-register indexes must be 2 and 0 or 3 and 1");
8109 break;
8110 }
8111 case ARM::MVE_VMOV_q_rr: {
8112 if (Operands[2]->getReg() != Operands[4]->getReg())
8113 return Error (Operands[2]->getStartLoc(), "Q-registers must be the same");
8114 if (static_cast<ARMOperand &>(*Operands[3]).getVectorIndex() !=
8115 static_cast<ARMOperand &>(*Operands[5]).getVectorIndex() + 2)
8116 return Error (Operands[3]->getStartLoc(), "Q-register indexes must be 2 and 0 or 3 and 1");
8117 break;
8118 }
8119 case ARM::UMAAL:
8120 case ARM::UMLAL:
8121 case ARM::UMULL:
8122 case ARM::t2UMAAL:
8123 case ARM::t2UMLAL:
8124 case ARM::t2UMULL:
8125 case ARM::SMLAL:
8126 case ARM::SMLALBB:
8127 case ARM::SMLALBT:
8128 case ARM::SMLALD:
8129 case ARM::SMLALDX:
8130 case ARM::SMLALTB:
8131 case ARM::SMLALTT:
8132 case ARM::SMLSLD:
8133 case ARM::SMLSLDX:
8134 case ARM::SMULL:
8135 case ARM::t2SMLAL:
8136 case ARM::t2SMLALBB:
8137 case ARM::t2SMLALBT:
8138 case ARM::t2SMLALD:
8139 case ARM::t2SMLALDX:
8140 case ARM::t2SMLALTB:
8141 case ARM::t2SMLALTT:
8142 case ARM::t2SMLSLD:
8143 case ARM::t2SMLSLDX:
8144 case ARM::t2SMULL: {
8145 unsigned RdHi = Inst.getOperand(0).getReg();
8146 unsigned RdLo = Inst.getOperand(1).getReg();
8147 if(RdHi == RdLo) {
8148 return Error(Loc,
8149 "unpredictable instruction, RdHi and RdLo must be different");
8150 }
8151 break;
8152 }
8153
8154 case ARM::CDE_CX1:
8155 case ARM::CDE_CX1A:
8156 case ARM::CDE_CX1D:
8157 case ARM::CDE_CX1DA:
8158 case ARM::CDE_CX2:
8159 case ARM::CDE_CX2A:
8160 case ARM::CDE_CX2D:
8161 case ARM::CDE_CX2DA:
8162 case ARM::CDE_CX3:
8163 case ARM::CDE_CX3A:
8164 case ARM::CDE_CX3D:
8165 case ARM::CDE_CX3DA:
8166 case ARM::CDE_VCX1_vec:
8167 case ARM::CDE_VCX1_fpsp:
8168 case ARM::CDE_VCX1_fpdp:
8169 case ARM::CDE_VCX1A_vec:
8170 case ARM::CDE_VCX1A_fpsp:
8171 case ARM::CDE_VCX1A_fpdp:
8172 case ARM::CDE_VCX2_vec:
8173 case ARM::CDE_VCX2_fpsp:
8174 case ARM::CDE_VCX2_fpdp:
8175 case ARM::CDE_VCX2A_vec:
8176 case ARM::CDE_VCX2A_fpsp:
8177 case ARM::CDE_VCX2A_fpdp:
8178 case ARM::CDE_VCX3_vec:
8179 case ARM::CDE_VCX3_fpsp:
8180 case ARM::CDE_VCX3_fpdp:
8181 case ARM::CDE_VCX3A_vec:
8182 case ARM::CDE_VCX3A_fpsp:
8183 case ARM::CDE_VCX3A_fpdp: {
8184    assert(Inst.getOperand(1).isImm() &&
8185           "CDE operand 1 must be a coprocessor ID");
8186 int64_t Coproc = Inst.getOperand(1).getImm();
8187 if (Coproc < 8 && !ARM::isCDECoproc(Coproc, *STI))
8188 return Error(Operands[1]->getStartLoc(),
8189 "coprocessor must be configured as CDE");
8190 else if (Coproc >= 8)
8191 return Error(Operands[1]->getStartLoc(),
8192 "coprocessor must be in the range [p0, p7]");
8193 break;
8194 }
8195
8196 case ARM::t2CDP:
8197 case ARM::t2CDP2:
8198 case ARM::t2LDC2L_OFFSET:
8199 case ARM::t2LDC2L_OPTION:
8200 case ARM::t2LDC2L_POST:
8201 case ARM::t2LDC2L_PRE:
8202 case ARM::t2LDC2_OFFSET:
8203 case ARM::t2LDC2_OPTION:
8204 case ARM::t2LDC2_POST:
8205 case ARM::t2LDC2_PRE:
8206 case ARM::t2LDCL_OFFSET:
8207 case ARM::t2LDCL_OPTION:
8208 case ARM::t2LDCL_POST:
8209 case ARM::t2LDCL_PRE:
8210 case ARM::t2LDC_OFFSET:
8211 case ARM::t2LDC_OPTION:
8212 case ARM::t2LDC_POST:
8213 case ARM::t2LDC_PRE:
8214 case ARM::t2MCR:
8215 case ARM::t2MCR2:
8216 case ARM::t2MCRR:
8217 case ARM::t2MCRR2:
8218 case ARM::t2MRC:
8219 case ARM::t2MRC2:
8220 case ARM::t2MRRC:
8221 case ARM::t2MRRC2:
8222 case ARM::t2STC2L_OFFSET:
8223 case ARM::t2STC2L_OPTION:
8224 case ARM::t2STC2L_POST:
8225 case ARM::t2STC2L_PRE:
8226 case ARM::t2STC2_OFFSET:
8227 case ARM::t2STC2_OPTION:
8228 case ARM::t2STC2_POST:
8229 case ARM::t2STC2_PRE:
8230 case ARM::t2STCL_OFFSET:
8231 case ARM::t2STCL_OPTION:
8232 case ARM::t2STCL_POST:
8233 case ARM::t2STCL_PRE:
8234 case ARM::t2STC_OFFSET:
8235 case ARM::t2STC_OPTION:
8236 case ARM::t2STC_POST:
8237 case ARM::t2STC_PRE: {
8238 unsigned Opcode = Inst.getOpcode();
8239 // Inst.getOperand indexes operands in the (oops ...) and (iops ...) dags,
8240 // CopInd is the index of the coprocessor operand.
8241 size_t CopInd = 0;
8242 if (Opcode == ARM::t2MRRC || Opcode == ARM::t2MRRC2)
8243 CopInd = 2;
8244 else if (Opcode == ARM::t2MRC || Opcode == ARM::t2MRC2)
8245 CopInd = 1;
8246    assert(Inst.getOperand(CopInd).isImm() &&
8247           "Operand must be a coprocessor ID");
8248 int64_t Coproc = Inst.getOperand(CopInd).getImm();
8249 // Operands[2] is the coprocessor operand at syntactic level
8250 if (ARM::isCDECoproc(Coproc, *STI))
8251 return Error(Operands[2]->getStartLoc(),
8252 "coprocessor must be configured as GCP");
8253 break;
8254 }
8255 }
8256
8257 return false;
8258}
8259
8260static unsigned getRealVSTOpcode(unsigned Opc, unsigned &Spacing) {
8261 switch(Opc) {
8262  default: llvm_unreachable("unexpected opcode!");
8263 // VST1LN
8264 case ARM::VST1LNdWB_fixed_Asm_8: Spacing = 1; return ARM::VST1LNd8_UPD;
8265 case ARM::VST1LNdWB_fixed_Asm_16: Spacing = 1; return ARM::VST1LNd16_UPD;
8266 case ARM::VST1LNdWB_fixed_Asm_32: Spacing = 1; return ARM::VST1LNd32_UPD;
8267 case ARM::VST1LNdWB_register_Asm_8: Spacing = 1; return ARM::VST1LNd8_UPD;
8268 case ARM::VST1LNdWB_register_Asm_16: Spacing = 1; return ARM::VST1LNd16_UPD;
8269 case ARM::VST1LNdWB_register_Asm_32: Spacing = 1; return ARM::VST1LNd32_UPD;
8270 case ARM::VST1LNdAsm_8: Spacing = 1; return ARM::VST1LNd8;
8271 case ARM::VST1LNdAsm_16: Spacing = 1; return ARM::VST1LNd16;
8272 case ARM::VST1LNdAsm_32: Spacing = 1; return ARM::VST1LNd32;
8273
8274 // VST2LN
8275 case ARM::VST2LNdWB_fixed_Asm_8: Spacing = 1; return ARM::VST2LNd8_UPD;
8276 case ARM::VST2LNdWB_fixed_Asm_16: Spacing = 1; return ARM::VST2LNd16_UPD;
8277 case ARM::VST2LNdWB_fixed_Asm_32: Spacing = 1; return ARM::VST2LNd32_UPD;
8278 case ARM::VST2LNqWB_fixed_Asm_16: Spacing = 2; return ARM::VST2LNq16_UPD;
8279 case ARM::VST2LNqWB_fixed_Asm_32: Spacing = 2; return ARM::VST2LNq32_UPD;
8280
8281 case ARM::VST2LNdWB_register_Asm_8: Spacing = 1; return ARM::VST2LNd8_UPD;
8282 case ARM::VST2LNdWB_register_Asm_16: Spacing = 1; return ARM::VST2LNd16_UPD;
8283 case ARM::VST2LNdWB_register_Asm_32: Spacing = 1; return ARM::VST2LNd32_UPD;
8284 case ARM::VST2LNqWB_register_Asm_16: Spacing = 2; return ARM::VST2LNq16_UPD;
8285 case ARM::VST2LNqWB_register_Asm_32: Spacing = 2; return ARM::VST2LNq32_UPD;
8286
8287 case ARM::VST2LNdAsm_8: Spacing = 1; return ARM::VST2LNd8;
8288 case ARM::VST2LNdAsm_16: Spacing = 1; return ARM::VST2LNd16;
8289 case ARM::VST2LNdAsm_32: Spacing = 1; return ARM::VST2LNd32;
8290 case ARM::VST2LNqAsm_16: Spacing = 2; return ARM::VST2LNq16;
8291 case ARM::VST2LNqAsm_32: Spacing = 2; return ARM::VST2LNq32;
8292
8293 // VST3LN
8294 case ARM::VST3LNdWB_fixed_Asm_8: Spacing = 1; return ARM::VST3LNd8_UPD;
8295 case ARM::VST3LNdWB_fixed_Asm_16: Spacing = 1; return ARM::VST3LNd16_UPD;
8296 case ARM::VST3LNdWB_fixed_Asm_32: Spacing = 1; return ARM::VST3LNd32_UPD;
8297 case ARM::VST3LNqWB_fixed_Asm_16: Spacing = 1; return ARM::VST3LNq16_UPD;
8298 case ARM::VST3LNqWB_fixed_Asm_32: Spacing = 2; return ARM::VST3LNq32_UPD;
8299 case ARM::VST3LNdWB_register_Asm_8: Spacing = 1; return ARM::VST3LNd8_UPD;
8300 case ARM::VST3LNdWB_register_Asm_16: Spacing = 1; return ARM::VST3LNd16_UPD;
8301 case ARM::VST3LNdWB_register_Asm_32: Spacing = 1; return ARM::VST3LNd32_UPD;
8302 case ARM::VST3LNqWB_register_Asm_16: Spacing = 2; return ARM::VST3LNq16_UPD;
8303 case ARM::VST3LNqWB_register_Asm_32: Spacing = 2; return ARM::VST3LNq32_UPD;
8304 case ARM::VST3LNdAsm_8: Spacing = 1; return ARM::VST3LNd8;
8305 case ARM::VST3LNdAsm_16: Spacing = 1; return ARM::VST3LNd16;
8306 case ARM::VST3LNdAsm_32: Spacing = 1; return ARM::VST3LNd32;
8307 case ARM::VST3LNqAsm_16: Spacing = 2; return ARM::VST3LNq16;
8308 case ARM::VST3LNqAsm_32: Spacing = 2; return ARM::VST3LNq32;
8309
8310 // VST3
8311 case ARM::VST3dWB_fixed_Asm_8: Spacing = 1; return ARM::VST3d8_UPD;
8312 case ARM::VST3dWB_fixed_Asm_16: Spacing = 1; return ARM::VST3d16_UPD;
8313 case ARM::VST3dWB_fixed_Asm_32: Spacing = 1; return ARM::VST3d32_UPD;
8314 case ARM::VST3qWB_fixed_Asm_8: Spacing = 2; return ARM::VST3q8_UPD;
8315 case ARM::VST3qWB_fixed_Asm_16: Spacing = 2; return ARM::VST3q16_UPD;
8316 case ARM::VST3qWB_fixed_Asm_32: Spacing = 2; return ARM::VST3q32_UPD;
8317 case ARM::VST3dWB_register_Asm_8: Spacing = 1; return ARM::VST3d8_UPD;
8318 case ARM::VST3dWB_register_Asm_16: Spacing = 1; return ARM::VST3d16_UPD;
8319 case ARM::VST3dWB_register_Asm_32: Spacing = 1; return ARM::VST3d32_UPD;
8320 case ARM::VST3qWB_register_Asm_8: Spacing = 2; return ARM::VST3q8_UPD;
8321 case ARM::VST3qWB_register_Asm_16: Spacing = 2; return ARM::VST3q16_UPD;
8322 case ARM::VST3qWB_register_Asm_32: Spacing = 2; return ARM::VST3q32_UPD;
8323 case ARM::VST3dAsm_8: Spacing = 1; return ARM::VST3d8;
8324 case ARM::VST3dAsm_16: Spacing = 1; return ARM::VST3d16;
8325 case ARM::VST3dAsm_32: Spacing = 1; return ARM::VST3d32;
8326 case ARM::VST3qAsm_8: Spacing = 2; return ARM::VST3q8;
8327 case ARM::VST3qAsm_16: Spacing = 2; return ARM::VST3q16;
8328 case ARM::VST3qAsm_32: Spacing = 2; return ARM::VST3q32;
8329
8330 // VST4LN
8331 case ARM::VST4LNdWB_fixed_Asm_8: Spacing = 1; return ARM::VST4LNd8_UPD;
8332 case ARM::VST4LNdWB_fixed_Asm_16: Spacing = 1; return ARM::VST4LNd16_UPD;
8333 case ARM::VST4LNdWB_fixed_Asm_32: Spacing = 1; return ARM::VST4LNd32_UPD;
8334 case ARM::VST4LNqWB_fixed_Asm_16: Spacing = 1; return ARM::VST4LNq16_UPD;
8335 case ARM::VST4LNqWB_fixed_Asm_32: Spacing = 2; return ARM::VST4LNq32_UPD;
8336 case ARM::VST4LNdWB_register_Asm_8: Spacing = 1; return ARM::VST4LNd8_UPD;
8337 case ARM::VST4LNdWB_register_Asm_16: Spacing = 1; return ARM::VST4LNd16_UPD;
8338 case ARM::VST4LNdWB_register_Asm_32: Spacing = 1; return ARM::VST4LNd32_UPD;
8339 case ARM::VST4LNqWB_register_Asm_16: Spacing = 2; return ARM::VST4LNq16_UPD;
8340 case ARM::VST4LNqWB_register_Asm_32: Spacing = 2; return ARM::VST4LNq32_UPD;
8341 case ARM::VST4LNdAsm_8: Spacing = 1; return ARM::VST4LNd8;
8342 case ARM::VST4LNdAsm_16: Spacing = 1; return ARM::VST4LNd16;
8343 case ARM::VST4LNdAsm_32: Spacing = 1; return ARM::VST4LNd32;
8344 case ARM::VST4LNqAsm_16: Spacing = 2; return ARM::VST4LNq16;
8345 case ARM::VST4LNqAsm_32: Spacing = 2; return ARM::VST4LNq32;
8346
8347 // VST4
8348 case ARM::VST4dWB_fixed_Asm_8: Spacing = 1; return ARM::VST4d8_UPD;
8349 case ARM::VST4dWB_fixed_Asm_16: Spacing = 1; return ARM::VST4d16_UPD;
8350 case ARM::VST4dWB_fixed_Asm_32: Spacing = 1; return ARM::VST4d32_UPD;
8351 case ARM::VST4qWB_fixed_Asm_8: Spacing = 2; return ARM::VST4q8_UPD;
8352 case ARM::VST4qWB_fixed_Asm_16: Spacing = 2; return ARM::VST4q16_UPD;
8353 case ARM::VST4qWB_fixed_Asm_32: Spacing = 2; return ARM::VST4q32_UPD;
8354 case ARM::VST4dWB_register_Asm_8: Spacing = 1; return ARM::VST4d8_UPD;
8355 case ARM::VST4dWB_register_Asm_16: Spacing = 1; return ARM::VST4d16_UPD;
8356 case ARM::VST4dWB_register_Asm_32: Spacing = 1; return ARM::VST4d32_UPD;
8357 case ARM::VST4qWB_register_Asm_8: Spacing = 2; return ARM::VST4q8_UPD;
8358 case ARM::VST4qWB_register_Asm_16: Spacing = 2; return ARM::VST4q16_UPD;
8359 case ARM::VST4qWB_register_Asm_32: Spacing = 2; return ARM::VST4q32_UPD;
8360 case ARM::VST4dAsm_8: Spacing = 1; return ARM::VST4d8;
8361 case ARM::VST4dAsm_16: Spacing = 1; return ARM::VST4d16;
8362 case ARM::VST4dAsm_32: Spacing = 1; return ARM::VST4d32;
8363 case ARM::VST4qAsm_8: Spacing = 2; return ARM::VST4q8;
8364 case ARM::VST4qAsm_16: Spacing = 2; return ARM::VST4q16;
8365 case ARM::VST4qAsm_32: Spacing = 2; return ARM::VST4q32;
8366 }
8367}
8368
8369static unsigned getRealVLDOpcode(unsigned Opc, unsigned &Spacing) {
8370 switch(Opc) {
8371  default: llvm_unreachable("unexpected opcode!");
8372 // VLD1LN
8373 case ARM::VLD1LNdWB_fixed_Asm_8: Spacing = 1; return ARM::VLD1LNd8_UPD;
8374 case ARM::VLD1LNdWB_fixed_Asm_16: Spacing = 1; return ARM::VLD1LNd16_UPD;
8375 case ARM::VLD1LNdWB_fixed_Asm_32: Spacing = 1; return ARM::VLD1LNd32_UPD;
8376 case ARM::VLD1LNdWB_register_Asm_8: Spacing = 1; return ARM::VLD1LNd8_UPD;
8377 case ARM::VLD1LNdWB_register_Asm_16: Spacing = 1; return ARM::VLD1LNd16_UPD;
8378 case ARM::VLD1LNdWB_register_Asm_32: Spacing = 1; return ARM::VLD1LNd32_UPD;
8379 case ARM::VLD1LNdAsm_8: Spacing = 1; return ARM::VLD1LNd8;
8380 case ARM::VLD1LNdAsm_16: Spacing = 1; return ARM::VLD1LNd16;
8381 case ARM::VLD1LNdAsm_32: Spacing = 1; return ARM::VLD1LNd32;
8382
8383 // VLD2LN
8384 case ARM::VLD2LNdWB_fixed_Asm_8: Spacing = 1; return ARM::VLD2LNd8_UPD;
8385 case ARM::VLD2LNdWB_fixed_Asm_16: Spacing = 1; return ARM::VLD2LNd16_UPD;
8386 case ARM::VLD2LNdWB_fixed_Asm_32: Spacing = 1; return ARM::VLD2LNd32_UPD;
8387 case ARM::VLD2LNqWB_fixed_Asm_16: Spacing = 1; return ARM::VLD2LNq16_UPD;
8388 case ARM::VLD2LNqWB_fixed_Asm_32: Spacing = 2; return ARM::VLD2LNq32_UPD;
8389 case ARM::VLD2LNdWB_register_Asm_8: Spacing = 1; return ARM::VLD2LNd8_UPD;
8390 case ARM::VLD2LNdWB_register_Asm_16: Spacing = 1; return ARM::VLD2LNd16_UPD;
8391 case ARM::VLD2LNdWB_register_Asm_32: Spacing = 1; return ARM::VLD2LNd32_UPD;
8392 case ARM::VLD2LNqWB_register_Asm_16: Spacing = 2; return ARM::VLD2LNq16_UPD;
8393 case ARM::VLD2LNqWB_register_Asm_32: Spacing = 2; return ARM::VLD2LNq32_UPD;
8394 case ARM::VLD2LNdAsm_8: Spacing = 1; return ARM::VLD2LNd8;
8395 case ARM::VLD2LNdAsm_16: Spacing = 1; return ARM::VLD2LNd16;
8396 case ARM::VLD2LNdAsm_32: Spacing = 1; return ARM::VLD2LNd32;
8397 case ARM::VLD2LNqAsm_16: Spacing = 2; return ARM::VLD2LNq16;
8398 case ARM::VLD2LNqAsm_32: Spacing = 2; return ARM::VLD2LNq32;
8399
8400 // VLD3DUP
8401 case ARM::VLD3DUPdWB_fixed_Asm_8: Spacing = 1; return ARM::VLD3DUPd8_UPD;
8402 case ARM::VLD3DUPdWB_fixed_Asm_16: Spacing = 1; return ARM::VLD3DUPd16_UPD;
8403 case ARM::VLD3DUPdWB_fixed_Asm_32: Spacing = 1; return ARM::VLD3DUPd32_UPD;
8404 case ARM::VLD3DUPqWB_fixed_Asm_8: Spacing = 1; return ARM::VLD3DUPq8_UPD;
8405 case ARM::VLD3DUPqWB_fixed_Asm_16: Spacing = 2; return ARM::VLD3DUPq16_UPD;
8406 case ARM::VLD3DUPqWB_fixed_Asm_32: Spacing = 2; return ARM::VLD3DUPq32_UPD;
8407 case ARM::VLD3DUPdWB_register_Asm_8: Spacing = 1; return ARM::VLD3DUPd8_UPD;
8408 case ARM::VLD3DUPdWB_register_Asm_16: Spacing = 1; return ARM::VLD3DUPd16_UPD;
8409 case ARM::VLD3DUPdWB_register_Asm_32: Spacing = 1; return ARM::VLD3DUPd32_UPD;
8410 case ARM::VLD3DUPqWB_register_Asm_8: Spacing = 2; return ARM::VLD3DUPq8_UPD;
8411 case ARM::VLD3DUPqWB_register_Asm_16: Spacing = 2; return ARM::VLD3DUPq16_UPD;
8412 case ARM::VLD3DUPqWB_register_Asm_32: Spacing = 2; return ARM::VLD3DUPq32_UPD;
8413 case ARM::VLD3DUPdAsm_8: Spacing = 1; return ARM::VLD3DUPd8;
8414 case ARM::VLD3DUPdAsm_16: Spacing = 1; return ARM::VLD3DUPd16;
8415 case ARM::VLD3DUPdAsm_32: Spacing = 1; return ARM::VLD3DUPd32;
8416 case ARM::VLD3DUPqAsm_8: Spacing = 2; return ARM::VLD3DUPq8;
8417 case ARM::VLD3DUPqAsm_16: Spacing = 2; return ARM::VLD3DUPq16;
8418 case ARM::VLD3DUPqAsm_32: Spacing = 2; return ARM::VLD3DUPq32;
8419
8420 // VLD3LN
8421 case ARM::VLD3LNdWB_fixed_Asm_8: Spacing = 1; return ARM::VLD3LNd8_UPD;
8422 case ARM::VLD3LNdWB_fixed_Asm_16: Spacing = 1; return ARM::VLD3LNd16_UPD;
8423 case ARM::VLD3LNdWB_fixed_Asm_32: Spacing = 1; return ARM::VLD3LNd32_UPD;
8424 case ARM::VLD3LNqWB_fixed_Asm_16: Spacing = 1; return ARM::VLD3LNq16_UPD;
8425 case ARM::VLD3LNqWB_fixed_Asm_32: Spacing = 2; return ARM::VLD3LNq32_UPD;
8426 case ARM::VLD3LNdWB_register_Asm_8: Spacing = 1; return ARM::VLD3LNd8_UPD;
8427 case ARM::VLD3LNdWB_register_Asm_16: Spacing = 1; return ARM::VLD3LNd16_UPD;
8428 case ARM::VLD3LNdWB_register_Asm_32: Spacing = 1; return ARM::VLD3LNd32_UPD;
8429 case ARM::VLD3LNqWB_register_Asm_16: Spacing = 2; return ARM::VLD3LNq16_UPD;
8430 case ARM::VLD3LNqWB_register_Asm_32: Spacing = 2; return ARM::VLD3LNq32_UPD;
8431 case ARM::VLD3LNdAsm_8: Spacing = 1; return ARM::VLD3LNd8;
8432 case ARM::VLD3LNdAsm_16: Spacing = 1; return ARM::VLD3LNd16;
8433 case ARM::VLD3LNdAsm_32: Spacing = 1; return ARM::VLD3LNd32;
8434 case ARM::VLD3LNqAsm_16: Spacing = 2; return ARM::VLD3LNq16;
8435 case ARM::VLD3LNqAsm_32: Spacing = 2; return ARM::VLD3LNq32;
8436
8437 // VLD3
8438 case ARM::VLD3dWB_fixed_Asm_8: Spacing = 1; return ARM::VLD3d8_UPD;
8439 case ARM::VLD3dWB_fixed_Asm_16: Spacing = 1; return ARM::VLD3d16_UPD;
8440 case ARM::VLD3dWB_fixed_Asm_32: Spacing = 1; return ARM::VLD3d32_UPD;
8441 case ARM::VLD3qWB_fixed_Asm_8: Spacing = 2; return ARM::VLD3q8_UPD;
8442 case ARM::VLD3qWB_fixed_Asm_16: Spacing = 2; return ARM::VLD3q16_UPD;
8443 case ARM::VLD3qWB_fixed_Asm_32: Spacing = 2; return ARM::VLD3q32_UPD;
8444 case ARM::VLD3dWB_register_Asm_8: Spacing = 1; return ARM::VLD3d8_UPD;
8445 case ARM::VLD3dWB_register_Asm_16: Spacing = 1; return ARM::VLD3d16_UPD;
8446 case ARM::VLD3dWB_register_Asm_32: Spacing = 1; return ARM::VLD3d32_UPD;
8447 case ARM::VLD3qWB_register_Asm_8: Spacing = 2; return ARM::VLD3q8_UPD;
8448 case ARM::VLD3qWB_register_Asm_16: Spacing = 2; return ARM::VLD3q16_UPD;
8449 case ARM::VLD3qWB_register_Asm_32: Spacing = 2; return ARM::VLD3q32_UPD;
8450 case ARM::VLD3dAsm_8: Spacing = 1; return ARM::VLD3d8;
8451 case ARM::VLD3dAsm_16: Spacing = 1; return ARM::VLD3d16;
8452 case ARM::VLD3dAsm_32: Spacing = 1; return ARM::VLD3d32;
8453 case ARM::VLD3qAsm_8: Spacing = 2; return ARM::VLD3q8;
8454 case ARM::VLD3qAsm_16: Spacing = 2; return ARM::VLD3q16;
8455 case ARM::VLD3qAsm_32: Spacing = 2; return ARM::VLD3q32;
8456
8457 // VLD4LN
8458 case ARM::VLD4LNdWB_fixed_Asm_8: Spacing = 1; return ARM::VLD4LNd8_UPD;
8459 case ARM::VLD4LNdWB_fixed_Asm_16: Spacing = 1; return ARM::VLD4LNd16_UPD;
8460 case ARM::VLD4LNdWB_fixed_Asm_32: Spacing = 1; return ARM::VLD4LNd32_UPD;
8461 case ARM::VLD4LNqWB_fixed_Asm_16: Spacing = 2; return ARM::VLD4LNq16_UPD;
8462 case ARM::VLD4LNqWB_fixed_Asm_32: Spacing = 2; return ARM::VLD4LNq32_UPD;
8463 case ARM::VLD4LNdWB_register_Asm_8: Spacing = 1; return ARM::VLD4LNd8_UPD;
8464 case ARM::VLD4LNdWB_register_Asm_16: Spacing = 1; return ARM::VLD4LNd16_UPD;
8465 case ARM::VLD4LNdWB_register_Asm_32: Spacing = 1; return ARM::VLD4LNd32_UPD;
8466 case ARM::VLD4LNqWB_register_Asm_16: Spacing = 2; return ARM::VLD4LNq16_UPD;
8467 case ARM::VLD4LNqWB_register_Asm_32: Spacing = 2; return ARM::VLD4LNq32_UPD;
8468 case ARM::VLD4LNdAsm_8: Spacing = 1; return ARM::VLD4LNd8;
8469 case ARM::VLD4LNdAsm_16: Spacing = 1; return ARM::VLD4LNd16;
8470 case ARM::VLD4LNdAsm_32: Spacing = 1; return ARM::VLD4LNd32;
8471 case ARM::VLD4LNqAsm_16: Spacing = 2; return ARM::VLD4LNq16;
8472 case ARM::VLD4LNqAsm_32: Spacing = 2; return ARM::VLD4LNq32;
8473
8474 // VLD4DUP
8475 case ARM::VLD4DUPdWB_fixed_Asm_8: Spacing = 1; return ARM::VLD4DUPd8_UPD;
8476 case ARM::VLD4DUPdWB_fixed_Asm_16: Spacing = 1; return ARM::VLD4DUPd16_UPD;
8477 case ARM::VLD4DUPdWB_fixed_Asm_32: Spacing = 1; return ARM::VLD4DUPd32_UPD;
8478 case ARM::VLD4DUPqWB_fixed_Asm_8: Spacing = 1; return ARM::VLD4DUPq8_UPD;
8479 case ARM::VLD4DUPqWB_fixed_Asm_16: Spacing = 1; return ARM::VLD4DUPq16_UPD;
8480 case ARM::VLD4DUPqWB_fixed_Asm_32: Spacing = 2; return ARM::VLD4DUPq32_UPD;
8481 case ARM::VLD4DUPdWB_register_Asm_8: Spacing = 1; return ARM::VLD4DUPd8_UPD;
8482 case ARM::VLD4DUPdWB_register_Asm_16: Spacing = 1; return ARM::VLD4DUPd16_UPD;
8483 case ARM::VLD4DUPdWB_register_Asm_32: Spacing = 1; return ARM::VLD4DUPd32_UPD;
8484 case ARM::VLD4DUPqWB_register_Asm_8: Spacing = 2; return ARM::VLD4DUPq8_UPD;
8485 case ARM::VLD4DUPqWB_register_Asm_16: Spacing = 2; return ARM::VLD4DUPq16_UPD;
8486 case ARM::VLD4DUPqWB_register_Asm_32: Spacing = 2; return ARM::VLD4DUPq32_UPD;
8487 case ARM::VLD4DUPdAsm_8: Spacing = 1; return ARM::VLD4DUPd8;
8488 case ARM::VLD4DUPdAsm_16: Spacing = 1; return ARM::VLD4DUPd16;
8489 case ARM::VLD4DUPdAsm_32: Spacing = 1; return ARM::VLD4DUPd32;
8490 case ARM::VLD4DUPqAsm_8: Spacing = 2; return ARM::VLD4DUPq8;
8491 case ARM::VLD4DUPqAsm_16: Spacing = 2; return ARM::VLD4DUPq16;
8492 case ARM::VLD4DUPqAsm_32: Spacing = 2; return ARM::VLD4DUPq32;
8493
8494 // VLD4
8495 case ARM::VLD4dWB_fixed_Asm_8: Spacing = 1; return ARM::VLD4d8_UPD;
8496 case ARM::VLD4dWB_fixed_Asm_16: Spacing = 1; return ARM::VLD4d16_UPD;
8497 case ARM::VLD4dWB_fixed_Asm_32: Spacing = 1; return ARM::VLD4d32_UPD;
8498 case ARM::VLD4qWB_fixed_Asm_8: Spacing = 2; return ARM::VLD4q8_UPD;
8499 case ARM::VLD4qWB_fixed_Asm_16: Spacing = 2; return ARM::VLD4q16_UPD;
8500 case ARM::VLD4qWB_fixed_Asm_32: Spacing = 2; return ARM::VLD4q32_UPD;
8501 case ARM::VLD4dWB_register_Asm_8: Spacing = 1; return ARM::VLD4d8_UPD;
8502 case ARM::VLD4dWB_register_Asm_16: Spacing = 1; return ARM::VLD4d16_UPD;
8503 case ARM::VLD4dWB_register_Asm_32: Spacing = 1; return ARM::VLD4d32_UPD;
8504 case ARM::VLD4qWB_register_Asm_8: Spacing = 2; return ARM::VLD4q8_UPD;
8505 case ARM::VLD4qWB_register_Asm_16: Spacing = 2; return ARM::VLD4q16_UPD;
8506 case ARM::VLD4qWB_register_Asm_32: Spacing = 2; return ARM::VLD4q32_UPD;
8507 case ARM::VLD4dAsm_8: Spacing = 1; return ARM::VLD4d8;
8508 case ARM::VLD4dAsm_16: Spacing = 1; return ARM::VLD4d16;
8509 case ARM::VLD4dAsm_32: Spacing = 1; return ARM::VLD4d32;
8510 case ARM::VLD4qAsm_8: Spacing = 2; return ARM::VLD4q8;
8511 case ARM::VLD4qAsm_16: Spacing = 2; return ARM::VLD4q16;
8512 case ARM::VLD4qAsm_32: Spacing = 2; return ARM::VLD4q32;
8513 }
8514}
8515
8516bool ARMAsmParser::processInstruction(MCInst &Inst,
8517 const OperandVector &Operands,
8518 MCStreamer &Out) {
8519 // Check if we have the wide qualifier, because if it's present we
8520 // must avoid selecting a 16-bit thumb instruction.
8521 bool HasWideQualifier = false;
8522 for (auto &Op : Operands) {
8523 ARMOperand &ARMOp = static_cast<ARMOperand&>(*Op);
8524 if (ARMOp.isToken() && ARMOp.getToken() == ".w") {
8525 HasWideQualifier = true;
8526 break;
8527 }
8528 }
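  // Editorial illustration (not part of the original source): writing, e.g.,
  // "adds.w r0, r0, #1" attaches a ".w" token and forces the 32-bit Thumb2
  // encoding even when a 16-bit form exists.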
8529
8530 switch (Inst.getOpcode()) {
8531 // Alias for alternate form of 'ldr{,b}t Rt, [Rn], #imm' instruction.
8532 case ARM::LDRT_POST:
8533 case ARM::LDRBT_POST: {
8534 const unsigned Opcode =
8535 (Inst.getOpcode() == ARM::LDRT_POST) ? ARM::LDRT_POST_IMM
8536 : ARM::LDRBT_POST_IMM;
8537 MCInst TmpInst;
8538 TmpInst.setOpcode(Opcode);
8539 TmpInst.addOperand(Inst.getOperand(0));
8540 TmpInst.addOperand(Inst.getOperand(1));
8541 TmpInst.addOperand(Inst.getOperand(1));
8542 TmpInst.addOperand(MCOperand::createReg(0));
8543 TmpInst.addOperand(MCOperand::createImm(0));
8544 TmpInst.addOperand(Inst.getOperand(2));
8545 TmpInst.addOperand(Inst.getOperand(3));
8546 Inst = TmpInst;
8547 return true;
8548 }
8549 // Alias for alternate form of 'str{,b}t Rt, [Rn], #imm' instruction.
8550 case ARM::STRT_POST:
8551 case ARM::STRBT_POST: {
8552 const unsigned Opcode =
8553 (Inst.getOpcode() == ARM::STRT_POST) ? ARM::STRT_POST_IMM
8554 : ARM::STRBT_POST_IMM;
8555 MCInst TmpInst;
8556 TmpInst.setOpcode(Opcode);
8557 TmpInst.addOperand(Inst.getOperand(1));
8558 TmpInst.addOperand(Inst.getOperand(0));
8559 TmpInst.addOperand(Inst.getOperand(1));
8560 TmpInst.addOperand(MCOperand::createReg(0));
8561 TmpInst.addOperand(MCOperand::createImm(0));
8562 TmpInst.addOperand(Inst.getOperand(2));
8563 TmpInst.addOperand(Inst.getOperand(3));
8564 Inst = TmpInst;
8565 return true;
8566 }
8567 // Alias for alternate form of 'ADR Rd, #imm' instruction.
8568 case ARM::ADDri: {
8569 if (Inst.getOperand(1).getReg() != ARM::PC ||
8570 Inst.getOperand(5).getReg() != 0 ||
8571 !(Inst.getOperand(2).isExpr() || Inst.getOperand(2).isImm()))
8572 return false;
8573 MCInst TmpInst;
8574 TmpInst.setOpcode(ARM::ADR);
8575 TmpInst.addOperand(Inst.getOperand(0));
8576 if (Inst.getOperand(2).isImm()) {
8577 // The immediate (mod_imm) will be in its encoded form; we must decode it
8578 // before passing it to the ADR instruction.
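      // Editorial illustration (not part of the original source): the encoding
      // packs rot:imm8, and the value is imm8 rotated right by 2*rot; e.g.
      // Enc = 0x2FF decodes to rotr32(0xFF, 4) = 0xF000000F.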
8579 unsigned Enc = Inst.getOperand(2).getImm();
8580 TmpInst.addOperand(MCOperand::createImm(
8581 ARM_AM::rotr32(Enc & 0xFF, (Enc & 0xF00) >> 7)));
8582 } else {
8583 // Turn PC-relative expression into absolute expression.
8584 // Reading PC provides the start of the current instruction + 8 and
8585 // the transform to adr is biased by that.
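      // Editorial note (not part of the original source): the absolute target
      // built below is (label at this instruction + 8) + <expr>, i.e. the
      // address "add Rd, pc, <expr>" would compute at run time in ARM state.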
8586 MCSymbol *Dot = getContext().createTempSymbol();
8587 Out.emitLabel(Dot);
8588 const MCExpr *OpExpr = Inst.getOperand(2).getExpr();
8589 const MCExpr *InstPC = MCSymbolRefExpr::create(Dot,
8590 MCSymbolRefExpr::VK_None,
8591 getContext());
8592 const MCExpr *Const8 = MCConstantExpr::create(8, getContext());
8593 const MCExpr *ReadPC = MCBinaryExpr::createAdd(InstPC, Const8,
8594 getContext());
8595 const MCExpr *FixupAddr = MCBinaryExpr::createAdd(ReadPC, OpExpr,
8596 getContext());
8597 TmpInst.addOperand(MCOperand::createExpr(FixupAddr));
8598 }
8599 TmpInst.addOperand(Inst.getOperand(3));
8600 TmpInst.addOperand(Inst.getOperand(4));
8601 Inst = TmpInst;
8602 return true;
8603 }
8604 // Aliases for alternate PC+imm syntax of LDR instructions.
8605 case ARM::t2LDRpcrel:
8606 // Select the narrow version if the immediate will fit.
8607 if (Inst.getOperand(1).getImm() > 0 &&
8608 Inst.getOperand(1).getImm() <= 0xff &&
8609 !HasWideQualifier)
8610 Inst.setOpcode(ARM::tLDRpci);
8611 else
8612 Inst.setOpcode(ARM::t2LDRpci);
8613 return true;
8614 case ARM::t2LDRBpcrel:
8615 Inst.setOpcode(ARM::t2LDRBpci);
8616 return true;
8617 case ARM::t2LDRHpcrel:
8618 Inst.setOpcode(ARM::t2LDRHpci);
8619 return true;
8620 case ARM::t2LDRSBpcrel:
8621 Inst.setOpcode(ARM::t2LDRSBpci);
8622 return true;
8623 case ARM::t2LDRSHpcrel:
8624 Inst.setOpcode(ARM::t2LDRSHpci);
8625 return true;
8626 case ARM::LDRConstPool:
8627 case ARM::tLDRConstPool:
8628 case ARM::t2LDRConstPool: {
8629 // The pseudo instruction "ldr rt, =immediate" is converted to a
8630 // "mov rt, immediate" if the immediate is known and representable;
8631 // otherwise we create a constant pool entry that we load from.
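    // Editorial illustration (not part of the original source): "ldr r0, =0xFF"
    // can be emitted as "mov r0, #0xFF", whereas "ldr r0, =0x12345678" is not
    // representable as an immediate and becomes a literal load from a
    // constant pool entry.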
8632 MCInst TmpInst;
8633 if (Inst.getOpcode() == ARM::LDRConstPool)
8634 TmpInst.setOpcode(ARM::LDRi12);
8635 else if (Inst.getOpcode() == ARM::tLDRConstPool)
8636 TmpInst.setOpcode(ARM::tLDRpci);
8637 else if (Inst.getOpcode() == ARM::t2LDRConstPool)
8638 TmpInst.setOpcode(ARM::t2LDRpci);
8639 const ARMOperand &PoolOperand =
8640 (HasWideQualifier ?
8641 static_cast<ARMOperand &>(*Operands[4]) :
8642 static_cast<ARMOperand &>(*Operands[3]));
8643 const MCExpr *SubExprVal = PoolOperand.getConstantPoolImm();
8644 // If SubExprVal is a constant, we may be able to use a MOV instead.
8645 if (isa<MCConstantExpr>(SubExprVal) &&
8646 Inst.getOperand(0).getReg() != ARM::PC &&
8647 Inst.getOperand(0).getReg() != ARM::SP) {
8648 int64_t Value =
8649 (int64_t) (cast<MCConstantExpr>(SubExprVal))->getValue();
8650 bool UseMov = true;
8651 bool MovHasS = true;
8652 if (Inst.getOpcode() == ARM::LDRConstPool) {
8653 // ARM Constant
8654 if (ARM_AM::getSOImmVal(Value) != -1) {
8655 Value = ARM_AM::getSOImmVal(Value);
8656 TmpInst.setOpcode(ARM::MOVi);
8657 }
8658 else if (ARM_AM::getSOImmVal(~Value) != -1) {
8659 Value = ARM_AM::getSOImmVal(~Value);
8660 TmpInst.setOpcode(ARM::MVNi);
8661 }
8662 else if (hasV6T2Ops() &&
8663 Value >=0 && Value < 65536) {
8664 TmpInst.setOpcode(ARM::MOVi16);
8665 MovHasS = false;
8666 }
8667 else
8668 UseMov = false;
8669 }
8670 else {
8671 // Thumb/Thumb2 Constant
8672 if (hasThumb2() &&
8673 ARM_AM::getT2SOImmVal(Value) != -1)
8674 TmpInst.setOpcode(ARM::t2MOVi);
8675 else if (hasThumb2() &&
8676 ARM_AM::getT2SOImmVal(~Value) != -1) {
8677 TmpInst.setOpcode(ARM::t2MVNi);
8678 Value = ~Value;
8679 }
8680 else if (hasV8MBaseline() &&
8681 Value >=0 && Value < 65536) {
8682 TmpInst.setOpcode(ARM::t2MOVi16);
8683 MovHasS = false;
8684 }
8685 else
8686 UseMov = false;
8687 }
8688 if (UseMov) {
8689 TmpInst.addOperand(Inst.getOperand(0)); // Rt
8690 TmpInst.addOperand(MCOperand::createImm(Value)); // Immediate
8691 TmpInst.addOperand(Inst.getOperand(2)); // CondCode
8692 TmpInst.addOperand(Inst.getOperand(3)); // CondCode
8693 if (MovHasS)
8694 TmpInst.addOperand(MCOperand::createReg(0)); // S
8695 Inst = TmpInst;
8696 return true;
8697 }
8698 }
8699     // No opportunity to use MOV/MVN; create a constant pool entry instead.
8700 const MCExpr *CPLoc =
8701 getTargetStreamer().addConstantPoolEntry(SubExprVal,
8702 PoolOperand.getStartLoc());
8703 TmpInst.addOperand(Inst.getOperand(0)); // Rt
8704 TmpInst.addOperand(MCOperand::createExpr(CPLoc)); // offset to constpool
8705 if (TmpInst.getOpcode() == ARM::LDRi12)
8706 TmpInst.addOperand(MCOperand::createImm(0)); // unused offset
8707 TmpInst.addOperand(Inst.getOperand(2)); // CondCode
8708 TmpInst.addOperand(Inst.getOperand(3)); // CondCode
8709 Inst = TmpInst;
8710 return true;
8711 }
8712 // Handle NEON VST complex aliases.
8713 case ARM::VST1LNdWB_register_Asm_8:
8714 case ARM::VST1LNdWB_register_Asm_16:
8715 case ARM::VST1LNdWB_register_Asm_32: {
8716 MCInst TmpInst;
8717 // Shuffle the operands around so the lane index operand is in the
8718 // right place.
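     // As parsed, the operands arrive as Vd, lane, Rn, alignment, Rm and the
     // predicate; the real writeback store wants the writeback Rn first, then
     // the addressing operands, and only then the data register and lane.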
8719 unsigned Spacing;
8720 TmpInst.setOpcode(getRealVSTOpcode(Inst.getOpcode(), Spacing));
8721 TmpInst.addOperand(Inst.getOperand(2)); // Rn_wb
8722 TmpInst.addOperand(Inst.getOperand(2)); // Rn
8723 TmpInst.addOperand(Inst.getOperand(3)); // alignment
8724 TmpInst.addOperand(Inst.getOperand(4)); // Rm
8725 TmpInst.addOperand(Inst.getOperand(0)); // Vd
8726 TmpInst.addOperand(Inst.getOperand(1)); // lane
8727 TmpInst.addOperand(Inst.getOperand(5)); // CondCode
8728 TmpInst.addOperand(Inst.getOperand(6));
8729 Inst = TmpInst;
8730 return true;
8731 }
8732
8733 case ARM::VST2LNdWB_register_Asm_8:
8734 case ARM::VST2LNdWB_register_Asm_16:
8735 case ARM::VST2LNdWB_register_Asm_32:
8736 case ARM::VST2LNqWB_register_Asm_16:
8737 case ARM::VST2LNqWB_register_Asm_32: {
8738 MCInst TmpInst;
8739 // Shuffle the operands around so the lane index operand is in the
8740 // right place.
8741 unsigned Spacing;
8742 TmpInst.setOpcode(getRealVSTOpcode(Inst.getOpcode(), Spacing));
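     // Spacing is the register stride of the list as reported by
     // getRealVSTOpcode (1 for the d-register forms, 2 for the
     // every-other-register q forms), so Vd + Spacing names the next register
     // of the group.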
8743 TmpInst.addOperand(Inst.getOperand(2)); // Rn_wb
8744 TmpInst.addOperand(Inst.getOperand(2)); // Rn
8745 TmpInst.addOperand(Inst.getOperand(3)); // alignment
8746 TmpInst.addOperand(Inst.getOperand(4)); // Rm
8747 TmpInst.addOperand(Inst.getOperand(0)); // Vd
8748 TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
8749 Spacing));
8750 TmpInst.addOperand(Inst.getOperand(1)); // lane
8751 TmpInst.addOperand(Inst.getOperand(5)); // CondCode
8752 TmpInst.addOperand(Inst.getOperand(6));
8753 Inst = TmpInst;
8754 return true;
8755 }
8756
8757 case ARM::VST3LNdWB_register_Asm_8:
8758 case ARM::VST3LNdWB_register_Asm_16:
8759 case ARM::VST3LNdWB_register_Asm_32:
8760 case ARM::VST3LNqWB_register_Asm_16:
8761 case ARM::VST3LNqWB_register_Asm_32: {
8762 MCInst TmpInst;
8763 // Shuffle the operands around so the lane index operand is in the
8764 // right place.
8765 unsigned Spacing;
8766 TmpInst.setOpcode(getRealVSTOpcode(Inst.getOpcode(), Spacing));
8767 TmpInst.addOperand(Inst.getOperand(2)); // Rn_wb
8768 TmpInst.addOperand(Inst.getOperand(2)); // Rn
8769 TmpInst.addOperand(Inst.getOperand(3)); // alignment
8770 TmpInst.addOperand(Inst.getOperand(4)); // Rm
8771 TmpInst.addOperand(Inst.getOperand(0)); // Vd
8772 TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
8773 Spacing));
8774 TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
8775 Spacing * 2));
8776 TmpInst.addOperand(Inst.getOperand(1)); // lane
8777 TmpInst.addOperand(Inst.getOperand(5)); // CondCode
8778 TmpInst.addOperand(Inst.getOperand(6));
8779 Inst = TmpInst;
8780 return true;
8781 }
8782
8783 case ARM::VST4LNdWB_register_Asm_8:
8784 case ARM::VST4LNdWB_register_Asm_16:
8785 case ARM::VST4LNdWB_register_Asm_32:
8786 case ARM::VST4LNqWB_register_Asm_16:
8787 case ARM::VST4LNqWB_register_Asm_32: {
8788 MCInst TmpInst;
8789 // Shuffle the operands around so the lane index operand is in the
8790 // right place.
8791 unsigned Spacing;
8792 TmpInst.setOpcode(getRealVSTOpcode(Inst.getOpcode(), Spacing));
8793 TmpInst.addOperand(Inst.getOperand(2)); // Rn_wb
8794 TmpInst.addOperand(Inst.getOperand(2)); // Rn
8795 TmpInst.addOperand(Inst.getOperand(3)); // alignment
8796 TmpInst.addOperand(Inst.getOperand(4)); // Rm
8797 TmpInst.addOperand(Inst.getOperand(0)); // Vd
8798 TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
8799 Spacing));
8800 TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
8801 Spacing * 2));
8802 TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
8803 Spacing * 3));
8804 TmpInst.addOperand(Inst.getOperand(1)); // lane
8805 TmpInst.addOperand(Inst.getOperand(5)); // CondCode
8806 TmpInst.addOperand(Inst.getOperand(6));
8807 Inst = TmpInst;
8808 return true;
8809 }
8810
8811 case ARM::VST1LNdWB_fixed_Asm_8:
8812 case ARM::VST1LNdWB_fixed_Asm_16:
8813 case ARM::VST1LNdWB_fixed_Asm_32: {
8814 MCInst TmpInst;
8815 // Shuffle the operands around so the lane index operand is in the
8816 // right place.
8817 unsigned Spacing;
8818 TmpInst.setOpcode(getRealVSTOpcode(Inst.getOpcode(), Spacing));
8819 TmpInst.addOperand(Inst.getOperand(2)); // Rn_wb
8820 TmpInst.addOperand(Inst.getOperand(2)); // Rn
8821 TmpInst.addOperand(Inst.getOperand(3)); // alignment
8822 TmpInst.addOperand(MCOperand::createReg(0)); // Rm
8823 TmpInst.addOperand(Inst.getOperand(0)); // Vd
8824 TmpInst.addOperand(Inst.getOperand(1)); // lane
8825 TmpInst.addOperand(Inst.getOperand(4)); // CondCode
8826 TmpInst.addOperand(Inst.getOperand(5));
8827 Inst = TmpInst;
8828 return true;
8829 }
8830
8831 case ARM::VST2LNdWB_fixed_Asm_8:
8832 case ARM::VST2LNdWB_fixed_Asm_16:
8833 case ARM::VST2LNdWB_fixed_Asm_32:
8834 case ARM::VST2LNqWB_fixed_Asm_16:
8835 case ARM::VST2LNqWB_fixed_Asm_32: {
8836 MCInst TmpInst;
8837 // Shuffle the operands around so the lane index operand is in the
8838 // right place.
8839 unsigned Spacing;
8840 TmpInst.setOpcode(getRealVSTOpcode(Inst.getOpcode(), Spacing));
8841 TmpInst.addOperand(Inst.getOperand(2)); // Rn_wb
8842 TmpInst.addOperand(Inst.getOperand(2)); // Rn
8843 TmpInst.addOperand(Inst.getOperand(3)); // alignment
8844 TmpInst.addOperand(MCOperand::createReg(0)); // Rm
8845 TmpInst.addOperand(Inst.getOperand(0)); // Vd
8846 TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
8847 Spacing));
8848 TmpInst.addOperand(Inst.getOperand(1)); // lane
8849 TmpInst.addOperand(Inst.getOperand(4)); // CondCode
8850 TmpInst.addOperand(Inst.getOperand(5));
8851 Inst = TmpInst;
8852 return true;
8853 }
8854
8855 case ARM::VST3LNdWB_fixed_Asm_8:
8856 case ARM::VST3LNdWB_fixed_Asm_16:
8857 case ARM::VST3LNdWB_fixed_Asm_32:
8858 case ARM::VST3LNqWB_fixed_Asm_16:
8859 case ARM::VST3LNqWB_fixed_Asm_32: {
8860 MCInst TmpInst;
8861 // Shuffle the operands around so the lane index operand is in the
8862 // right place.
8863 unsigned Spacing;
8864 TmpInst.setOpcode(getRealVSTOpcode(Inst.getOpcode(), Spacing));
8865 TmpInst.addOperand(Inst.getOperand(2)); // Rn_wb
8866 TmpInst.addOperand(Inst.getOperand(2)); // Rn
8867 TmpInst.addOperand(Inst.getOperand(3)); // alignment
8868 TmpInst.addOperand(MCOperand::createReg(0)); // Rm
8869 TmpInst.addOperand(Inst.getOperand(0)); // Vd
8870 TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
8871 Spacing));
8872 TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
8873 Spacing * 2));
8874 TmpInst.addOperand(Inst.getOperand(1)); // lane
8875 TmpInst.addOperand(Inst.getOperand(4)); // CondCode
8876 TmpInst.addOperand(Inst.getOperand(5));
8877 Inst = TmpInst;
8878 return true;
8879 }
8880
8881 case ARM::VST4LNdWB_fixed_Asm_8:
8882 case ARM::VST4LNdWB_fixed_Asm_16:
8883 case ARM::VST4LNdWB_fixed_Asm_32:
8884 case ARM::VST4LNqWB_fixed_Asm_16:
8885 case ARM::VST4LNqWB_fixed_Asm_32: {
8886 MCInst TmpInst;
8887 // Shuffle the operands around so the lane index operand is in the
8888 // right place.
8889 unsigned Spacing;
8890 TmpInst.setOpcode(getRealVSTOpcode(Inst.getOpcode(), Spacing));
8891 TmpInst.addOperand(Inst.getOperand(2)); // Rn_wb
8892 TmpInst.addOperand(Inst.getOperand(2)); // Rn
8893 TmpInst.addOperand(Inst.getOperand(3)); // alignment
8894 TmpInst.addOperand(MCOperand::createReg(0)); // Rm
8895 TmpInst.addOperand(Inst.getOperand(0)); // Vd
8896 TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
8897 Spacing));
8898 TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
8899 Spacing * 2));
8900 TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
8901 Spacing * 3));
8902 TmpInst.addOperand(Inst.getOperand(1)); // lane
8903 TmpInst.addOperand(Inst.getOperand(4)); // CondCode
8904 TmpInst.addOperand(Inst.getOperand(5));
8905 Inst = TmpInst;
8906 return true;
8907 }
8908
8909 case ARM::VST1LNdAsm_8:
8910 case ARM::VST1LNdAsm_16:
8911 case ARM::VST1LNdAsm_32: {
8912 MCInst TmpInst;
8913 // Shuffle the operands around so the lane index operand is in the
8914 // right place.
8915 unsigned Spacing;
8916 TmpInst.setOpcode(getRealVSTOpcode(Inst.getOpcode(), Spacing));
8917 TmpInst.addOperand(Inst.getOperand(2)); // Rn
8918 TmpInst.addOperand(Inst.getOperand(3)); // alignment
8919 TmpInst.addOperand(Inst.getOperand(0)); // Vd
8920 TmpInst.addOperand(Inst.getOperand(1)); // lane
8921 TmpInst.addOperand(Inst.getOperand(4)); // CondCode
8922 TmpInst.addOperand(Inst.getOperand(5));
8923 Inst = TmpInst;
8924 return true;
8925 }
8926
8927 case ARM::VST2LNdAsm_8:
8928 case ARM::VST2LNdAsm_16:
8929 case ARM::VST2LNdAsm_32:
8930 case ARM::VST2LNqAsm_16:
8931 case ARM::VST2LNqAsm_32: {
8932 MCInst TmpInst;
8933 // Shuffle the operands around so the lane index operand is in the
8934 // right place.
8935 unsigned Spacing;
8936 TmpInst.setOpcode(getRealVSTOpcode(Inst.getOpcode(), Spacing));
8937 TmpInst.addOperand(Inst.getOperand(2)); // Rn
8938 TmpInst.addOperand(Inst.getOperand(3)); // alignment
8939 TmpInst.addOperand(Inst.getOperand(0)); // Vd
8940 TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
8941 Spacing));
8942 TmpInst.addOperand(Inst.getOperand(1)); // lane
8943 TmpInst.addOperand(Inst.getOperand(4)); // CondCode
8944 TmpInst.addOperand(Inst.getOperand(5));
8945 Inst = TmpInst;
8946 return true;
8947 }
8948
8949 case ARM::VST3LNdAsm_8:
8950 case ARM::VST3LNdAsm_16:
8951 case ARM::VST3LNdAsm_32:
8952 case ARM::VST3LNqAsm_16:
8953 case ARM::VST3LNqAsm_32: {
8954 MCInst TmpInst;
8955 // Shuffle the operands around so the lane index operand is in the
8956 // right place.
8957 unsigned Spacing;
8958 TmpInst.setOpcode(getRealVSTOpcode(Inst.getOpcode(), Spacing));
8959 TmpInst.addOperand(Inst.getOperand(2)); // Rn
8960 TmpInst.addOperand(Inst.getOperand(3)); // alignment
8961 TmpInst.addOperand(Inst.getOperand(0)); // Vd
8962 TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
8963 Spacing));
8964 TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
8965 Spacing * 2));
8966 TmpInst.addOperand(Inst.getOperand(1)); // lane
8967 TmpInst.addOperand(Inst.getOperand(4)); // CondCode
8968 TmpInst.addOperand(Inst.getOperand(5));
8969 Inst = TmpInst;
8970 return true;
8971 }
8972
8973 case ARM::VST4LNdAsm_8:
8974 case ARM::VST4LNdAsm_16:
8975 case ARM::VST4LNdAsm_32:
8976 case ARM::VST4LNqAsm_16:
8977 case ARM::VST4LNqAsm_32: {
8978 MCInst TmpInst;
8979 // Shuffle the operands around so the lane index operand is in the
8980 // right place.
8981 unsigned Spacing;
8982 TmpInst.setOpcode(getRealVSTOpcode(Inst.getOpcode(), Spacing));
8983 TmpInst.addOperand(Inst.getOperand(2)); // Rn
8984 TmpInst.addOperand(Inst.getOperand(3)); // alignment
8985 TmpInst.addOperand(Inst.getOperand(0)); // Vd
8986 TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
8987 Spacing));
8988 TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
8989 Spacing * 2));
8990 TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
8991 Spacing * 3));
8992 TmpInst.addOperand(Inst.getOperand(1)); // lane
8993 TmpInst.addOperand(Inst.getOperand(4)); // CondCode
8994 TmpInst.addOperand(Inst.getOperand(5));
8995 Inst = TmpInst;
8996 return true;
8997 }
8998
8999 // Handle NEON VLD complex aliases.
9000 case ARM::VLD1LNdWB_register_Asm_8:
9001 case ARM::VLD1LNdWB_register_Asm_16:
9002 case ARM::VLD1LNdWB_register_Asm_32: {
9003 MCInst TmpInst;
9004 // Shuffle the operands around so the lane index operand is in the
9005 // right place.
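     // A lane load only replaces a single lane, so the destination registers
     // are also tied sources; that is why Vd reappears after the addressing
     // operands below.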
9006 unsigned Spacing;
9007 TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing));
9008 TmpInst.addOperand(Inst.getOperand(0)); // Vd
9009 TmpInst.addOperand(Inst.getOperand(2)); // Rn_wb
9010 TmpInst.addOperand(Inst.getOperand(2)); // Rn
9011 TmpInst.addOperand(Inst.getOperand(3)); // alignment
9012 TmpInst.addOperand(Inst.getOperand(4)); // Rm
9013 TmpInst.addOperand(Inst.getOperand(0)); // Tied operand src (== Vd)
9014 TmpInst.addOperand(Inst.getOperand(1)); // lane
9015 TmpInst.addOperand(Inst.getOperand(5)); // CondCode
9016 TmpInst.addOperand(Inst.getOperand(6));
9017 Inst = TmpInst;
9018 return true;
9019 }
9020
9021 case ARM::VLD2LNdWB_register_Asm_8:
9022 case ARM::VLD2LNdWB_register_Asm_16:
9023 case ARM::VLD2LNdWB_register_Asm_32:
9024 case ARM::VLD2LNqWB_register_Asm_16:
9025 case ARM::VLD2LNqWB_register_Asm_32: {
9026 MCInst TmpInst;
9027 // Shuffle the operands around so the lane index operand is in the
9028 // right place.
9029 unsigned Spacing;
9030 TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing));
9031 TmpInst.addOperand(Inst.getOperand(0)); // Vd
9032 TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
9033 Spacing));
9034 TmpInst.addOperand(Inst.getOperand(2)); // Rn_wb
9035 TmpInst.addOperand(Inst.getOperand(2)); // Rn
9036 TmpInst.addOperand(Inst.getOperand(3)); // alignment
9037 TmpInst.addOperand(Inst.getOperand(4)); // Rm
9038 TmpInst.addOperand(Inst.getOperand(0)); // Tied operand src (== Vd)
9039 TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
9040 Spacing));
9041 TmpInst.addOperand(Inst.getOperand(1)); // lane
9042 TmpInst.addOperand(Inst.getOperand(5)); // CondCode
9043 TmpInst.addOperand(Inst.getOperand(6));
9044 Inst = TmpInst;
9045 return true;
9046 }
9047
9048 case ARM::VLD3LNdWB_register_Asm_8:
9049 case ARM::VLD3LNdWB_register_Asm_16:
9050 case ARM::VLD3LNdWB_register_Asm_32:
9051 case ARM::VLD3LNqWB_register_Asm_16:
9052 case ARM::VLD3LNqWB_register_Asm_32: {
9053 MCInst TmpInst;
9054 // Shuffle the operands around so the lane index operand is in the
9055 // right place.
9056 unsigned Spacing;
9057 TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing));
9058 TmpInst.addOperand(Inst.getOperand(0)); // Vd
9059 TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
9060 Spacing));
9061 TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
9062 Spacing * 2));
9063 TmpInst.addOperand(Inst.getOperand(2)); // Rn_wb
9064 TmpInst.addOperand(Inst.getOperand(2)); // Rn
9065 TmpInst.addOperand(Inst.getOperand(3)); // alignment
9066 TmpInst.addOperand(Inst.getOperand(4)); // Rm
9067 TmpInst.addOperand(Inst.getOperand(0)); // Tied operand src (== Vd)
9068 TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
9069 Spacing));
9070 TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
9071 Spacing * 2));
9072 TmpInst.addOperand(Inst.getOperand(1)); // lane
9073 TmpInst.addOperand(Inst.getOperand(5)); // CondCode
9074 TmpInst.addOperand(Inst.getOperand(6));
9075 Inst = TmpInst;
9076 return true;
9077 }
9078
9079 case ARM::VLD4LNdWB_register_Asm_8:
9080 case ARM::VLD4LNdWB_register_Asm_16:
9081 case ARM::VLD4LNdWB_register_Asm_32:
9082 case ARM::VLD4LNqWB_register_Asm_16:
9083 case ARM::VLD4LNqWB_register_Asm_32: {
9084 MCInst TmpInst;
9085 // Shuffle the operands around so the lane index operand is in the
9086 // right place.
9087 unsigned Spacing;
9088 TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing));
9089 TmpInst.addOperand(Inst.getOperand(0)); // Vd
9090 TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
9091 Spacing));
9092 TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
9093 Spacing * 2));
9094 TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
9095 Spacing * 3));
9096 TmpInst.addOperand(Inst.getOperand(2)); // Rn_wb
9097 TmpInst.addOperand(Inst.getOperand(2)); // Rn
9098 TmpInst.addOperand(Inst.getOperand(3)); // alignment
9099 TmpInst.addOperand(Inst.getOperand(4)); // Rm
9100 TmpInst.addOperand(Inst.getOperand(0)); // Tied operand src (== Vd)
9101 TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
9102 Spacing));
9103 TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
9104 Spacing * 2));
9105 TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
9106 Spacing * 3));
9107 TmpInst.addOperand(Inst.getOperand(1)); // lane
9108 TmpInst.addOperand(Inst.getOperand(5)); // CondCode
9109 TmpInst.addOperand(Inst.getOperand(6));
9110 Inst = TmpInst;
9111 return true;
9112 }
9113
9114 case ARM::VLD1LNdWB_fixed_Asm_8:
9115 case ARM::VLD1LNdWB_fixed_Asm_16:
9116 case ARM::VLD1LNdWB_fixed_Asm_32: {
9117 MCInst TmpInst;
9118 // Shuffle the operands around so the lane index operand is in the
9119 // right place.
9120 unsigned Spacing;
9121 TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing));
9122 TmpInst.addOperand(Inst.getOperand(0)); // Vd
9123 TmpInst.addOperand(Inst.getOperand(2)); // Rn_wb
9124 TmpInst.addOperand(Inst.getOperand(2)); // Rn
9125 TmpInst.addOperand(Inst.getOperand(3)); // alignment
9126 TmpInst.addOperand(MCOperand::createReg(0)); // Rm
9127 TmpInst.addOperand(Inst.getOperand(0)); // Tied operand src (== Vd)
9128 TmpInst.addOperand(Inst.getOperand(1)); // lane
9129 TmpInst.addOperand(Inst.getOperand(4)); // CondCode
9130 TmpInst.addOperand(Inst.getOperand(5));
9131 Inst = TmpInst;
9132 return true;
9133 }
9134
9135 case ARM::VLD2LNdWB_fixed_Asm_8:
9136 case ARM::VLD2LNdWB_fixed_Asm_16:
9137 case ARM::VLD2LNdWB_fixed_Asm_32:
9138 case ARM::VLD2LNqWB_fixed_Asm_16:
9139 case ARM::VLD2LNqWB_fixed_Asm_32: {
9140 MCInst TmpInst;
9141 // Shuffle the operands around so the lane index operand is in the
9142 // right place.
9143 unsigned Spacing;
9144 TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing));
9145 TmpInst.addOperand(Inst.getOperand(0)); // Vd
9146 TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
9147 Spacing));
9148 TmpInst.addOperand(Inst.getOperand(2)); // Rn_wb
9149 TmpInst.addOperand(Inst.getOperand(2)); // Rn
9150 TmpInst.addOperand(Inst.getOperand(3)); // alignment
9151 TmpInst.addOperand(MCOperand::createReg(0)); // Rm
9152 TmpInst.addOperand(Inst.getOperand(0)); // Tied operand src (== Vd)
9153 TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
9154 Spacing));
9155 TmpInst.addOperand(Inst.getOperand(1)); // lane
9156 TmpInst.addOperand(Inst.getOperand(4)); // CondCode
9157 TmpInst.addOperand(Inst.getOperand(5));
9158 Inst = TmpInst;
9159 return true;
9160 }
9161
9162 case ARM::VLD3LNdWB_fixed_Asm_8:
9163 case ARM::VLD3LNdWB_fixed_Asm_16:
9164 case ARM::VLD3LNdWB_fixed_Asm_32:
9165 case ARM::VLD3LNqWB_fixed_Asm_16:
9166 case ARM::VLD3LNqWB_fixed_Asm_32: {
9167 MCInst TmpInst;
9168 // Shuffle the operands around so the lane index operand is in the
9169 // right place.
9170 unsigned Spacing;
9171 TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing));
9172 TmpInst.addOperand(Inst.getOperand(0)); // Vd
9173 TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
9174 Spacing));
9175 TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
9176 Spacing * 2));
9177 TmpInst.addOperand(Inst.getOperand(2)); // Rn_wb
9178 TmpInst.addOperand(Inst.getOperand(2)); // Rn
9179 TmpInst.addOperand(Inst.getOperand(3)); // alignment
9180 TmpInst.addOperand(MCOperand::createReg(0)); // Rm
9181 TmpInst.addOperand(Inst.getOperand(0)); // Tied operand src (== Vd)
9182 TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
9183 Spacing));
9184 TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
9185 Spacing * 2));
9186 TmpInst.addOperand(Inst.getOperand(1)); // lane
9187 TmpInst.addOperand(Inst.getOperand(4)); // CondCode
9188 TmpInst.addOperand(Inst.getOperand(5));
9189 Inst = TmpInst;
9190 return true;
9191 }
9192
9193 case ARM::VLD4LNdWB_fixed_Asm_8:
9194 case ARM::VLD4LNdWB_fixed_Asm_16:
9195 case ARM::VLD4LNdWB_fixed_Asm_32:
9196 case ARM::VLD4LNqWB_fixed_Asm_16:
9197 case ARM::VLD4LNqWB_fixed_Asm_32: {
9198 MCInst TmpInst;
9199 // Shuffle the operands around so the lane index operand is in the
9200 // right place.
9201 unsigned Spacing;
9202 TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing));
9203 TmpInst.addOperand(Inst.getOperand(0)); // Vd
9204 TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
9205 Spacing));
9206 TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
9207 Spacing * 2));
9208 TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
9209 Spacing * 3));
9210 TmpInst.addOperand(Inst.getOperand(2)); // Rn_wb
9211 TmpInst.addOperand(Inst.getOperand(2)); // Rn
9212 TmpInst.addOperand(Inst.getOperand(3)); // alignment
9213 TmpInst.addOperand(MCOperand::createReg(0)); // Rm
9214 TmpInst.addOperand(Inst.getOperand(0)); // Tied operand src (== Vd)
9215 TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
9216 Spacing));
9217 TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
9218 Spacing * 2));
9219 TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
9220 Spacing * 3));
9221 TmpInst.addOperand(Inst.getOperand(1)); // lane
9222 TmpInst.addOperand(Inst.getOperand(4)); // CondCode
9223 TmpInst.addOperand(Inst.getOperand(5));
9224 Inst = TmpInst;
9225 return true;
9226 }
9227
9228 case ARM::VLD1LNdAsm_8:
9229 case ARM::VLD1LNdAsm_16:
9230 case ARM::VLD1LNdAsm_32: {
9231 MCInst TmpInst;
9232 // Shuffle the operands around so the lane index operand is in the
9233 // right place.
9234 unsigned Spacing;
9235 TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing));
9236 TmpInst.addOperand(Inst.getOperand(0)); // Vd
9237 TmpInst.addOperand(Inst.getOperand(2)); // Rn
9238 TmpInst.addOperand(Inst.getOperand(3)); // alignment
9239 TmpInst.addOperand(Inst.getOperand(0)); // Tied operand src (== Vd)
9240 TmpInst.addOperand(Inst.getOperand(1)); // lane
9241 TmpInst.addOperand(Inst.getOperand(4)); // CondCode
9242 TmpInst.addOperand(Inst.getOperand(5));
9243 Inst = TmpInst;
9244 return true;
9245 }
9246
9247 case ARM::VLD2LNdAsm_8:
9248 case ARM::VLD2LNdAsm_16:
9249 case ARM::VLD2LNdAsm_32:
9250 case ARM::VLD2LNqAsm_16:
9251 case ARM::VLD2LNqAsm_32: {
9252 MCInst TmpInst;
9253 // Shuffle the operands around so the lane index operand is in the
9254 // right place.
9255 unsigned Spacing;
9256 TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing));
9257 TmpInst.addOperand(Inst.getOperand(0)); // Vd
9258 TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
9259 Spacing));
9260 TmpInst.addOperand(Inst.getOperand(2)); // Rn
9261 TmpInst.addOperand(Inst.getOperand(3)); // alignment
9262 TmpInst.addOperand(Inst.getOperand(0)); // Tied operand src (== Vd)
9263 TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
9264 Spacing));
9265 TmpInst.addOperand(Inst.getOperand(1)); // lane
9266 TmpInst.addOperand(Inst.getOperand(4)); // CondCode
9267 TmpInst.addOperand(Inst.getOperand(5));
9268 Inst = TmpInst;
9269 return true;
9270 }
9271
9272 case ARM::VLD3LNdAsm_8:
9273 case ARM::VLD3LNdAsm_16:
9274 case ARM::VLD3LNdAsm_32:
9275 case ARM::VLD3LNqAsm_16:
9276 case ARM::VLD3LNqAsm_32: {
9277 MCInst TmpInst;
9278 // Shuffle the operands around so the lane index operand is in the
9279 // right place.
9280 unsigned Spacing;
9281 TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing));
9282 TmpInst.addOperand(Inst.getOperand(0)); // Vd
9283 TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
9284 Spacing));
9285 TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
9286 Spacing * 2));
9287 TmpInst.addOperand(Inst.getOperand(2)); // Rn
9288 TmpInst.addOperand(Inst.getOperand(3)); // alignment
9289 TmpInst.addOperand(Inst.getOperand(0)); // Tied operand src (== Vd)
9290 TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
9291 Spacing));
9292 TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
9293 Spacing * 2));
9294 TmpInst.addOperand(Inst.getOperand(1)); // lane
9295 TmpInst.addOperand(Inst.getOperand(4)); // CondCode
9296 TmpInst.addOperand(Inst.getOperand(5));
9297 Inst = TmpInst;
9298 return true;
9299 }
9300
9301 case ARM::VLD4LNdAsm_8:
9302 case ARM::VLD4LNdAsm_16:
9303 case ARM::VLD4LNdAsm_32:
9304 case ARM::VLD4LNqAsm_16:
9305 case ARM::VLD4LNqAsm_32: {
9306 MCInst TmpInst;
9307 // Shuffle the operands around so the lane index operand is in the
9308 // right place.
9309 unsigned Spacing;
9310 TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing));
9311 TmpInst.addOperand(Inst.getOperand(0)); // Vd
9312 TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
9313 Spacing));
9314 TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
9315 Spacing * 2));
9316 TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
9317 Spacing * 3));
9318 TmpInst.addOperand(Inst.getOperand(2)); // Rn
9319 TmpInst.addOperand(Inst.getOperand(3)); // alignment
9320 TmpInst.addOperand(Inst.getOperand(0)); // Tied operand src (== Vd)
9321 TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
9322 Spacing));
9323 TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
9324 Spacing * 2));
9325 TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
9326 Spacing * 3));
9327 TmpInst.addOperand(Inst.getOperand(1)); // lane
9328 TmpInst.addOperand(Inst.getOperand(4)); // CondCode
9329 TmpInst.addOperand(Inst.getOperand(5));
9330 Inst = TmpInst;
9331 return true;
9332 }
9333
9334 // VLD3DUP single 3-element structure to all lanes instructions.
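   // E.g. 'vld3.8 {d0[], d1[], d2[]}, [r0]' loads one 3-element structure and
   // replicates each element across all lanes of its register; the spaced
   // forms ('{d0[], d2[], d4[]}') use every other D register.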
9335 case ARM::VLD3DUPdAsm_8:
9336 case ARM::VLD3DUPdAsm_16:
9337 case ARM::VLD3DUPdAsm_32:
9338 case ARM::VLD3DUPqAsm_8:
9339 case ARM::VLD3DUPqAsm_16:
9340 case ARM::VLD3DUPqAsm_32: {
9341 MCInst TmpInst;
9342 unsigned Spacing;
9343 TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing));
9344 TmpInst.addOperand(Inst.getOperand(0)); // Vd
9345 TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
9346 Spacing));
9347 TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
9348 Spacing * 2));
9349 TmpInst.addOperand(Inst.getOperand(1)); // Rn
9350 TmpInst.addOperand(Inst.getOperand(2)); // alignment
9351 TmpInst.addOperand(Inst.getOperand(3)); // CondCode
9352 TmpInst.addOperand(Inst.getOperand(4));
9353 Inst = TmpInst;
9354 return true;
9355 }
9356
9357 case ARM::VLD3DUPdWB_fixed_Asm_8:
9358 case ARM::VLD3DUPdWB_fixed_Asm_16:
9359 case ARM::VLD3DUPdWB_fixed_Asm_32:
9360 case ARM::VLD3DUPqWB_fixed_Asm_8:
9361 case ARM::VLD3DUPqWB_fixed_Asm_16:
9362 case ARM::VLD3DUPqWB_fixed_Asm_32: {
9363 MCInst TmpInst;
9364 unsigned Spacing;
9365 TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing));
9366 TmpInst.addOperand(Inst.getOperand(0)); // Vd
9367 TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
9368 Spacing));
9369 TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
9370 Spacing * 2));
9371 TmpInst.addOperand(Inst.getOperand(1)); // Rn
9372 TmpInst.addOperand(Inst.getOperand(1)); // Rn_wb == tied Rn
9373 TmpInst.addOperand(Inst.getOperand(2)); // alignment
9374 TmpInst.addOperand(MCOperand::createReg(0)); // Rm
9375 TmpInst.addOperand(Inst.getOperand(3)); // CondCode
9376 TmpInst.addOperand(Inst.getOperand(4));
9377 Inst = TmpInst;
9378 return true;
9379 }
9380
9381 case ARM::VLD3DUPdWB_register_Asm_8:
9382 case ARM::VLD3DUPdWB_register_Asm_16:
9383 case ARM::VLD3DUPdWB_register_Asm_32:
9384 case ARM::VLD3DUPqWB_register_Asm_8:
9385 case ARM::VLD3DUPqWB_register_Asm_16:
9386 case ARM::VLD3DUPqWB_register_Asm_32: {
9387 MCInst TmpInst;
9388 unsigned Spacing;
9389 TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing));
9390 TmpInst.addOperand(Inst.getOperand(0)); // Vd
9391 TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
9392 Spacing));
9393 TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
9394 Spacing * 2));
9395 TmpInst.addOperand(Inst.getOperand(1)); // Rn
9396 TmpInst.addOperand(Inst.getOperand(1)); // Rn_wb == tied Rn
9397 TmpInst.addOperand(Inst.getOperand(2)); // alignment
9398 TmpInst.addOperand(Inst.getOperand(3)); // Rm
9399 TmpInst.addOperand(Inst.getOperand(4)); // CondCode
9400 TmpInst.addOperand(Inst.getOperand(5));
9401 Inst = TmpInst;
9402 return true;
9403 }
9404
9405 // VLD3 multiple 3-element structure instructions.
9406 case ARM::VLD3dAsm_8:
9407 case ARM::VLD3dAsm_16:
9408 case ARM::VLD3dAsm_32:
9409 case ARM::VLD3qAsm_8:
9410 case ARM::VLD3qAsm_16:
9411 case ARM::VLD3qAsm_32: {
9412 MCInst TmpInst;
9413 unsigned Spacing;
9414 TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing));
9415 TmpInst.addOperand(Inst.getOperand(0)); // Vd
9416 TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
9417 Spacing));
9418 TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
9419 Spacing * 2));
9420 TmpInst.addOperand(Inst.getOperand(1)); // Rn
9421 TmpInst.addOperand(Inst.getOperand(2)); // alignment
9422 TmpInst.addOperand(Inst.getOperand(3)); // CondCode
9423 TmpInst.addOperand(Inst.getOperand(4));
9424 Inst = TmpInst;
9425 return true;
9426 }
9427
9428 case ARM::VLD3dWB_fixed_Asm_8:
9429 case ARM::VLD3dWB_fixed_Asm_16:
9430 case ARM::VLD3dWB_fixed_Asm_32:
9431 case ARM::VLD3qWB_fixed_Asm_8:
9432 case ARM::VLD3qWB_fixed_Asm_16:
9433 case ARM::VLD3qWB_fixed_Asm_32: {
9434 MCInst TmpInst;
9435 unsigned Spacing;
9436 TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing));
9437 TmpInst.addOperand(Inst.getOperand(0)); // Vd
9438 TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
9439 Spacing));
9440 TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
9441 Spacing * 2));
9442 TmpInst.addOperand(Inst.getOperand(1)); // Rn
9443 TmpInst.addOperand(Inst.getOperand(1)); // Rn_wb == tied Rn
9444 TmpInst.addOperand(Inst.getOperand(2)); // alignment
9445 TmpInst.addOperand(MCOperand::createReg(0)); // Rm
9446 TmpInst.addOperand(Inst.getOperand(3)); // CondCode
9447 TmpInst.addOperand(Inst.getOperand(4));
9448 Inst = TmpInst;
9449 return true;
9450 }
9451
9452 case ARM::VLD3dWB_register_Asm_8:
9453 case ARM::VLD3dWB_register_Asm_16:
9454 case ARM::VLD3dWB_register_Asm_32:
9455 case ARM::VLD3qWB_register_Asm_8:
9456 case ARM::VLD3qWB_register_Asm_16:
9457 case ARM::VLD3qWB_register_Asm_32: {
9458 MCInst TmpInst;
9459 unsigned Spacing;
9460 TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing));
9461 TmpInst.addOperand(Inst.getOperand(0)); // Vd
9462 TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
9463 Spacing));
9464 TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
9465 Spacing * 2));
9466 TmpInst.addOperand(Inst.getOperand(1)); // Rn
9467 TmpInst.addOperand(Inst.getOperand(1)); // Rn_wb == tied Rn
9468 TmpInst.addOperand(Inst.getOperand(2)); // alignment
9469 TmpInst.addOperand(Inst.getOperand(3)); // Rm
9470 TmpInst.addOperand(Inst.getOperand(4)); // CondCode
9471 TmpInst.addOperand(Inst.getOperand(5));
9472 Inst = TmpInst;
9473 return true;
9474 }
9475
9476 // VLD4DUP single 4-element structure to all lanes instructions.
9477 case ARM::VLD4DUPdAsm_8:
9478 case ARM::VLD4DUPdAsm_16:
9479 case ARM::VLD4DUPdAsm_32:
9480 case ARM::VLD4DUPqAsm_8:
9481 case ARM::VLD4DUPqAsm_16:
9482 case ARM::VLD4DUPqAsm_32: {
9483 MCInst TmpInst;
9484 unsigned Spacing;
9485 TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing));
9486 TmpInst.addOperand(Inst.getOperand(0)); // Vd
9487 TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
9488 Spacing));
9489 TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
9490 Spacing * 2));
9491 TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
9492 Spacing * 3));
9493 TmpInst.addOperand(Inst.getOperand(1)); // Rn
9494 TmpInst.addOperand(Inst.getOperand(2)); // alignment
9495 TmpInst.addOperand(Inst.getOperand(3)); // CondCode
9496 TmpInst.addOperand(Inst.getOperand(4));
9497 Inst = TmpInst;
9498 return true;
9499 }
9500
9501 case ARM::VLD4DUPdWB_fixed_Asm_8:
9502 case ARM::VLD4DUPdWB_fixed_Asm_16:
9503 case ARM::VLD4DUPdWB_fixed_Asm_32:
9504 case ARM::VLD4DUPqWB_fixed_Asm_8:
9505 case ARM::VLD4DUPqWB_fixed_Asm_16:
9506 case ARM::VLD4DUPqWB_fixed_Asm_32: {
9507 MCInst TmpInst;
9508 unsigned Spacing;
9509 TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing));
9510 TmpInst.addOperand(Inst.getOperand(0)); // Vd
9511 TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
9512 Spacing));
9513 TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
9514 Spacing * 2));
9515 TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
9516 Spacing * 3));
9517 TmpInst.addOperand(Inst.getOperand(1)); // Rn
9518 TmpInst.addOperand(Inst.getOperand(1)); // Rn_wb == tied Rn
9519 TmpInst.addOperand(Inst.getOperand(2)); // alignment
9520 TmpInst.addOperand(MCOperand::createReg(0)); // Rm
9521 TmpInst.addOperand(Inst.getOperand(3)); // CondCode
9522 TmpInst.addOperand(Inst.getOperand(4));
9523 Inst = TmpInst;
9524 return true;
9525 }
9526
9527 case ARM::VLD4DUPdWB_register_Asm_8:
9528 case ARM::VLD4DUPdWB_register_Asm_16:
9529 case ARM::VLD4DUPdWB_register_Asm_32:
9530 case ARM::VLD4DUPqWB_register_Asm_8:
9531 case ARM::VLD4DUPqWB_register_Asm_16:
9532 case ARM::VLD4DUPqWB_register_Asm_32: {
9533 MCInst TmpInst;
9534 unsigned Spacing;
9535 TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing));
9536 TmpInst.addOperand(Inst.getOperand(0)); // Vd
9537 TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
9538 Spacing));
9539 TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
9540 Spacing * 2));
9541 TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
9542 Spacing * 3));
9543 TmpInst.addOperand(Inst.getOperand(1)); // Rn
9544 TmpInst.addOperand(Inst.getOperand(1)); // Rn_wb == tied Rn
9545 TmpInst.addOperand(Inst.getOperand(2)); // alignment
9546 TmpInst.addOperand(Inst.getOperand(3)); // Rm
9547 TmpInst.addOperand(Inst.getOperand(4)); // CondCode
9548 TmpInst.addOperand(Inst.getOperand(5));
9549 Inst = TmpInst;
9550 return true;
9551 }
9552
9553 // VLD4 multiple 4-element structure instructions.
9554 case ARM::VLD4dAsm_8:
9555 case ARM::VLD4dAsm_16:
9556 case ARM::VLD4dAsm_32:
9557 case ARM::VLD4qAsm_8:
9558 case ARM::VLD4qAsm_16:
9559 case ARM::VLD4qAsm_32: {
9560 MCInst TmpInst;
9561 unsigned Spacing;
9562 TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing));
9563 TmpInst.addOperand(Inst.getOperand(0)); // Vd
9564 TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
9565 Spacing));
9566 TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
9567 Spacing * 2));
9568 TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
9569 Spacing * 3));
9570 TmpInst.addOperand(Inst.getOperand(1)); // Rn
9571 TmpInst.addOperand(Inst.getOperand(2)); // alignment
9572 TmpInst.addOperand(Inst.getOperand(3)); // CondCode
9573 TmpInst.addOperand(Inst.getOperand(4));
9574 Inst = TmpInst;
9575 return true;
9576 }
9577
9578 case ARM::VLD4dWB_fixed_Asm_8:
9579 case ARM::VLD4dWB_fixed_Asm_16:
9580 case ARM::VLD4dWB_fixed_Asm_32:
9581 case ARM::VLD4qWB_fixed_Asm_8:
9582 case ARM::VLD4qWB_fixed_Asm_16:
9583 case ARM::VLD4qWB_fixed_Asm_32: {
9584 MCInst TmpInst;
9585 unsigned Spacing;
9586 TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing));
9587 TmpInst.addOperand(Inst.getOperand(0)); // Vd
9588 TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
9589 Spacing));
9590 TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
9591 Spacing * 2));
9592 TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
9593 Spacing * 3));
9594 TmpInst.addOperand(Inst.getOperand(1)); // Rn
9595 TmpInst.addOperand(Inst.getOperand(1)); // Rn_wb == tied Rn
9596 TmpInst.addOperand(Inst.getOperand(2)); // alignment
9597 TmpInst.addOperand(MCOperand::createReg(0)); // Rm
9598 TmpInst.addOperand(Inst.getOperand(3)); // CondCode
9599 TmpInst.addOperand(Inst.getOperand(4));
9600 Inst = TmpInst;
9601 return true;
9602 }
9603
9604 case ARM::VLD4dWB_register_Asm_8:
9605 case ARM::VLD4dWB_register_Asm_16:
9606 case ARM::VLD4dWB_register_Asm_32:
9607 case ARM::VLD4qWB_register_Asm_8:
9608 case ARM::VLD4qWB_register_Asm_16:
9609 case ARM::VLD4qWB_register_Asm_32: {
9610 MCInst TmpInst;
9611 unsigned Spacing;
9612 TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing));
9613 TmpInst.addOperand(Inst.getOperand(0)); // Vd
9614 TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
9615 Spacing));
9616 TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
9617 Spacing * 2));
9618 TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
9619 Spacing * 3));
9620 TmpInst.addOperand(Inst.getOperand(1)); // Rn
9621 TmpInst.addOperand(Inst.getOperand(1)); // Rn_wb == tied Rn
9622 TmpInst.addOperand(Inst.getOperand(2)); // alignment
9623 TmpInst.addOperand(Inst.getOperand(3)); // Rm
9624 TmpInst.addOperand(Inst.getOperand(4)); // CondCode
9625 TmpInst.addOperand(Inst.getOperand(5));
9626 Inst = TmpInst;
9627 return true;
9628 }
9629
9630 // VST3 multiple 3-element structure instructions.
9631 case ARM::VST3dAsm_8:
9632 case ARM::VST3dAsm_16:
9633 case ARM::VST3dAsm_32:
9634 case ARM::VST3qAsm_8:
9635 case ARM::VST3qAsm_16:
9636 case ARM::VST3qAsm_32: {
9637 MCInst TmpInst;
9638 unsigned Spacing;
9639 TmpInst.setOpcode(getRealVSTOpcode(Inst.getOpcode(), Spacing));
9640 TmpInst.addOperand(Inst.getOperand(1)); // Rn
9641 TmpInst.addOperand(Inst.getOperand(2)); // alignment
9642 TmpInst.addOperand(Inst.getOperand(0)); // Vd
9643 TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
9644 Spacing));
9645 TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
9646 Spacing * 2));
9647 TmpInst.addOperand(Inst.getOperand(3)); // CondCode
9648 TmpInst.addOperand(Inst.getOperand(4));
9649 Inst = TmpInst;
9650 return true;
9651 }
9652
9653 case ARM::VST3dWB_fixed_Asm_8:
9654 case ARM::VST3dWB_fixed_Asm_16:
9655 case ARM::VST3dWB_fixed_Asm_32:
9656 case ARM::VST3qWB_fixed_Asm_8:
9657 case ARM::VST3qWB_fixed_Asm_16:
9658 case ARM::VST3qWB_fixed_Asm_32: {
9659 MCInst TmpInst;
9660 unsigned Spacing;
9661 TmpInst.setOpcode(getRealVSTOpcode(Inst.getOpcode(), Spacing));
9662 TmpInst.addOperand(Inst.getOperand(1)); // Rn
9663 TmpInst.addOperand(Inst.getOperand(1)); // Rn_wb == tied Rn
9664 TmpInst.addOperand(Inst.getOperand(2)); // alignment
9665 TmpInst.addOperand(MCOperand::createReg(0)); // Rm
9666 TmpInst.addOperand(Inst.getOperand(0)); // Vd
9667 TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
9668 Spacing));
9669 TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
9670 Spacing * 2));
9671 TmpInst.addOperand(Inst.getOperand(3)); // CondCode
9672 TmpInst.addOperand(Inst.getOperand(4));
9673 Inst = TmpInst;
9674 return true;
9675 }
9676
9677 case ARM::VST3dWB_register_Asm_8:
9678 case ARM::VST3dWB_register_Asm_16:
9679 case ARM::VST3dWB_register_Asm_32:
9680 case ARM::VST3qWB_register_Asm_8:
9681 case ARM::VST3qWB_register_Asm_16:
9682 case ARM::VST3qWB_register_Asm_32: {
9683 MCInst TmpInst;
9684 unsigned Spacing;
9685 TmpInst.setOpcode(getRealVSTOpcode(Inst.getOpcode(), Spacing));
9686 TmpInst.addOperand(Inst.getOperand(1)); // Rn
9687 TmpInst.addOperand(Inst.getOperand(1)); // Rn_wb == tied Rn
9688 TmpInst.addOperand(Inst.getOperand(2)); // alignment
9689 TmpInst.addOperand(Inst.getOperand(3)); // Rm
9690 TmpInst.addOperand(Inst.getOperand(0)); // Vd
9691 TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
9692 Spacing));
9693 TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
9694 Spacing * 2));
9695 TmpInst.addOperand(Inst.getOperand(4)); // CondCode
9696 TmpInst.addOperand(Inst.getOperand(5));
9697 Inst = TmpInst;
9698 return true;
9699 }
9700
9701 // VST4 multiple 4-element structure instructions.
9702 case ARM::VST4dAsm_8:
9703 case ARM::VST4dAsm_16:
9704 case ARM::VST4dAsm_32:
9705 case ARM::VST4qAsm_8:
9706 case ARM::VST4qAsm_16:
9707 case ARM::VST4qAsm_32: {
9708 MCInst TmpInst;
9709 unsigned Spacing;
9710 TmpInst.setOpcode(getRealVSTOpcode(Inst.getOpcode(), Spacing));
9711 TmpInst.addOperand(Inst.getOperand(1)); // Rn
9712 TmpInst.addOperand(Inst.getOperand(2)); // alignment
9713 TmpInst.addOperand(Inst.getOperand(0)); // Vd
9714 TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
9715 Spacing));
9716 TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
9717 Spacing * 2));
9718 TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
9719 Spacing * 3));
9720 TmpInst.addOperand(Inst.getOperand(3)); // CondCode
9721 TmpInst.addOperand(Inst.getOperand(4));
9722 Inst = TmpInst;
9723 return true;
9724 }
9725
9726 case ARM::VST4dWB_fixed_Asm_8:
9727 case ARM::VST4dWB_fixed_Asm_16:
9728 case ARM::VST4dWB_fixed_Asm_32:
9729 case ARM::VST4qWB_fixed_Asm_8:
9730 case ARM::VST4qWB_fixed_Asm_16:
9731 case ARM::VST4qWB_fixed_Asm_32: {
9732 MCInst TmpInst;
9733 unsigned Spacing;
9734 TmpInst.setOpcode(getRealVSTOpcode(Inst.getOpcode(), Spacing));
9735 TmpInst.addOperand(Inst.getOperand(1)); // Rn
9736 TmpInst.addOperand(Inst.getOperand(1)); // Rn_wb == tied Rn
9737 TmpInst.addOperand(Inst.getOperand(2)); // alignment
9738 TmpInst.addOperand(MCOperand::createReg(0)); // Rm
9739 TmpInst.addOperand(Inst.getOperand(0)); // Vd
9740 TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
9741 Spacing));
9742 TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
9743 Spacing * 2));
9744 TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
9745 Spacing * 3));
9746 TmpInst.addOperand(Inst.getOperand(3)); // CondCode
9747 TmpInst.addOperand(Inst.getOperand(4));
9748 Inst = TmpInst;
9749 return true;
9750 }
9751
9752 case ARM::VST4dWB_register_Asm_8:
9753 case ARM::VST4dWB_register_Asm_16:
9754 case ARM::VST4dWB_register_Asm_32:
9755 case ARM::VST4qWB_register_Asm_8:
9756 case ARM::VST4qWB_register_Asm_16:
9757 case ARM::VST4qWB_register_Asm_32: {
9758 MCInst TmpInst;
9759 unsigned Spacing;
9760 TmpInst.setOpcode(getRealVSTOpcode(Inst.getOpcode(), Spacing));
9761 TmpInst.addOperand(Inst.getOperand(1)); // Rn
9762 TmpInst.addOperand(Inst.getOperand(1)); // Rn_wb == tied Rn
9763 TmpInst.addOperand(Inst.getOperand(2)); // alignment
9764 TmpInst.addOperand(Inst.getOperand(3)); // Rm
9765 TmpInst.addOperand(Inst.getOperand(0)); // Vd
9766 TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
9767 Spacing));
9768 TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
9769 Spacing * 2));
9770 TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
9771 Spacing * 3));
9772 TmpInst.addOperand(Inst.getOperand(4)); // CondCode
9773 TmpInst.addOperand(Inst.getOperand(5));
9774 Inst = TmpInst;
9775 return true;
9776 }
9777
9778 // Handle encoding choice for the shift-immediate instructions.
9779 case ARM::t2LSLri:
9780 case ARM::t2LSRri:
9781 case ARM::t2ASRri:
9782 if (isARMLowRegister(Inst.getOperand(0).getReg()) &&
9783 isARMLowRegister(Inst.getOperand(1).getReg()) &&
9784 Inst.getOperand(5).getReg() == (inITBlock() ? 0 : ARM::CPSR) &&
9785 !HasWideQualifier) {
9786 unsigned NewOpc;
9787 switch (Inst.getOpcode()) {
9788       default: llvm_unreachable("unexpected opcode");
9789 case ARM::t2LSLri: NewOpc = ARM::tLSLri; break;
9790 case ARM::t2LSRri: NewOpc = ARM::tLSRri; break;
9791 case ARM::t2ASRri: NewOpc = ARM::tASRri; break;
9792 }
9793 // The Thumb1 operands aren't in the same order. Awesome, eh?
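       // In the 16-bit form the S/cc_out operand follows Rd immediately,
       // whereas the t2 form carries it last, hence operand 5 moving up in the
       // list below.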
9794 MCInst TmpInst;
9795 TmpInst.setOpcode(NewOpc);
9796 TmpInst.addOperand(Inst.getOperand(0));
9797 TmpInst.addOperand(Inst.getOperand(5));
9798 TmpInst.addOperand(Inst.getOperand(1));
9799 TmpInst.addOperand(Inst.getOperand(2));
9800 TmpInst.addOperand(Inst.getOperand(3));
9801 TmpInst.addOperand(Inst.getOperand(4));
9802 Inst = TmpInst;
9803 return true;
9804 }
9805 return false;
9806
9807 // Handle the Thumb2 mode MOV complex aliases.
9808 case ARM::t2MOVsr:
9809 case ARM::t2MOVSsr: {
9810     // Which instruction to expand to depends on the CCOut operand, on
9811     // whether we're in an IT block, and on whether the register operands
9812     // are low registers.
9813 bool isNarrow = false;
9814 if (isARMLowRegister(Inst.getOperand(0).getReg()) &&
9815 isARMLowRegister(Inst.getOperand(1).getReg()) &&
9816 isARMLowRegister(Inst.getOperand(2).getReg()) &&
9817 Inst.getOperand(0).getReg() == Inst.getOperand(1).getReg() &&
9818 inITBlock() == (Inst.getOpcode() == ARM::t2MOVsr) &&
9819 !HasWideQualifier)
9820 isNarrow = true;
9821 MCInst TmpInst;
9822 unsigned newOpc;
9823 switch(ARM_AM::getSORegShOp(Inst.getOperand(3).getImm())) {
9824     default: llvm_unreachable("unexpected opcode!");
9825 case ARM_AM::asr: newOpc = isNarrow ? ARM::tASRrr : ARM::t2ASRrr; break;
9826 case ARM_AM::lsr: newOpc = isNarrow ? ARM::tLSRrr : ARM::t2LSRrr; break;
9827 case ARM_AM::lsl: newOpc = isNarrow ? ARM::tLSLrr : ARM::t2LSLrr; break;
9828 case ARM_AM::ror: newOpc = isNarrow ? ARM::tROR : ARM::t2RORrr; break;
9829 }
9830 TmpInst.setOpcode(newOpc);
9831 TmpInst.addOperand(Inst.getOperand(0)); // Rd
9832 if (isNarrow)
9833 TmpInst.addOperand(MCOperand::createReg(
9834 Inst.getOpcode() == ARM::t2MOVSsr ? ARM::CPSR : 0));
9835 TmpInst.addOperand(Inst.getOperand(1)); // Rn
9836 TmpInst.addOperand(Inst.getOperand(2)); // Rm
9837 TmpInst.addOperand(Inst.getOperand(4)); // CondCode
9838 TmpInst.addOperand(Inst.getOperand(5));
9839 if (!isNarrow)
9840 TmpInst.addOperand(MCOperand::createReg(
9841 Inst.getOpcode() == ARM::t2MOVSsr ? ARM::CPSR : 0));
9842 Inst = TmpInst;
9843 return true;
9844 }
9845 case ARM::t2MOVsi:
9846 case ARM::t2MOVSsi: {
9847     // Which instruction to expand to depends on the CCOut operand, on
9848     // whether we're in an IT block, and on whether the register operands
9849     // are low registers.
9850 bool isNarrow = false;
9851 if (isARMLowRegister(Inst.getOperand(0).getReg()) &&
9852 isARMLowRegister(Inst.getOperand(1).getReg()) &&
9853 inITBlock() == (Inst.getOpcode() == ARM::t2MOVsi) &&
9854 !HasWideQualifier)
9855 isNarrow = true;
9856 MCInst TmpInst;
9857 unsigned newOpc;
9858 unsigned Shift = ARM_AM::getSORegShOp(Inst.getOperand(2).getImm());
9859 unsigned Amount = ARM_AM::getSORegOffset(Inst.getOperand(2).getImm());
9860 bool isMov = false;
9861 // MOV rd, rm, LSL #0 is actually a MOV instruction
9862 if (Shift == ARM_AM::lsl && Amount == 0) {
9863 isMov = true;
9864 // The 16-bit encoding of MOV rd, rm, LSL #N is explicitly encoding T2 of
9865 // MOV (register) in the ARMv8-A and ARMv8-M manuals, and immediate 0 is
9866 // unpredictable in an IT block so the 32-bit encoding T3 has to be used
9867 // instead.
9868 if (inITBlock()) {
9869 isNarrow = false;
9870 }
9871 newOpc = isNarrow ? ARM::tMOVSr : ARM::t2MOVr;
9872 } else {
9873 switch(Shift) {
9874       default: llvm_unreachable("unexpected opcode!");
9875 case ARM_AM::asr: newOpc = isNarrow ? ARM::tASRri : ARM::t2ASRri; break;
9876 case ARM_AM::lsr: newOpc = isNarrow ? ARM::tLSRri : ARM::t2LSRri; break;
9877 case ARM_AM::lsl: newOpc = isNarrow ? ARM::tLSLri : ARM::t2LSLri; break;
9878 case ARM_AM::ror: newOpc = ARM::t2RORri; isNarrow = false; break;
9879 case ARM_AM::rrx: isNarrow = false; newOpc = ARM::t2RRX; break;
9880 }
9881 }
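     // For LSR/ASR the architecture encodes a shift amount of 32 as 0 in the
     // immediate field, hence the adjustment below.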
9882 if (Amount == 32) Amount = 0;
9883 TmpInst.setOpcode(newOpc);
9884 TmpInst.addOperand(Inst.getOperand(0)); // Rd
9885 if (isNarrow && !isMov)
9886 TmpInst.addOperand(MCOperand::createReg(
9887 Inst.getOpcode() == ARM::t2MOVSsi ? ARM::CPSR : 0));
9888 TmpInst.addOperand(Inst.getOperand(1)); // Rn
9889 if (newOpc != ARM::t2RRX && !isMov)
9890 TmpInst.addOperand(MCOperand::createImm(Amount));
9891 TmpInst.addOperand(Inst.getOperand(3)); // CondCode
9892 TmpInst.addOperand(Inst.getOperand(4));
9893 if (!isNarrow)
9894 TmpInst.addOperand(MCOperand::createReg(
9895 Inst.getOpcode() == ARM::t2MOVSsi ? ARM::CPSR : 0));
9896 Inst = TmpInst;
9897 return true;
9898 }
9899 // Handle the ARM mode MOV complex aliases.
9900 case ARM::ASRr:
9901 case ARM::LSRr:
9902 case ARM::LSLr:
9903 case ARM::RORr: {
9904 ARM_AM::ShiftOpc ShiftTy;
9905 switch(Inst.getOpcode()) {
9906     default: llvm_unreachable("unexpected opcode!");
9907 case ARM::ASRr: ShiftTy = ARM_AM::asr; break;
9908 case ARM::LSRr: ShiftTy = ARM_AM::lsr; break;
9909 case ARM::LSLr: ShiftTy = ARM_AM::lsl; break;
9910 case ARM::RORr: ShiftTy = ARM_AM::ror; break;
9911 }
9912 unsigned Shifter = ARM_AM::getSORegOpc(ShiftTy, 0);
9913 MCInst TmpInst;
9914 TmpInst.setOpcode(ARM::MOVsr);
9915 TmpInst.addOperand(Inst.getOperand(0)); // Rd
9916 TmpInst.addOperand(Inst.getOperand(1)); // Rn
9917 TmpInst.addOperand(Inst.getOperand(2)); // Rm
9918 TmpInst.addOperand(MCOperand::createImm(Shifter)); // Shift value and ty
9919 TmpInst.addOperand(Inst.getOperand(3)); // CondCode
9920 TmpInst.addOperand(Inst.getOperand(4));
9921 TmpInst.addOperand(Inst.getOperand(5)); // cc_out
9922 Inst = TmpInst;
9923 return true;
9924 }
9925 case ARM::ASRi:
9926 case ARM::LSRi:
9927 case ARM::LSLi:
9928 case ARM::RORi: {
9929 ARM_AM::ShiftOpc ShiftTy;
9930 switch(Inst.getOpcode()) {
9931     default: llvm_unreachable("unexpected opcode!");
9932 case ARM::ASRi: ShiftTy = ARM_AM::asr; break;
9933 case ARM::LSRi: ShiftTy = ARM_AM::lsr; break;
9934 case ARM::LSLi: ShiftTy = ARM_AM::lsl; break;
9935 case ARM::RORi: ShiftTy = ARM_AM::ror; break;
9936 }
9937 // A shift by zero is a plain MOVr, not a MOVsi.
9938 unsigned Amt = Inst.getOperand(2).getImm();
9939 unsigned Opc = Amt == 0 ? ARM::MOVr : ARM::MOVsi;
9940 // A shift by 32 should be encoded as 0 when permitted
9941 if (Amt == 32 && (ShiftTy == ARM_AM::lsr || ShiftTy == ARM_AM::asr))
9942 Amt = 0;
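     // For example, 'lsl r0, r1, #0' is emitted as plain 'mov r0, r1' (MOVr),
     // while 'asr r0, r1, #32' becomes MOVsi with an ASR shifter whose encoded
     // amount is 0.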
9943 unsigned Shifter = ARM_AM::getSORegOpc(ShiftTy, Amt);
9944 MCInst TmpInst;
9945 TmpInst.setOpcode(Opc);
9946 TmpInst.addOperand(Inst.getOperand(0)); // Rd
9947 TmpInst.addOperand(Inst.getOperand(1)); // Rn
9948 if (Opc == ARM::MOVsi)
9949 TmpInst.addOperand(MCOperand::createImm(Shifter)); // Shift value and ty
9950 TmpInst.addOperand(Inst.getOperand(3)); // CondCode
9951 TmpInst.addOperand(Inst.getOperand(4));
9952 TmpInst.addOperand(Inst.getOperand(5)); // cc_out
9953 Inst = TmpInst;
9954 return true;
9955 }
9956 case ARM::RRXi: {
9957 unsigned Shifter = ARM_AM::getSORegOpc(ARM_AM::rrx, 0);
9958 MCInst TmpInst;
9959 TmpInst.setOpcode(ARM::MOVsi);
9960 TmpInst.addOperand(Inst.getOperand(0)); // Rd
9961 TmpInst.addOperand(Inst.getOperand(1)); // Rn
9962 TmpInst.addOperand(MCOperand::createImm(Shifter)); // Shift value and ty
9963 TmpInst.addOperand(Inst.getOperand(2)); // CondCode
9964 TmpInst.addOperand(Inst.getOperand(3));
9965 TmpInst.addOperand(Inst.getOperand(4)); // cc_out
9966 Inst = TmpInst;
9967 return true;
9968 }
9969 case ARM::t2LDMIA_UPD: {
9970 // If this is a load of a single register, then we should use
9971 // a post-indexed LDR instruction instead, per the ARM ARM.
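     // E.g. 'ldmia r0!, {r1}' is emitted as 'ldr r1, [r0], #4'.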
9972 if (Inst.getNumOperands() != 5)
9973 return false;
9974 MCInst TmpInst;
9975 TmpInst.setOpcode(ARM::t2LDR_POST);
9976 TmpInst.addOperand(Inst.getOperand(4)); // Rt
9977 TmpInst.addOperand(Inst.getOperand(0)); // Rn_wb
9978 TmpInst.addOperand(Inst.getOperand(1)); // Rn
9979 TmpInst.addOperand(MCOperand::createImm(4));
9980 TmpInst.addOperand(Inst.getOperand(2)); // CondCode
9981 TmpInst.addOperand(Inst.getOperand(3));
9982 Inst = TmpInst;
9983 return true;
9984 }
9985 case ARM::t2STMDB_UPD: {
9986 // If this is a store of a single register, then we should use
9987 // a pre-indexed STR instruction instead, per the ARM ARM.
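     // E.g. 'stmdb r0!, {r1}' is emitted as 'str r1, [r0, #-4]!'.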
9988 if (Inst.getNumOperands() != 5)
9989 return false;
9990 MCInst TmpInst;
9991 TmpInst.setOpcode(ARM::t2STR_PRE);
9992 TmpInst.addOperand(Inst.getOperand(0)); // Rn_wb
9993 TmpInst.addOperand(Inst.getOperand(4)); // Rt
9994 TmpInst.addOperand(Inst.getOperand(1)); // Rn
9995 TmpInst.addOperand(MCOperand::createImm(-4));
9996 TmpInst.addOperand(Inst.getOperand(2)); // CondCode
9997 TmpInst.addOperand(Inst.getOperand(3));
9998 Inst = TmpInst;
9999 return true;
10000 }
10001 case ARM::LDMIA_UPD:
10002 // If this is a load of a single register via a 'pop', then we should use
10003 // a post-indexed LDR instruction instead, per the ARM ARM.
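     // E.g. in ARM state, 'pop {r3}' is emitted as 'ldr r3, [sp], #4'.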
10004 if (static_cast<ARMOperand &>(*Operands[0]).getToken() == "pop" &&
10005 Inst.getNumOperands() == 5) {
10006 MCInst TmpInst;
10007 TmpInst.setOpcode(ARM::LDR_POST_IMM);
10008 TmpInst.addOperand(Inst.getOperand(4)); // Rt
10009 TmpInst.addOperand(Inst.getOperand(0)); // Rn_wb
10010 TmpInst.addOperand(Inst.getOperand(1)); // Rn
10011 TmpInst.addOperand(MCOperand::createReg(0)); // am2offset
10012 TmpInst.addOperand(MCOperand::createImm(4));
10013 TmpInst.addOperand(Inst.getOperand(2)); // CondCode
10014 TmpInst.addOperand(Inst.getOperand(3));
10015 Inst = TmpInst;
10016 return true;
10017 }
10018 break;
10019 case ARM::STMDB_UPD:
10020 // If this is a store of a single register via a 'push', then we should use
10021 // a pre-indexed STR instruction instead, per the ARM ARM.
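     // E.g. in ARM state, 'push {r3}' is emitted as 'str r3, [sp, #-4]!'.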
10022 if (static_cast<ARMOperand &>(*Operands[0]).getToken() == "push" &&
10023 Inst.getNumOperands() == 5) {
10024 MCInst TmpInst;
10025 TmpInst.setOpcode(ARM::STR_PRE_IMM);
10026 TmpInst.addOperand(Inst.getOperand(0)); // Rn_wb
10027 TmpInst.addOperand(Inst.getOperand(4)); // Rt
10028 TmpInst.addOperand(Inst.getOperand(1)); // addrmode_imm12
10029 TmpInst.addOperand(MCOperand::createImm(-4));
10030 TmpInst.addOperand(Inst.getOperand(2)); // CondCode
10031 TmpInst.addOperand(Inst.getOperand(3));
10032 Inst = TmpInst;
10033 }
10034 break;
10035 case ARM::t2ADDri12:
10036 case ARM::t2SUBri12:
10037 case ARM::t2ADDspImm12:
10038 case ARM::t2SUBspImm12: {
10039 // If the immediate fits for encoding T3 and the generic
10040 // mnemonic was used, encoding T3 is preferred.
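     // E.g. a plain 'add r0, r1, #1', whose immediate fits the
     // modified-immediate scheme, prefers the T3 encoding, while the explicit
     // 'addw r0, r1, #1' spelling keeps the 12-bit T4 encoding.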
10041 const StringRef Token = static_cast<ARMOperand &>(*Operands[0]).getToken();
10042 if ((Token != "add" && Token != "sub") ||
10043 ARM_AM::getT2SOImmVal(Inst.getOperand(2).getImm()) == -1)
10044 break;
10045 switch (Inst.getOpcode()) {
10046 case ARM::t2ADDri12:
10047 Inst.setOpcode(ARM::t2ADDri);
10048 break;
10049 case ARM::t2SUBri12:
10050 Inst.setOpcode(ARM::t2SUBri);
10051 break;
10052 case ARM::t2ADDspImm12:
10053 Inst.setOpcode(ARM::t2ADDspImm);
10054 break;
10055 case ARM::t2SUBspImm12:
10056 Inst.setOpcode(ARM::t2SUBspImm);
10057 break;
10058 }
10059
10060 Inst.addOperand(MCOperand::createReg(0)); // cc_out
10061 return true;
10062 }
10063 case ARM::tADDi8:
10064 // If the immediate is in the range 0-7, we want tADDi3 iff Rd was
10065 // explicitly specified. From the ARM ARM: "Encoding T1 is preferred
10066 // to encoding T2 if <Rd> is specified and encoding T2 is preferred
10067 // to encoding T1 if <Rd> is omitted."
10068 if ((unsigned)Inst.getOperand(3).getImm() < 8 && Operands.size() == 6) {
10069 Inst.setOpcode(ARM::tADDi3);
10070 return true;
10071 }
10072 break;
10073 case ARM::tSUBi8:
10074    // If the immediate is in the range 0-7, we want tSUBi3 iff Rd was
10075 // explicitly specified. From the ARM ARM: "Encoding T1 is preferred
10076 // to encoding T2 if <Rd> is specified and encoding T2 is preferred
10077 // to encoding T1 if <Rd> is omitted."
10078 if ((unsigned)Inst.getOperand(3).getImm() < 8 && Operands.size() == 6) {
10079 Inst.setOpcode(ARM::tSUBi3);
10080 return true;
10081 }
10082 break;
10083 case ARM::t2ADDri:
10084 case ARM::t2SUBri: {
10085 // If the destination and first source operand are the same, and
10086 // the flags are compatible with the current IT status, use encoding T2
10087 // instead of T3. For compatibility with the system 'as'. Make sure the
10088 // wide encoding wasn't explicit.
10089 if (Inst.getOperand(0).getReg() != Inst.getOperand(1).getReg() ||
10090 !isARMLowRegister(Inst.getOperand(0).getReg()) ||
10091 (Inst.getOperand(2).isImm() &&
10092 (unsigned)Inst.getOperand(2).getImm() > 255) ||
10093 Inst.getOperand(5).getReg() != (inITBlock() ? 0 : ARM::CPSR) ||
10094 HasWideQualifier)
10095 break;
10096 MCInst TmpInst;
10097 TmpInst.setOpcode(Inst.getOpcode() == ARM::t2ADDri ?
10098 ARM::tADDi8 : ARM::tSUBi8);
10099 TmpInst.addOperand(Inst.getOperand(0));
10100 TmpInst.addOperand(Inst.getOperand(5));
10101 TmpInst.addOperand(Inst.getOperand(0));
10102 TmpInst.addOperand(Inst.getOperand(2));
10103 TmpInst.addOperand(Inst.getOperand(3));
10104 TmpInst.addOperand(Inst.getOperand(4));
10105 Inst = TmpInst;
10106 return true;
10107 }
10108 case ARM::t2ADDspImm:
10109 case ARM::t2SUBspImm: {
10110 // Prefer T1 encoding if possible
10111 if (Inst.getOperand(5).getReg() != 0 || HasWideQualifier)
10112 break;
10113 unsigned V = Inst.getOperand(2).getImm();
10114 if (V & 3 || V > ((1 << 7) - 1) << 2)
10115 break;
10116 MCInst TmpInst;
10117 TmpInst.setOpcode(Inst.getOpcode() == ARM::t2ADDspImm ? ARM::tADDspi
10118 : ARM::tSUBspi);
10119 TmpInst.addOperand(MCOperand::createReg(ARM::SP)); // destination reg
10120 TmpInst.addOperand(MCOperand::createReg(ARM::SP)); // source reg
10121 TmpInst.addOperand(MCOperand::createImm(V / 4)); // immediate
10122 TmpInst.addOperand(Inst.getOperand(3)); // pred
10123 TmpInst.addOperand(Inst.getOperand(4));
10124 Inst = TmpInst;
10125 return true;
10126 }
10127 case ARM::t2ADDrr: {
10128 // If the destination and first source operand are the same, and
10129 // there's no setting of the flags, use encoding T2 instead of T3.
10130 // Note that this is only for ADD, not SUB. This mirrors the system
10131 // 'as' behaviour. Also take advantage of ADD being commutative.
10132 // Make sure the wide encoding wasn't explicit.
10133 bool Swap = false;
10134 auto DestReg = Inst.getOperand(0).getReg();
10135 bool Transform = DestReg == Inst.getOperand(1).getReg();
10136 if (!Transform && DestReg == Inst.getOperand(2).getReg()) {
10137 Transform = true;
10138 Swap = true;
10139 }
10140 if (!Transform ||
10141 Inst.getOperand(5).getReg() != 0 ||
10142 HasWideQualifier)
10143 break;
10144 MCInst TmpInst;
10145 TmpInst.setOpcode(ARM::tADDhirr);
10146 TmpInst.addOperand(Inst.getOperand(0));
10147 TmpInst.addOperand(Inst.getOperand(0));
10148 TmpInst.addOperand(Inst.getOperand(Swap ? 1 : 2));
10149 TmpInst.addOperand(Inst.getOperand(3));
10150 TmpInst.addOperand(Inst.getOperand(4));
10151 Inst = TmpInst;
10152 return true;
10153 }
10154 case ARM::tADDrSP:
10155 // If the non-SP source operand and the destination operand are not the
10156 // same, we need to use the 32-bit encoding if it's available.
10157 if (Inst.getOperand(0).getReg() != Inst.getOperand(2).getReg()) {
10158 Inst.setOpcode(ARM::t2ADDrr);
10159 Inst.addOperand(MCOperand::createReg(0)); // cc_out
10160 return true;
10161 }
10162 break;
10163 case ARM::tB:
10164 // A Thumb conditional branch outside of an IT block is a tBcc.
10165 if (Inst.getOperand(1).getImm() != ARMCC::AL && !inITBlock()) {
10166 Inst.setOpcode(ARM::tBcc);
10167 return true;
10168 }
10169 break;
10170 case ARM::t2B:
10171 // A Thumb2 conditional branch outside of an IT block is a t2Bcc.
10172 if (Inst.getOperand(1).getImm() != ARMCC::AL && !inITBlock()){
10173 Inst.setOpcode(ARM::t2Bcc);
10174 return true;
10175 }
10176 break;
10177 case ARM::t2Bcc:
10178 // If the conditional is AL or we're in an IT block, we really want t2B.
10179 if (Inst.getOperand(1).getImm() == ARMCC::AL || inITBlock()) {
10180 Inst.setOpcode(ARM::t2B);
10181 return true;
10182 }
10183 break;
10184 case ARM::tBcc:
10185 // If the conditional is AL, we really want tB.
10186 if (Inst.getOperand(1).getImm() == ARMCC::AL) {
10187 Inst.setOpcode(ARM::tB);
10188 return true;
10189 }
10190 break;
10191 case ARM::tLDMIA: {
10192 // If the register list contains any high registers, or if the writeback
10193 // doesn't match what tLDMIA can do, we need to use the 32-bit encoding
10194 // instead if we're in Thumb2. Otherwise, this should have generated
10195 // an error in validateInstruction().
10196 unsigned Rn = Inst.getOperand(0).getReg();
10197 bool hasWritebackToken =
10198 (static_cast<ARMOperand &>(*Operands[3]).isToken() &&
10199 static_cast<ARMOperand &>(*Operands[3]).getToken() == "!");
10200 bool listContainsBase;
10201 if (checkLowRegisterList(Inst, 3, Rn, 0, listContainsBase) ||
10202 (!listContainsBase && !hasWritebackToken) ||
10203 (listContainsBase && hasWritebackToken)) {
10204 // 16-bit encoding isn't sufficient. Switch to the 32-bit version.
10205      assert(isThumbTwo());
10206 Inst.setOpcode(hasWritebackToken ? ARM::t2LDMIA_UPD : ARM::t2LDMIA);
10207 // If we're switching to the updating version, we need to insert
10208 // the writeback tied operand.
10209 if (hasWritebackToken)
10210 Inst.insert(Inst.begin(),
10211 MCOperand::createReg(Inst.getOperand(0).getReg()));
10212 return true;
10213 }
10214 break;
10215 }
10216 case ARM::tSTMIA_UPD: {
10217 // If the register list contains any high registers, we need to use
10218 // the 32-bit encoding instead if we're in Thumb2. Otherwise, this
10219 // should have generated an error in validateInstruction().
10220 unsigned Rn = Inst.getOperand(0).getReg();
10221 bool listContainsBase;
10222 if (checkLowRegisterList(Inst, 4, Rn, 0, listContainsBase)) {
10223 // 16-bit encoding isn't sufficient. Switch to the 32-bit version.
10224        assert(isThumbTwo());
10225 Inst.setOpcode(ARM::t2STMIA_UPD);
10226 return true;
10227 }
10228 break;
10229 }
10230 case ARM::tPOP: {
10231 bool listContainsBase;
10232 // If the register list contains any high registers, we need to use
10233 // the 32-bit encoding instead if we're in Thumb2. Otherwise, this
10234 // should have generated an error in validateInstruction().
10235 if (!checkLowRegisterList(Inst, 2, 0, ARM::PC, listContainsBase))
10236 return false;
10237    assert(isThumbTwo());
10238 Inst.setOpcode(ARM::t2LDMIA_UPD);
10239 // Add the base register and writeback operands.
10240 Inst.insert(Inst.begin(), MCOperand::createReg(ARM::SP));
10241 Inst.insert(Inst.begin(), MCOperand::createReg(ARM::SP));
10242 return true;
10243 }
10244 case ARM::tPUSH: {
10245 bool listContainsBase;
10246 if (!checkLowRegisterList(Inst, 2, 0, ARM::LR, listContainsBase))
10247 return false;
10248    assert(isThumbTwo());
10249 Inst.setOpcode(ARM::t2STMDB_UPD);
10250 // Add the base register and writeback operands.
10251 Inst.insert(Inst.begin(), MCOperand::createReg(ARM::SP));
10252 Inst.insert(Inst.begin(), MCOperand::createReg(ARM::SP));
10253 return true;
10254 }
10255 case ARM::t2MOVi:
10256 // If we can use the 16-bit encoding and the user didn't explicitly
10257 // request the 32-bit variant, transform it here.
10258 if (isARMLowRegister(Inst.getOperand(0).getReg()) &&
10259 (Inst.getOperand(1).isImm() &&
10260 (unsigned)Inst.getOperand(1).getImm() <= 255) &&
10261 Inst.getOperand(4).getReg() == (inITBlock() ? 0 : ARM::CPSR) &&
10262 !HasWideQualifier) {
10263 // The operands aren't in the same order for tMOVi8...
10264 MCInst TmpInst;
10265 TmpInst.setOpcode(ARM::tMOVi8);
10266 TmpInst.addOperand(Inst.getOperand(0));
10267 TmpInst.addOperand(Inst.getOperand(4));
10268 TmpInst.addOperand(Inst.getOperand(1));
10269 TmpInst.addOperand(Inst.getOperand(2));
10270 TmpInst.addOperand(Inst.getOperand(3));
10271 Inst = TmpInst;
10272 return true;
10273 }
10274 break;
10275
10276 case ARM::t2MOVr:
10277 // If we can use the 16-bit encoding and the user didn't explicitly
10278 // request the 32-bit variant, transform it here.
10279 if (isARMLowRegister(Inst.getOperand(0).getReg()) &&
10280 isARMLowRegister(Inst.getOperand(1).getReg()) &&
10281 Inst.getOperand(2).getImm() == ARMCC::AL &&
10282 Inst.getOperand(4).getReg() == ARM::CPSR &&
10283 !HasWideQualifier) {
10284 // The operands aren't the same for tMOV[S]r... (no cc_out)
10285 MCInst TmpInst;
10286 TmpInst.setOpcode(Inst.getOperand(4).getReg() ? ARM::tMOVSr : ARM::tMOVr);
10287 TmpInst.addOperand(Inst.getOperand(0));
10288 TmpInst.addOperand(Inst.getOperand(1));
10289 TmpInst.addOperand(Inst.getOperand(2));
10290 TmpInst.addOperand(Inst.getOperand(3));
10291 Inst = TmpInst;
10292 return true;
10293 }
10294 break;
10295
10296 case ARM::t2SXTH:
10297 case ARM::t2SXTB:
10298 case ARM::t2UXTH:
10299 case ARM::t2UXTB:
10300 // If we can use the 16-bit encoding and the user didn't explicitly
10301 // request the 32-bit variant, transform it here.
10302 if (isARMLowRegister(Inst.getOperand(0).getReg()) &&
10303 isARMLowRegister(Inst.getOperand(1).getReg()) &&
10304 Inst.getOperand(2).getImm() == 0 &&
10305 !HasWideQualifier) {
10306 unsigned NewOpc;
10307 switch (Inst.getOpcode()) {
10308 default: llvm_unreachable("Illegal opcode!")::llvm::llvm_unreachable_internal("Illegal opcode!", "/build/llvm-toolchain-snapshot-11~++20200309111110+2c36c23f347/llvm/lib/Target/ARM/AsmParser/ARMAsmParser.cpp"
, 10308)
;
10309 case ARM::t2SXTH: NewOpc = ARM::tSXTH; break;
10310 case ARM::t2SXTB: NewOpc = ARM::tSXTB; break;
10311 case ARM::t2UXTH: NewOpc = ARM::tUXTH; break;
10312 case ARM::t2UXTB: NewOpc = ARM::tUXTB; break;
10313 }
10314 // The operands aren't the same for thumb1 (no rotate operand).
10315 MCInst TmpInst;
10316 TmpInst.setOpcode(NewOpc);
10317 TmpInst.addOperand(Inst.getOperand(0));
10318 TmpInst.addOperand(Inst.getOperand(1));
10319 TmpInst.addOperand(Inst.getOperand(3));
10320 TmpInst.addOperand(Inst.getOperand(4));
10321 Inst = TmpInst;
10322 return true;
10323 }
10324 break;
10325
10326 case ARM::MOVsi: {
10327 ARM_AM::ShiftOpc SOpc = ARM_AM::getSORegShOp(Inst.getOperand(2).getImm());
10328    // rrx shifts and asr/lsr of #32 are encoded as 0
10329 if (SOpc == ARM_AM::rrx || SOpc == ARM_AM::asr || SOpc == ARM_AM::lsr)
10330 return false;
10331 if (ARM_AM::getSORegOffset(Inst.getOperand(2).getImm()) == 0) {
10332 // Shifting by zero is accepted as a vanilla 'MOVr'
10333 MCInst TmpInst;
10334 TmpInst.setOpcode(ARM::MOVr);
10335 TmpInst.addOperand(Inst.getOperand(0));
10336 TmpInst.addOperand(Inst.getOperand(1));
10337 TmpInst.addOperand(Inst.getOperand(3));
10338 TmpInst.addOperand(Inst.getOperand(4));
10339 TmpInst.addOperand(Inst.getOperand(5));
10340 Inst = TmpInst;
10341 return true;
10342 }
10343 return false;
10344 }
10345 case ARM::ANDrsi:
10346 case ARM::ORRrsi:
10347 case ARM::EORrsi:
10348 case ARM::BICrsi:
10349 case ARM::SUBrsi:
10350 case ARM::ADDrsi: {
10351 unsigned newOpc;
10352 ARM_AM::ShiftOpc SOpc = ARM_AM::getSORegShOp(Inst.getOperand(3).getImm());
10353 if (SOpc == ARM_AM::rrx) return false;
10354 switch (Inst.getOpcode()) {
10355 default: llvm_unreachable("unexpected opcode!")::llvm::llvm_unreachable_internal("unexpected opcode!", "/build/llvm-toolchain-snapshot-11~++20200309111110+2c36c23f347/llvm/lib/Target/ARM/AsmParser/ARMAsmParser.cpp"
, 10355)
;
10356 case ARM::ANDrsi: newOpc = ARM::ANDrr; break;
10357 case ARM::ORRrsi: newOpc = ARM::ORRrr; break;
10358 case ARM::EORrsi: newOpc = ARM::EORrr; break;
10359 case ARM::BICrsi: newOpc = ARM::BICrr; break;
10360 case ARM::SUBrsi: newOpc = ARM::SUBrr; break;
10361 case ARM::ADDrsi: newOpc = ARM::ADDrr; break;
10362 }
10363 // If the shift is by zero, use the non-shifted instruction definition.
10364 // The exception is for right shifts, where 0 == 32
10365 if (ARM_AM::getSORegOffset(Inst.getOperand(3).getImm()) == 0 &&
10366 !(SOpc == ARM_AM::lsr || SOpc == ARM_AM::asr)) {
10367 MCInst TmpInst;
10368 TmpInst.setOpcode(newOpc);
10369 TmpInst.addOperand(Inst.getOperand(0));
10370 TmpInst.addOperand(Inst.getOperand(1));
10371 TmpInst.addOperand(Inst.getOperand(2));
10372 TmpInst.addOperand(Inst.getOperand(4));
10373 TmpInst.addOperand(Inst.getOperand(5));
10374 TmpInst.addOperand(Inst.getOperand(6));
10375 Inst = TmpInst;
10376 return true;
10377 }
10378 return false;
10379 }
10380 case ARM::ITasm:
10381 case ARM::t2IT: {
10382 // Set up the IT block state according to the IT instruction we just
10383 // matched.
10384 assert(!inITBlock() && "nested IT blocks?!")((!inITBlock() && "nested IT blocks?!") ? static_cast
<void> (0) : __assert_fail ("!inITBlock() && \"nested IT blocks?!\""
, "/build/llvm-toolchain-snapshot-11~++20200309111110+2c36c23f347/llvm/lib/Target/ARM/AsmParser/ARMAsmParser.cpp"
, 10384, __PRETTY_FUNCTION__))
;
10385 startExplicitITBlock(ARMCC::CondCodes(Inst.getOperand(0).getImm()),
10386 Inst.getOperand(1).getImm());
10387 break;
10388 }
10389 case ARM::t2LSLrr:
10390 case ARM::t2LSRrr:
10391 case ARM::t2ASRrr:
10392 case ARM::t2SBCrr:
10393 case ARM::t2RORrr:
10394 case ARM::t2BICrr:
10395 // Assemblers should use the narrow encodings of these instructions when permissible.
10396 if ((isARMLowRegister(Inst.getOperand(1).getReg()) &&
10397 isARMLowRegister(Inst.getOperand(2).getReg())) &&
10398 Inst.getOperand(0).getReg() == Inst.getOperand(1).getReg() &&
10399 Inst.getOperand(5).getReg() == (inITBlock() ? 0 : ARM::CPSR) &&
10400 !HasWideQualifier) {
10401 unsigned NewOpc;
10402 switch (Inst.getOpcode()) {
10403 default: llvm_unreachable("unexpected opcode")::llvm::llvm_unreachable_internal("unexpected opcode", "/build/llvm-toolchain-snapshot-11~++20200309111110+2c36c23f347/llvm/lib/Target/ARM/AsmParser/ARMAsmParser.cpp"
, 10403)
;
10404 case ARM::t2LSLrr: NewOpc = ARM::tLSLrr; break;
10405 case ARM::t2LSRrr: NewOpc = ARM::tLSRrr; break;
10406 case ARM::t2ASRrr: NewOpc = ARM::tASRrr; break;
10407 case ARM::t2SBCrr: NewOpc = ARM::tSBC; break;
10408 case ARM::t2RORrr: NewOpc = ARM::tROR; break;
10409 case ARM::t2BICrr: NewOpc = ARM::tBIC; break;
10410 }
10411 MCInst TmpInst;
10412 TmpInst.setOpcode(NewOpc);
10413 TmpInst.addOperand(Inst.getOperand(0));
10414 TmpInst.addOperand(Inst.getOperand(5));
10415 TmpInst.addOperand(Inst.getOperand(1));
10416 TmpInst.addOperand(Inst.getOperand(2));
10417 TmpInst.addOperand(Inst.getOperand(3));
10418 TmpInst.addOperand(Inst.getOperand(4));
10419 Inst = TmpInst;
10420 return true;
10421 }
10422 return false;
10423
10424 case ARM::t2ANDrr:
10425 case ARM::t2EORrr:
10426 case ARM::t2ADCrr:
10427 case ARM::t2ORRrr:
10428 // Assemblers should use the narrow encodings of these instructions when permissible.
10429 // These instructions are special in that they are commutable, so shorter encodings
10430 // are available more often.
10431 if ((isARMLowRegister(Inst.getOperand(1).getReg()) &&
10432 isARMLowRegister(Inst.getOperand(2).getReg())) &&
10433 (Inst.getOperand(0).getReg() == Inst.getOperand(1).getReg() ||
10434 Inst.getOperand(0).getReg() == Inst.getOperand(2).getReg()) &&
10435 Inst.getOperand(5).getReg() == (inITBlock() ? 0 : ARM::CPSR) &&
10436 !HasWideQualifier) {
10437 unsigned NewOpc;
10438 switch (Inst.getOpcode()) {
10439 default: llvm_unreachable("unexpected opcode")::llvm::llvm_unreachable_internal("unexpected opcode", "/build/llvm-toolchain-snapshot-11~++20200309111110+2c36c23f347/llvm/lib/Target/ARM/AsmParser/ARMAsmParser.cpp"
, 10439)
;
10440 case ARM::t2ADCrr: NewOpc = ARM::tADC; break;
10441 case ARM::t2ANDrr: NewOpc = ARM::tAND; break;
10442 case ARM::t2EORrr: NewOpc = ARM::tEOR; break;
10443 case ARM::t2ORRrr: NewOpc = ARM::tORR; break;
10444 }
10445 MCInst TmpInst;
10446 TmpInst.setOpcode(NewOpc);
10447 TmpInst.addOperand(Inst.getOperand(0));
10448 TmpInst.addOperand(Inst.getOperand(5));
10449 if (Inst.getOperand(0).getReg() == Inst.getOperand(1).getReg()) {
10450 TmpInst.addOperand(Inst.getOperand(1));
10451 TmpInst.addOperand(Inst.getOperand(2));
10452 } else {
10453 TmpInst.addOperand(Inst.getOperand(2));
10454 TmpInst.addOperand(Inst.getOperand(1));
10455 }
10456 TmpInst.addOperand(Inst.getOperand(3));
10457 TmpInst.addOperand(Inst.getOperand(4));
10458 Inst = TmpInst;
10459 return true;
10460 }
10461 return false;
10462 case ARM::MVE_VPST:
10463 case ARM::MVE_VPTv16i8:
10464 case ARM::MVE_VPTv8i16:
10465 case ARM::MVE_VPTv4i32:
10466 case ARM::MVE_VPTv16u8:
10467 case ARM::MVE_VPTv8u16:
10468 case ARM::MVE_VPTv4u32:
10469 case ARM::MVE_VPTv16s8:
10470 case ARM::MVE_VPTv8s16:
10471 case ARM::MVE_VPTv4s32:
10472 case ARM::MVE_VPTv4f32:
10473 case ARM::MVE_VPTv8f16:
10474 case ARM::MVE_VPTv16i8r:
10475 case ARM::MVE_VPTv8i16r:
10476 case ARM::MVE_VPTv4i32r:
10477 case ARM::MVE_VPTv16u8r:
10478 case ARM::MVE_VPTv8u16r:
10479 case ARM::MVE_VPTv4u32r:
10480 case ARM::MVE_VPTv16s8r:
10481 case ARM::MVE_VPTv8s16r:
10482 case ARM::MVE_VPTv4s32r:
10483 case ARM::MVE_VPTv4f32r:
10484 case ARM::MVE_VPTv8f16r: {
10485 assert(!inVPTBlock() && "Nested VPT blocks are not allowed")((!inVPTBlock() && "Nested VPT blocks are not allowed"
) ? static_cast<void> (0) : __assert_fail ("!inVPTBlock() && \"Nested VPT blocks are not allowed\""
, "/build/llvm-toolchain-snapshot-11~++20200309111110+2c36c23f347/llvm/lib/Target/ARM/AsmParser/ARMAsmParser.cpp"
, 10485, __PRETTY_FUNCTION__))
;
10486 MCOperand &MO = Inst.getOperand(0);
10487 VPTState.Mask = MO.getImm();
10488 VPTState.CurPosition = 0;
10489 break;
10490 }
10491 }
10492 return false;
10493}
10494
10495unsigned ARMAsmParser::checkTargetMatchPredicate(MCInst &Inst) {
10496 // 16-bit thumb arithmetic instructions either require or preclude the 'S'
10497 // suffix depending on whether they're in an IT block or not.
10498 unsigned Opc = Inst.getOpcode();
10499 const MCInstrDesc &MCID = MII.get(Opc);
10500 if (MCID.TSFlags & ARMII::ThumbArithFlagSetting) {
10501 assert(MCID.hasOptionalDef() &&((MCID.hasOptionalDef() && "optionally flag setting instruction missing optional def operand"
) ? static_cast<void> (0) : __assert_fail ("MCID.hasOptionalDef() && \"optionally flag setting instruction missing optional def operand\""
, "/build/llvm-toolchain-snapshot-11~++20200309111110+2c36c23f347/llvm/lib/Target/ARM/AsmParser/ARMAsmParser.cpp"
, 10502, __PRETTY_FUNCTION__))
10502 "optionally flag setting instruction missing optional def operand")((MCID.hasOptionalDef() && "optionally flag setting instruction missing optional def operand"
) ? static_cast<void> (0) : __assert_fail ("MCID.hasOptionalDef() && \"optionally flag setting instruction missing optional def operand\""
, "/build/llvm-toolchain-snapshot-11~++20200309111110+2c36c23f347/llvm/lib/Target/ARM/AsmParser/ARMAsmParser.cpp"
, 10502, __PRETTY_FUNCTION__))
;
10503 assert(MCID.NumOperands == Inst.getNumOperands() &&((MCID.NumOperands == Inst.getNumOperands() && "operand count mismatch!"
) ? static_cast<void> (0) : __assert_fail ("MCID.NumOperands == Inst.getNumOperands() && \"operand count mismatch!\""
, "/build/llvm-toolchain-snapshot-11~++20200309111110+2c36c23f347/llvm/lib/Target/ARM/AsmParser/ARMAsmParser.cpp"
, 10504, __PRETTY_FUNCTION__))
10504 "operand count mismatch!")((MCID.NumOperands == Inst.getNumOperands() && "operand count mismatch!"
) ? static_cast<void> (0) : __assert_fail ("MCID.NumOperands == Inst.getNumOperands() && \"operand count mismatch!\""
, "/build/llvm-toolchain-snapshot-11~++20200309111110+2c36c23f347/llvm/lib/Target/ARM/AsmParser/ARMAsmParser.cpp"
, 10504, __PRETTY_FUNCTION__))
;
10505 // Find the optional-def operand (cc_out).
10506 unsigned OpNo;
10507 for (OpNo = 0;
10508 !MCID.OpInfo[OpNo].isOptionalDef() && OpNo < MCID.NumOperands;
10509 ++OpNo)
10510 ;
10511 // If we're parsing Thumb1, reject it completely.
10512 if (isThumbOne() && Inst.getOperand(OpNo).getReg() != ARM::CPSR)
10513 return Match_RequiresFlagSetting;
10514 // If we're parsing Thumb2, which form is legal depends on whether we're
10515 // in an IT block.
10516 if (isThumbTwo() && Inst.getOperand(OpNo).getReg() != ARM::CPSR &&
10517 !inITBlock())
10518 return Match_RequiresITBlock;
10519 if (isThumbTwo() && Inst.getOperand(OpNo).getReg() == ARM::CPSR &&
10520 inITBlock())
10521 return Match_RequiresNotITBlock;
10522 // LSL with zero immediate is not allowed in an IT block
10523 if (Opc == ARM::tLSLri && Inst.getOperand(3).getImm() == 0 && inITBlock())
10524 return Match_RequiresNotITBlock;
10525 } else if (isThumbOne()) {
10526 // Some high-register supporting Thumb1 encodings only allow both registers
10527 // to be from r0-r7 when in Thumb2.
10528 if (Opc == ARM::tADDhirr && !hasV6MOps() &&
10529 isARMLowRegister(Inst.getOperand(1).getReg()) &&
10530 isARMLowRegister(Inst.getOperand(2).getReg()))
10531 return Match_RequiresThumb2;
10532 // Others only require ARMv6 or later.
10533 else if (Opc == ARM::tMOVr && !hasV6Ops() &&
10534 isARMLowRegister(Inst.getOperand(0).getReg()) &&
10535 isARMLowRegister(Inst.getOperand(1).getReg()))
10536 return Match_RequiresV6;
10537 }
10538
10539 // Before ARMv8 the rules for when SP is allowed in t2MOVr are more complex
10540 // than the loop below can handle, so it uses the GPRnopc register class and
10541 // we do SP handling here.
10542 if (Opc == ARM::t2MOVr && !hasV8Ops())
10543 {
10544 // SP as both source and destination is not allowed
10545 if (Inst.getOperand(0).getReg() == ARM::SP &&
10546 Inst.getOperand(1).getReg() == ARM::SP)
10547 return Match_RequiresV8;
10548    // When flags-setting, SP as either source or destination is not allowed
10549 if (Inst.getOperand(4).getReg() == ARM::CPSR &&
10550 (Inst.getOperand(0).getReg() == ARM::SP ||
10551 Inst.getOperand(1).getReg() == ARM::SP))
10552 return Match_RequiresV8;
10553 }
10554
10555 switch (Inst.getOpcode()) {
10556 case ARM::VMRS:
10557 case ARM::VMSR:
10558 case ARM::VMRS_FPCXTS:
10559 case ARM::VMRS_FPCXTNS:
10560 case ARM::VMSR_FPCXTS:
10561 case ARM::VMSR_FPCXTNS:
10562 case ARM::VMRS_FPSCR_NZCVQC:
10563 case ARM::VMSR_FPSCR_NZCVQC:
10564 case ARM::FMSTAT:
10565 case ARM::VMRS_VPR:
10566 case ARM::VMRS_P0:
10567 case ARM::VMSR_VPR:
10568 case ARM::VMSR_P0:
10569 // Use of SP for VMRS/VMSR is only allowed in ARM mode with the exception of
10570 // ARMv8-A.
10571 if (Inst.getOperand(0).isReg() && Inst.getOperand(0).getReg() == ARM::SP &&
10572 (isThumb() && !hasV8Ops()))
10573 return Match_InvalidOperand;
10574 break;
10575 default:
10576 break;
10577 }
10578
10579 for (unsigned I = 0; I < MCID.NumOperands; ++I)
10580 if (MCID.OpInfo[I].RegClass == ARM::rGPRRegClassID) {
10581 // rGPRRegClass excludes PC, and also excluded SP before ARMv8
10582 const auto &Op = Inst.getOperand(I);
10583 if (!Op.isReg()) {
10584 // This can happen in awkward cases with tied operands, e.g. a
10585 // writeback load/store with a complex addressing mode in
10586 // which there's an output operand corresponding to the
10587 // updated written-back base register: the Tablegen-generated
10588 // AsmMatcher will have written a placeholder operand to that
10589 // slot in the form of an immediate 0, because it can't
10590 // generate the register part of the complex addressing-mode
10591 // operand ahead of time.
10592 continue;
10593 }
10594
10595 unsigned Reg = Op.getReg();
10596 if ((Reg == ARM::SP) && !hasV8Ops())
10597 return Match_RequiresV8;
10598 else if (Reg == ARM::PC)
10599 return Match_InvalidOperand;
10600 }
10601
10602 return Match_Success;
10603}
10604
10605namespace llvm {
10606
10607template <> inline bool IsCPSRDead<MCInst>(const MCInst *Instr) {
10608 return true; // In an assembly source, no need to second-guess
10609}
10610
10611} // end namespace llvm
10612
10613// Returns true if Inst is unpredictable if it is in an IT block, but is not
10614// the last instruction in the block.
10615bool ARMAsmParser::isITBlockTerminator(MCInst &Inst) const {
10616 const MCInstrDesc &MCID = MII.get(Inst.getOpcode());
10617
10618 // All branch & call instructions terminate IT blocks with the exception of
10619 // SVC.
10620 if (MCID.isTerminator() || (MCID.isCall() && Inst.getOpcode() != ARM::tSVC) ||
10621 MCID.isReturn() || MCID.isBranch() || MCID.isIndirectBranch())
10622 return true;
10623
10624 // Any arithmetic instruction which writes to the PC also terminates the IT
10625 // block.
10626 if (MCID.hasDefOfPhysReg(Inst, ARM::PC, *MRI))
10627 return true;
10628
10629 return false;
10630}
10631
10632unsigned ARMAsmParser::MatchInstruction(OperandVector &Operands, MCInst &Inst,
10633 SmallVectorImpl<NearMissInfo> &NearMisses,
10634 bool MatchingInlineAsm,
10635 bool &EmitInITBlock,
10636 MCStreamer &Out) {
10637 // If we can't use an implicit IT block here, just match as normal.
10638 if (inExplicitITBlock() || !isThumbTwo() || !useImplicitITThumb())
10639 return MatchInstructionImpl(Operands, Inst, &NearMisses, MatchingInlineAsm);
10640
10641 // Try to match the instruction in an extension of the current IT block (if
10642 // there is one).
10643 if (inImplicitITBlock()) {
10644 extendImplicitITBlock(ITState.Cond);
10645 if (MatchInstructionImpl(Operands, Inst, nullptr, MatchingInlineAsm) ==
10646 Match_Success) {
10647      // The match succeeded, but we still have to check that the instruction is
10648 // valid in this implicit IT block.
10649 const MCInstrDesc &MCID = MII.get(Inst.getOpcode());
10650 if (MCID.isPredicable()) {
10651 ARMCC::CondCodes InstCond =
10652 (ARMCC::CondCodes)Inst.getOperand(MCID.findFirstPredOperandIdx())
10653 .getImm();
10654 ARMCC::CondCodes ITCond = currentITCond();
10655 if (InstCond == ITCond) {
10656 EmitInITBlock = true;
10657 return Match_Success;
10658 } else if (InstCond == ARMCC::getOppositeCondition(ITCond)) {
10659 invertCurrentITCondition();
10660 EmitInITBlock = true;
10661 return Match_Success;
10662 }
10663 }
10664 }
10665 rewindImplicitITPosition();
10666 }
10667
10668 // Finish the current IT block, and try to match outside any IT block.
10669 flushPendingInstructions(Out);
10670 unsigned PlainMatchResult =
10671 MatchInstructionImpl(Operands, Inst, &NearMisses, MatchingInlineAsm);
10672 if (PlainMatchResult == Match_Success) {
10673 const MCInstrDesc &MCID = MII.get(Inst.getOpcode());
10674 if (MCID.isPredicable()) {
10675 ARMCC::CondCodes InstCond =
10676 (ARMCC::CondCodes)Inst.getOperand(MCID.findFirstPredOperandIdx())
10677 .getImm();
10678 // Some forms of the branch instruction have their own condition code
10679 // fields, so can be conditionally executed without an IT block.
10680 if (Inst.getOpcode() == ARM::tBcc || Inst.getOpcode() == ARM::t2Bcc) {
10681 EmitInITBlock = false;
10682 return Match_Success;
10683 }
10684 if (InstCond == ARMCC::AL) {
10685 EmitInITBlock = false;
10686 return Match_Success;
10687 }
10688 } else {
10689 EmitInITBlock = false;
10690 return Match_Success;
10691 }
10692 }
10693
10694 // Try to match in a new IT block. The matcher doesn't check the actual
10695 // condition, so we create an IT block with a dummy condition, and fix it up
10696 // once we know the actual condition.
10697 startImplicitITBlock();
10698 if (MatchInstructionImpl(Operands, Inst, nullptr, MatchingInlineAsm) ==
10699 Match_Success) {
10700 const MCInstrDesc &MCID = MII.get(Inst.getOpcode());
10701 if (MCID.isPredicable()) {
10702 ITState.Cond =
10703 (ARMCC::CondCodes)Inst.getOperand(MCID.findFirstPredOperandIdx())
10704 .getImm();
10705 EmitInITBlock = true;
10706 return Match_Success;
10707 }
10708 }
10709 discardImplicitITBlock();
10710
10711 // If none of these succeed, return the error we got when trying to match
10712 // outside any IT blocks.
10713 EmitInITBlock = false;
10714 return PlainMatchResult;
10715}
10716
10717static std::string ARMMnemonicSpellCheck(StringRef S, const FeatureBitset &FBS,
10718 unsigned VariantID = 0);
10719
10720static const char *getSubtargetFeatureName(uint64_t Val);
10721bool ARMAsmParser::MatchAndEmitInstruction(SMLoc IDLoc, unsigned &Opcode,
10722 OperandVector &Operands,
10723 MCStreamer &Out, uint64_t &ErrorInfo,
10724 bool MatchingInlineAsm) {
10725 MCInst Inst;
10726 unsigned MatchResult;
10727 bool PendConditionalInstruction = false;
10728
10729 SmallVector<NearMissInfo, 4> NearMisses;
10730 MatchResult = MatchInstruction(Operands, Inst, NearMisses, MatchingInlineAsm,
10731 PendConditionalInstruction, Out);
10732
10733 switch (MatchResult) {
10734 case Match_Success:
10735 LLVM_DEBUG(dbgs() << "Parsed as: ";do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType
("asm-parser")) { dbgs() << "Parsed as: "; Inst.dump_pretty
(dbgs(), MII.getName(Inst.getOpcode())); dbgs() << "\n"
; } } while (false)
10736 Inst.dump_pretty(dbgs(), MII.getName(Inst.getOpcode()));do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType
("asm-parser")) { dbgs() << "Parsed as: "; Inst.dump_pretty
(dbgs(), MII.getName(Inst.getOpcode())); dbgs() << "\n"
; } } while (false)
10737 dbgs() << "\n")do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType
("asm-parser")) { dbgs() << "Parsed as: "; Inst.dump_pretty
(dbgs(), MII.getName(Inst.getOpcode())); dbgs() << "\n"
; } } while (false)
;
10738
10739 // Context sensitive operand constraints aren't handled by the matcher,
10740 // so check them here.
10741 if (validateInstruction(Inst, Operands)) {
10742 // Still progress the IT block, otherwise one wrong condition causes
10743 // nasty cascading errors.
10744 forwardITPosition();
10745 forwardVPTPosition();
10746 return true;
10747 }
10748
10749 { // processInstruction() updates inITBlock state, we need to save it away
10750 bool wasInITBlock = inITBlock();
10751
10752 // Some instructions need post-processing to, for example, tweak which
10753 // encoding is selected. Loop on it while changes happen so the
10754 // individual transformations can chain off each other. E.g.,
10755 // tPOP(r8)->t2LDMIA_UPD(sp,r8)->t2STR_POST(sp,r8)
10756 while (processInstruction(Inst, Operands, Out))
10757 LLVM_DEBUG(dbgs() << "Changed to: ";do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType
("asm-parser")) { dbgs() << "Changed to: "; Inst.dump_pretty
(dbgs(), MII.getName(Inst.getOpcode())); dbgs() << "\n"
; } } while (false)
10758 Inst.dump_pretty(dbgs(), MII.getName(Inst.getOpcode()));do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType
("asm-parser")) { dbgs() << "Changed to: "; Inst.dump_pretty
(dbgs(), MII.getName(Inst.getOpcode())); dbgs() << "\n"
; } } while (false)
10759 dbgs() << "\n")do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType
("asm-parser")) { dbgs() << "Changed to: "; Inst.dump_pretty
(dbgs(), MII.getName(Inst.getOpcode())); dbgs() << "\n"
; } } while (false)
;
10760
10761 // Only after the instruction is fully processed, we can validate it
10762 if (wasInITBlock && hasV8Ops() && isThumb() &&
10763 !isV8EligibleForIT(&Inst)) {
10764 Warning(IDLoc, "deprecated instruction in IT block");
10765 }
10766 }
10767
10768 // Only move forward at the very end so that everything in validate
10769 // and process gets a consistent answer about whether we're in an IT
10770 // block.
10771 forwardITPosition();
10772 forwardVPTPosition();
10773
10774 // ITasm is an ARM mode pseudo-instruction that just sets the ITblock and
10775 // doesn't actually encode.
10776 if (Inst.getOpcode() == ARM::ITasm)
10777 return false;
10778
10779 Inst.setLoc(IDLoc);
10780 if (PendConditionalInstruction) {
10781 PendingConditionalInsts.push_back(Inst);
10782 if (isITBlockFull() || isITBlockTerminator(Inst))
10783 flushPendingInstructions(Out);
10784 } else {
10785 Out.emitInstruction(Inst, getSTI());
10786 }
10787 return false;
10788 case Match_NearMisses:
10789 ReportNearMisses(NearMisses, IDLoc, Operands);
10790 return true;
10791 case Match_MnemonicFail: {
10792 FeatureBitset FBS = ComputeAvailableFeatures(getSTI().getFeatureBits());
10793 std::string Suggestion = ARMMnemonicSpellCheck(
10794 ((ARMOperand &)*Operands[0]).getToken(), FBS);
10795 return Error(IDLoc, "invalid instruction" + Suggestion,
10796 ((ARMOperand &)*Operands[0]).getLocRange());
10797 }
10798 }
10799
10800 llvm_unreachable("Implement any new match types added!")::llvm::llvm_unreachable_internal("Implement any new match types added!"
, "/build/llvm-toolchain-snapshot-11~++20200309111110+2c36c23f347/llvm/lib/Target/ARM/AsmParser/ARMAsmParser.cpp"
, 10800)
;
10801}
10802
10803/// parseDirective parses the arm specific directives
10804bool ARMAsmParser::ParseDirective(AsmToken DirectiveID) {
10805 const MCObjectFileInfo::Environment Format =
10806 getContext().getObjectFileInfo()->getObjectFileType();
10807 bool IsMachO = Format == MCObjectFileInfo::IsMachO;
1
Assuming 'Format' is not equal to IsMachO
10808 bool IsCOFF = Format == MCObjectFileInfo::IsCOFF;
2
Assuming 'Format' is not equal to IsCOFF
10809
10810 std::string IDVal = DirectiveID.getIdentifier().lower();
10811 if (IDVal == ".word")
3
Taking false branch
10812 parseLiteralValues(4, DirectiveID.getLoc());
10813 else if (IDVal == ".short" || IDVal == ".hword")
4
Taking false branch
10814 parseLiteralValues(2, DirectiveID.getLoc());
10815 else if (IDVal == ".thumb")
5
Taking false branch
10816 parseDirectiveThumb(DirectiveID.getLoc());
10817 else if (IDVal == ".arm")
6
Taking false branch
10818 parseDirectiveARM(DirectiveID.getLoc());
10819 else if (IDVal == ".thumb_func")
7
Taking false branch
10820 parseDirectiveThumbFunc(DirectiveID.getLoc());
10821 else if (IDVal == ".code")
8
Taking false branch
10822 parseDirectiveCode(DirectiveID.getLoc());
10823 else if (IDVal == ".syntax")
9
Taking false branch
10824 parseDirectiveSyntax(DirectiveID.getLoc());
10825 else if (IDVal == ".unreq")
10
Taking false branch
10826 parseDirectiveUnreq(DirectiveID.getLoc());
10827 else if (IDVal == ".fnend")
11
Taking false branch
10828 parseDirectiveFnEnd(DirectiveID.getLoc());
10829 else if (IDVal == ".cantunwind")
12
Taking false branch
10830 parseDirectiveCantUnwind(DirectiveID.getLoc());
10831 else if (IDVal == ".personality")
13
Taking false branch
10832 parseDirectivePersonality(DirectiveID.getLoc());
10833 else if (IDVal == ".handlerdata")
14
Taking false branch
10834 parseDirectiveHandlerData(DirectiveID.getLoc());
10835 else if (IDVal == ".setfp")
15
Taking false branch
10836 parseDirectiveSetFP(DirectiveID.getLoc());
10837 else if (IDVal == ".pad")
16
Taking false branch
10838 parseDirectivePad(DirectiveID.getLoc());
10839 else if (IDVal == ".save")
17
Taking false branch
10840 parseDirectiveRegSave(DirectiveID.getLoc(), false);
10841 else if (IDVal == ".vsave")
18
Taking false branch
10842 parseDirectiveRegSave(DirectiveID.getLoc(), true);
10843 else if (IDVal == ".ltorg" || IDVal == ".pool")
19
Taking false branch
10844 parseDirectiveLtorg(DirectiveID.getLoc());
10845 else if (IDVal == ".even")
20
Taking false branch
10846 parseDirectiveEven(DirectiveID.getLoc());
10847 else if (IDVal == ".personalityindex")
21
Taking false branch
10848 parseDirectivePersonalityIndex(DirectiveID.getLoc());
10849 else if (IDVal == ".unwind_raw")
22
Taking false branch
10850 parseDirectiveUnwindRaw(DirectiveID.getLoc());
10851 else if (IDVal == ".movsp")
23
Taking false branch
10852 parseDirectiveMovSP(DirectiveID.getLoc());
10853 else if (IDVal == ".arch_extension")
24
Taking false branch
10854 parseDirectiveArchExtension(DirectiveID.getLoc());
10855 else if (IDVal == ".align")
25
Taking false branch
10856 return parseDirectiveAlign(DirectiveID.getLoc()); // Use Generic on failure.
10857 else if (IDVal == ".thumb_set")
26
Taking false branch
10858 parseDirectiveThumbSet(DirectiveID.getLoc());
10859 else if (IDVal == ".inst")
27
Taking false branch
10860 parseDirectiveInst(DirectiveID.getLoc());
10861 else if (IDVal == ".inst.n")
28
Taking false branch
10862 parseDirectiveInst(DirectiveID.getLoc(), 'n');
10863 else if (IDVal == ".inst.w")
29
Taking false branch
10864 parseDirectiveInst(DirectiveID.getLoc(), 'w');
10865  else if (!IsMachO && !IsCOFF) {
29.1
'IsMachO' is false
29.2
'IsCOFF' is false
30
Taking true branch
10866 if (IDVal == ".arch")
31
Taking false branch
10867 parseDirectiveArch(DirectiveID.getLoc());
10868 else if (IDVal == ".cpu")
32
Taking false branch
10869 parseDirectiveCPU(DirectiveID.getLoc());
10870 else if (IDVal == ".eabi_attribute")
33
Taking true branch
10871 parseDirectiveEabiAttr(DirectiveID.getLoc());
34
Calling 'ARMAsmParser::parseDirectiveEabiAttr'
10872 else if (IDVal == ".fpu")
10873 parseDirectiveFPU(DirectiveID.getLoc());
10874 else if (IDVal == ".fnstart")
10875 parseDirectiveFnStart(DirectiveID.getLoc());
10876 else if (IDVal == ".object_arch")
10877 parseDirectiveObjectArch(DirectiveID.getLoc());
10878 else if (IDVal == ".tlsdescseq")
10879 parseDirectiveTLSDescSeq(DirectiveID.getLoc());
10880 else
10881 return true;
10882 } else
10883 return true;
10884 return false;
10885}
10886
10887/// parseLiteralValues
10888/// ::= .hword expression [, expression]*
10889/// ::= .short expression [, expression]*
10890/// ::= .word expression [, expression]*
10891bool ARMAsmParser::parseLiteralValues(unsigned Size, SMLoc L) {
10892 auto parseOne = [&]() -> bool {
10893 const MCExpr *Value;
10894 if (getParser().parseExpression(Value))
10895 return true;
10896 getParser().getStreamer().emitValue(Value, Size, L);
10897 return false;
10898 };
10899 return (parseMany(parseOne));
10900}
10901
10902/// parseDirectiveThumb
10903/// ::= .thumb
10904bool ARMAsmParser::parseDirectiveThumb(SMLoc L) {
10905 if (parseToken(AsmToken::EndOfStatement, "unexpected token in directive") ||
10906 check(!hasThumb(), L, "target does not support Thumb mode"))
10907 return true;
10908
10909 if (!isThumb())
10910 SwitchMode();
10911
10912 getParser().getStreamer().emitAssemblerFlag(MCAF_Code16);
10913 return false;
10914}
10915
10916/// parseDirectiveARM
10917/// ::= .arm
10918bool ARMAsmParser::parseDirectiveARM(SMLoc L) {
10919 if (parseToken(AsmToken::EndOfStatement, "unexpected token in directive") ||
10920 check(!hasARM(), L, "target does not support ARM mode"))
10921 return true;
10922
10923 if (isThumb())
10924 SwitchMode();
10925 getParser().getStreamer().emitAssemblerFlag(MCAF_Code32);
10926 return false;
10927}
10928
10929void ARMAsmParser::doBeforeLabelEmit(MCSymbol *Symbol) {
10930 // We need to flush the current implicit IT block on a label, because it is
10931 // not legal to branch into an IT block.
10932 flushPendingInstructions(getStreamer());
10933}
10934
10935void ARMAsmParser::onLabelParsed(MCSymbol *Symbol) {
10936 if (NextSymbolIsThumb) {
10937 getParser().getStreamer().emitThumbFunc(Symbol);
10938 NextSymbolIsThumb = false;
10939 }
10940}
10941
10942/// parseDirectiveThumbFunc
10943/// ::= .thumbfunc symbol_name
10944bool ARMAsmParser::parseDirectiveThumbFunc(SMLoc L) {
10945 MCAsmParser &Parser = getParser();
10946 const auto Format = getContext().getObjectFileInfo()->getObjectFileType();
10947 bool IsMachO = Format == MCObjectFileInfo::IsMachO;
10948
10949  // Darwin asm has (optionally) function name after .thumb_func directive
10950 // ELF doesn't
10951
10952 if (IsMachO) {
10953 if (Parser.getTok().is(AsmToken::Identifier) ||
10954 Parser.getTok().is(AsmToken::String)) {
10955 MCSymbol *Func = getParser().getContext().getOrCreateSymbol(
10956 Parser.getTok().getIdentifier());
10957 getParser().getStreamer().emitThumbFunc(Func);
10958 Parser.Lex();
10959 if (parseToken(AsmToken::EndOfStatement,
10960 "unexpected token in '.thumb_func' directive"))
10961 return true;
10962 return false;
10963 }
10964 }
10965
10966 if (parseToken(AsmToken::EndOfStatement,
10967 "unexpected token in '.thumb_func' directive"))
10968 return true;
10969
10970 NextSymbolIsThumb = true;
10971 return false;
10972}
10973
10974/// parseDirectiveSyntax
10975/// ::= .syntax unified | divided
10976bool ARMAsmParser::parseDirectiveSyntax(SMLoc L) {
10977 MCAsmParser &Parser = getParser();
10978 const AsmToken &Tok = Parser.getTok();
10979 if (Tok.isNot(AsmToken::Identifier)) {
10980 Error(L, "unexpected token in .syntax directive");
10981 return false;
10982 }
10983
10984 StringRef Mode = Tok.getString();
10985 Parser.Lex();
10986 if (check(Mode == "divided" || Mode == "DIVIDED", L,
10987 "'.syntax divided' arm assembly not supported") ||
10988 check(Mode != "unified" && Mode != "UNIFIED", L,
10989 "unrecognized syntax mode in .syntax directive") ||
10990 parseToken(AsmToken::EndOfStatement, "unexpected token in directive"))
10991 return true;
10992
10993 // TODO tell the MC streamer the mode
10994 // getParser().getStreamer().Emit???();
10995 return false;
10996}
10997
10998/// parseDirectiveCode
10999/// ::= .code 16 | 32
11000bool ARMAsmParser::parseDirectiveCode(SMLoc L) {
11001 MCAsmParser &Parser = getParser();
11002 const AsmToken &Tok = Parser.getTok();
11003 if (Tok.isNot(AsmToken::Integer))
11004 return Error(L, "unexpected token in .code directive");
11005 int64_t Val = Parser.getTok().getIntVal();
11006 if (Val != 16 && Val != 32) {
11007 Error(L, "invalid operand to .code directive");
11008 return false;
11009 }
11010 Parser.Lex();
11011
11012 if (parseToken(AsmToken::EndOfStatement, "unexpected token in directive"))
11013 return true;
11014
11015 if (Val == 16) {
11016 if (!hasThumb())
11017 return Error(L, "target does not support Thumb mode");
11018
11019 if (!isThumb())
11020 SwitchMode();
11021 getParser().getStreamer().emitAssemblerFlag(MCAF_Code16);
11022 } else {
11023 if (!hasARM())
11024 return Error(L, "target does not support ARM mode");
11025
11026 if (isThumb())
11027 SwitchMode();
11028 getParser().getStreamer().emitAssemblerFlag(MCAF_Code32);
11029 }
11030
11031 return false;
11032}
11033
11034/// parseDirectiveReq
11035/// ::= name .req registername
11036bool ARMAsmParser::parseDirectiveReq(StringRef Name, SMLoc L) {
11037 MCAsmParser &Parser = getParser();
11038 Parser.Lex(); // Eat the '.req' token.
11039 unsigned Reg;
11040 SMLoc SRegLoc, ERegLoc;
11041 if (check(ParseRegister(Reg, SRegLoc, ERegLoc), SRegLoc,
11042 "register name expected") ||
11043 parseToken(AsmToken::EndOfStatement,
11044 "unexpected input in .req directive."))
11045 return true;
11046
11047 if (RegisterReqs.insert(std::make_pair(Name, Reg)).first->second != Reg)
11048 return Error(SRegLoc,
11049 "redefinition of '" + Name + "' does not match original.");
11050
11051 return false;
11052}
11053
11054/// parseDirectiveUnreq
11055/// ::= .unreq registername
11056bool ARMAsmParser::parseDirectiveUnreq(SMLoc L) {
11057 MCAsmParser &Parser = getParser();
11058 if (Parser.getTok().isNot(AsmToken::Identifier))
11059 return Error(L, "unexpected input in .unreq directive.");
11060 RegisterReqs.erase(Parser.getTok().getIdentifier().lower());
11061 Parser.Lex(); // Eat the identifier.
11062 if (parseToken(AsmToken::EndOfStatement,
11063 "unexpected input in '.unreq' directive"))
11064 return true;
11065 return false;
11066}
11067
11068// After changing arch/CPU, try to put the ARM/Thumb mode back to what it was
11069// before, if supported by the new target, or emit mapping symbols for the mode
11070// switch.
11071void ARMAsmParser::FixModeAfterArchChange(bool WasThumb, SMLoc Loc) {
11072 if (WasThumb != isThumb()) {
11073 if (WasThumb && hasThumb()) {
11074 // Stay in Thumb mode
11075 SwitchMode();
11076 } else if (!WasThumb && hasARM()) {
11077 // Stay in ARM mode
11078 SwitchMode();
11079 } else {
11080 // Mode switch forced, because the new arch doesn't support the old mode.
11081 getParser().getStreamer().emitAssemblerFlag(isThumb() ? MCAF_Code16
11082 : MCAF_Code32);
11083      // Warn about the implicit mode switch. GAS does not switch modes here,
11084 // but instead stays in the old mode, reporting an error on any following
11085 // instructions as the mode does not exist on the target.
11086 Warning(Loc, Twine("new target does not support ") +
11087 (WasThumb ? "thumb" : "arm") + " mode, switching to " +
11088 (!WasThumb ? "thumb" : "arm") + " mode");
11089 }
11090 }
11091}
11092
11093/// parseDirectiveArch
11094/// ::= .arch token
11095bool ARMAsmParser::parseDirectiveArch(SMLoc L) {
11096 StringRef Arch = getParser().parseStringToEndOfStatement().trim();
11097 ARM::ArchKind ID = ARM::parseArch(Arch);
11098
11099 if (ID == ARM::ArchKind::INVALID)
11100 return Error(L, "Unknown arch name");
11101
11102 bool WasThumb = isThumb();
11103 Triple T;
11104 MCSubtargetInfo &STI = copySTI();
11105 STI.setDefaultFeatures("", ("+" + ARM::getArchName(ID)).str());
11106 setAvailableFeatures(ComputeAvailableFeatures(STI.getFeatureBits()));
11107 FixModeAfterArchChange(WasThumb, L);
11108
11109 getTargetStreamer().emitArch(ID);
11110 return false;
11111}
11112
11113/// parseDirectiveEabiAttr
11114/// ::= .eabi_attribute int, int [, "str"]
11115/// ::= .eabi_attribute Tag_name, int [, "str"]
11116bool ARMAsmParser::parseDirectiveEabiAttr(SMLoc L) {
11117 MCAsmParser &Parser = getParser();
11118 int64_t Tag;
11119 SMLoc TagLoc;
11120 TagLoc = Parser.getTok().getLoc();
11121 if (Parser.getTok().is(AsmToken::Identifier)) {
35
Calling 'AsmToken::is'
38
Returning from 'AsmToken::is'
39
Taking false branch
11122 StringRef Name = Parser.getTok().getIdentifier();
11123 Tag = ARMBuildAttrs::AttrTypeFromString(Name);
11124 if (Tag == -1) {
11125 Error(TagLoc, "attribute name not recognised: " + Name);
11126 return false;
11127 }
11128 Parser.Lex();
11129 } else {
11130 const MCExpr *AttrExpr;
11131
11132 TagLoc = Parser.getTok().getLoc();
11133 if (Parser.parseExpression(AttrExpr))
40
Assuming the condition is false
41
Taking false branch
11134 return true;
11135
11136 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(AttrExpr);
42
Assuming 'AttrExpr' is not a 'MCConstantExpr'
43
'CE' initialized to a null pointer value
11137 if (check(!CE, TagLoc, "expected numeric constant"))
44
Assuming the condition is false
45
Taking false branch
11138 return true;
11139
11140 Tag = CE->getValue();
46
Called C++ object pointer is null
11141 }
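Note on the warning above: the analyzer does not model that check(!CE, TagLoc, "expected numeric constant") returns true (and bails out of the directive) exactly when CE is null, so it still explores the path on which CE is null when CE->getValue() is reached at line 11140. A minimal sketch of a guard the analyzer can follow directly, assuming the locals AttrExpr and TagLoc from the surrounding function and mirroring the explicit early return used by parseDirectivePad at line 11450 below (a sketch, not the committed fix):

  const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(AttrExpr);
  if (!CE) // explicit early return instead of the check() helper
    return Error(TagLoc, "expected numeric constant");
  Tag = CE->getValue(); // CE is provably non-null on this path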
11142
11143 if (Parser.parseToken(AsmToken::Comma, "comma expected"))
11144 return true;
11145
11146 StringRef StringValue = "";
11147 bool IsStringValue = false;
11148
11149 int64_t IntegerValue = 0;
11150 bool IsIntegerValue = false;
11151
11152 if (Tag == ARMBuildAttrs::CPU_raw_name || Tag == ARMBuildAttrs::CPU_name)
11153 IsStringValue = true;
11154 else if (Tag == ARMBuildAttrs::compatibility) {
11155 IsStringValue = true;
11156 IsIntegerValue = true;
11157 } else if (Tag < 32 || Tag % 2 == 0)
11158 IsIntegerValue = true;
11159 else if (Tag % 2 == 1)
11160 IsStringValue = true;
11161 else
11162 llvm_unreachable("invalid tag type")::llvm::llvm_unreachable_internal("invalid tag type", "/build/llvm-toolchain-snapshot-11~++20200309111110+2c36c23f347/llvm/lib/Target/ARM/AsmParser/ARMAsmParser.cpp"
, 11162)
;
11163
11164 if (IsIntegerValue) {
11165 const MCExpr *ValueExpr;
11166 SMLoc ValueExprLoc = Parser.getTok().getLoc();
11167 if (Parser.parseExpression(ValueExpr))
11168 return true;
11169
11170 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(ValueExpr);
11171 if (!CE)
11172 return Error(ValueExprLoc, "expected numeric constant");
11173 IntegerValue = CE->getValue();
11174 }
11175
11176 if (Tag == ARMBuildAttrs::compatibility) {
11177 if (Parser.parseToken(AsmToken::Comma, "comma expected"))
11178 return true;
11179 }
11180
11181 if (IsStringValue) {
11182 if (Parser.getTok().isNot(AsmToken::String))
11183 return Error(Parser.getTok().getLoc(), "bad string constant");
11184
11185 StringValue = Parser.getTok().getStringContents();
11186 Parser.Lex();
11187 }
11188
11189 if (Parser.parseToken(AsmToken::EndOfStatement,
11190 "unexpected token in '.eabi_attribute' directive"))
11191 return true;
11192
11193 if (IsIntegerValue && IsStringValue) {
11194    assert(Tag == ARMBuildAttrs::compatibility);
11195 getTargetStreamer().emitIntTextAttribute(Tag, IntegerValue, StringValue);
11196 } else if (IsIntegerValue)
11197 getTargetStreamer().emitAttribute(Tag, IntegerValue);
11198 else if (IsStringValue)
11199 getTargetStreamer().emitTextAttribute(Tag, StringValue);
11200 return false;
11201}
11202
11203/// parseDirectiveCPU
11204/// ::= .cpu str
11205bool ARMAsmParser::parseDirectiveCPU(SMLoc L) {
11206 StringRef CPU = getParser().parseStringToEndOfStatement().trim();
11207 getTargetStreamer().emitTextAttribute(ARMBuildAttrs::CPU_name, CPU);
11208
11209 // FIXME: This is using table-gen data, but should be moved to
11210 // ARMTargetParser once that is table-gen'd.
11211 if (!getSTI().isCPUStringValid(CPU))
11212 return Error(L, "Unknown CPU name");
11213
11214 bool WasThumb = isThumb();
11215 MCSubtargetInfo &STI = copySTI();
11216 STI.setDefaultFeatures(CPU, "");
11217 setAvailableFeatures(ComputeAvailableFeatures(STI.getFeatureBits()));
11218 FixModeAfterArchChange(WasThumb, L);
11219
11220 return false;
11221}
11222
11223/// parseDirectiveFPU
11224/// ::= .fpu str
11225bool ARMAsmParser::parseDirectiveFPU(SMLoc L) {
11226 SMLoc FPUNameLoc = getTok().getLoc();
11227 StringRef FPU = getParser().parseStringToEndOfStatement().trim();
11228
11229 unsigned ID = ARM::parseFPU(FPU);
11230 std::vector<StringRef> Features;
11231 if (!ARM::getFPUFeatures(ID, Features))
11232 return Error(FPUNameLoc, "Unknown FPU name");
11233
11234 MCSubtargetInfo &STI = copySTI();
11235 for (auto Feature : Features)
11236 STI.ApplyFeatureFlag(Feature);
11237 setAvailableFeatures(ComputeAvailableFeatures(STI.getFeatureBits()));
11238
11239 getTargetStreamer().emitFPU(ID);
11240 return false;
11241}
11242
11243/// parseDirectiveFnStart
11244/// ::= .fnstart
11245bool ARMAsmParser::parseDirectiveFnStart(SMLoc L) {
11246 if (parseToken(AsmToken::EndOfStatement,
11247 "unexpected token in '.fnstart' directive"))
11248 return true;
11249
11250 if (UC.hasFnStart()) {
11251 Error(L, ".fnstart starts before the end of previous one");
11252 UC.emitFnStartLocNotes();
11253 return true;
11254 }
11255
11256 // Reset the unwind directives parser state
11257 UC.reset();
11258
11259 getTargetStreamer().emitFnStart();
11260
11261 UC.recordFnStart(L);
11262 return false;
11263}
11264
11265/// parseDirectiveFnEnd
11266/// ::= .fnend
11267bool ARMAsmParser::parseDirectiveFnEnd(SMLoc L) {
11268 if (parseToken(AsmToken::EndOfStatement,
11269 "unexpected token in '.fnend' directive"))
11270 return true;
11271 // Check the ordering of unwind directives
11272 if (!UC.hasFnStart())
11273 return Error(L, ".fnstart must precede .fnend directive");
11274
11275 // Reset the unwind directives parser state
11276 getTargetStreamer().emitFnEnd();
11277
11278 UC.reset();
11279 return false;
11280}
11281
11282/// parseDirectiveCantUnwind
11283/// ::= .cantunwind
11284bool ARMAsmParser::parseDirectiveCantUnwind(SMLoc L) {
11285 if (parseToken(AsmToken::EndOfStatement,
11286 "unexpected token in '.cantunwind' directive"))
11287 return true;
11288
11289 UC.recordCantUnwind(L);
11290 // Check the ordering of unwind directives
11291 if (check(!UC.hasFnStart(), L, ".fnstart must precede .cantunwind directive"))
11292 return true;
11293
11294 if (UC.hasHandlerData()) {
11295 Error(L, ".cantunwind can't be used with .handlerdata directive");
11296 UC.emitHandlerDataLocNotes();
11297 return true;
11298 }
11299 if (UC.hasPersonality()) {
11300 Error(L, ".cantunwind can't be used with .personality directive");
11301 UC.emitPersonalityLocNotes();
11302 return true;
11303 }
11304
11305 getTargetStreamer().emitCantUnwind();
11306 return false;
11307}
11308
11309/// parseDirectivePersonality
11310/// ::= .personality name
11311bool ARMAsmParser::parseDirectivePersonality(SMLoc L) {
11312 MCAsmParser &Parser = getParser();
11313 bool HasExistingPersonality = UC.hasPersonality();
11314
11315 // Parse the name of the personality routine
11316 if (Parser.getTok().isNot(AsmToken::Identifier))
11317 return Error(L, "unexpected input in .personality directive.");
11318 StringRef Name(Parser.getTok().getIdentifier());
11319 Parser.Lex();
11320
11321 if (parseToken(AsmToken::EndOfStatement,
11322 "unexpected token in '.personality' directive"))
11323 return true;
11324
11325 UC.recordPersonality(L);
11326
11327 // Check the ordering of unwind directives
11328 if (!UC.hasFnStart())
11329 return Error(L, ".fnstart must precede .personality directive");
11330 if (UC.cantUnwind()) {
11331 Error(L, ".personality can't be used with .cantunwind directive");
11332 UC.emitCantUnwindLocNotes();
11333 return true;
11334 }
11335 if (UC.hasHandlerData()) {
11336 Error(L, ".personality must precede .handlerdata directive");
11337 UC.emitHandlerDataLocNotes();
11338 return true;
11339 }
11340 if (HasExistingPersonality) {
11341 Error(L, "multiple personality directives");
11342 UC.emitPersonalityLocNotes();
11343 return true;
11344 }
11345
11346 MCSymbol *PR = getParser().getContext().getOrCreateSymbol(Name);
11347 getTargetStreamer().emitPersonality(PR);
11348 return false;
11349}
11350
11351/// parseDirectiveHandlerData
11352/// ::= .handlerdata
11353bool ARMAsmParser::parseDirectiveHandlerData(SMLoc L) {
11354 if (parseToken(AsmToken::EndOfStatement,
11355 "unexpected token in '.handlerdata' directive"))
11356 return true;
11357
11358 UC.recordHandlerData(L);
11359 // Check the ordering of unwind directives
11360 if (!UC.hasFnStart())
11361 return Error(L, ".fnstart must precede .personality directive");
11362 if (UC.cantUnwind()) {
11363 Error(L, ".handlerdata can't be used with .cantunwind directive");
11364 UC.emitCantUnwindLocNotes();
11365 return true;
11366 }
11367
11368 getTargetStreamer().emitHandlerData();
11369 return false;
11370}
11371
11372/// parseDirectiveSetFP
11373/// ::= .setfp fpreg, spreg [, offset]
11374bool ARMAsmParser::parseDirectiveSetFP(SMLoc L) {
11375 MCAsmParser &Parser = getParser();
11376 // Check the ordering of unwind directives
11377 if (check(!UC.hasFnStart(), L, ".fnstart must precede .setfp directive") ||
11378 check(UC.hasHandlerData(), L,
11379 ".setfp must precede .handlerdata directive"))
11380 return true;
11381
11382 // Parse fpreg
11383 SMLoc FPRegLoc = Parser.getTok().getLoc();
11384 int FPReg = tryParseRegister();
11385
11386 if (check(FPReg == -1, FPRegLoc, "frame pointer register expected") ||
11387 Parser.parseToken(AsmToken::Comma, "comma expected"))
11388 return true;
11389
11390 // Parse spreg
11391 SMLoc SPRegLoc = Parser.getTok().getLoc();
11392 int SPReg = tryParseRegister();
11393 if (check(SPReg == -1, SPRegLoc, "stack pointer register expected") ||
11394 check(SPReg != ARM::SP && SPReg != UC.getFPReg(), SPRegLoc,
11395 "register should be either $sp or the latest fp register"))
11396 return true;
11397
11398 // Update the frame pointer register
11399 UC.saveFPReg(FPReg);
11400
11401 // Parse offset
11402 int64_t Offset = 0;
11403 if (Parser.parseOptionalToken(AsmToken::Comma)) {
11404 if (Parser.getTok().isNot(AsmToken::Hash) &&
11405 Parser.getTok().isNot(AsmToken::Dollar))
11406 return Error(Parser.getTok().getLoc(), "'#' expected");
11407 Parser.Lex(); // skip hash token.
11408
11409 const MCExpr *OffsetExpr;
11410 SMLoc ExLoc = Parser.getTok().getLoc();
11411 SMLoc EndLoc;
11412 if (getParser().parseExpression(OffsetExpr, EndLoc))
11413 return Error(ExLoc, "malformed setfp offset");
11414 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(OffsetExpr);
11415 if (check(!CE, ExLoc, "setfp offset must be an immediate"))
11416 return true;
11417 Offset = CE->getValue();
11418 }
11419
11420 if (Parser.parseToken(AsmToken::EndOfStatement))
11421 return true;
11422
11423 getTargetStreamer().emitSetFP(static_cast<unsigned>(FPReg),
11424 static_cast<unsigned>(SPReg), Offset);
11425 return false;
11426}
11427
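The offset handling above relies on a recurring pattern in these directive parsers: parse a generic MCExpr, then require it to fold to an MCConstantExpr before reading its value. The following is a minimal sketch of that pattern, not part of the analyzed file; the helper name is hypothetical and it assumes the MC headers this file already pulls in.

#include "llvm/MC/MCExpr.h"
#include "llvm/Support/Casting.h"
#include <cstdint>

// Hypothetical helper illustrating the constant-offset pattern used by
// .setfp, .pad and .movsp: dyn_cast yields a null pointer for a
// non-constant expression, so the result must be checked before getValue().
static bool evaluateAsImmediate(const llvm::MCExpr *E, int64_t &Out) {
  if (const auto *CE = llvm::dyn_cast<llvm::MCConstantExpr>(E)) {
    Out = CE->getValue(); // expression folded to an immediate
    return true;
  }
  return false; // caller emits a diagnostic such as "offset must be an immediate"
}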
11428/// parseDirectivePad
11429/// ::= .pad offset
11430bool ARMAsmParser::parseDirectivePad(SMLoc L) {
11431 MCAsmParser &Parser = getParser();
11432 // Check the ordering of unwind directives
11433 if (!UC.hasFnStart())
11434 return Error(L, ".fnstart must precede .pad directive");
11435 if (UC.hasHandlerData())
11436 return Error(L, ".pad must precede .handlerdata directive");
11437
11438 // Parse the offset
11439 if (Parser.getTok().isNot(AsmToken::Hash) &&
11440 Parser.getTok().isNot(AsmToken::Dollar))
11441 return Error(Parser.getTok().getLoc(), "'#' expected");
11442 Parser.Lex(); // skip hash token.
11443
11444 const MCExpr *OffsetExpr;
11445 SMLoc ExLoc = Parser.getTok().getLoc();
11446 SMLoc EndLoc;
11447 if (getParser().parseExpression(OffsetExpr, EndLoc))
11448 return Error(ExLoc, "malformed pad offset");
11449 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(OffsetExpr);
11450 if (!CE)
11451 return Error(ExLoc, "pad offset must be an immediate");
11452
11453 if (parseToken(AsmToken::EndOfStatement,
11454 "unexpected token in '.pad' directive"))
11455 return true;
11456
11457 getTargetStreamer().emitPad(CE->getValue());
11458 return false;
11459}
11460
11461/// parseDirectiveRegSave
11462/// ::= .save { registers }
11463/// ::= .vsave { registers }
11464bool ARMAsmParser::parseDirectiveRegSave(SMLoc L, bool IsVector) {
11465 // Check the ordering of unwind directives
11466 if (!UC.hasFnStart())
11467 return Error(L, ".fnstart must precede .save or .vsave directives");
11468 if (UC.hasHandlerData())
11469 return Error(L, ".save or .vsave must precede .handlerdata directive");
11470
11471 // RAII object to make sure parsed operands are deleted.
11472 SmallVector<std::unique_ptr<MCParsedAsmOperand>, 1> Operands;
11473
11474 // Parse the register list
11475 if (parseRegisterList(Operands) ||
11476 parseToken(AsmToken::EndOfStatement, "unexpected token in directive"))
11477 return true;
11478 ARMOperand &Op = (ARMOperand &)*Operands[0];
11479 if (!IsVector && !Op.isRegList())
11480 return Error(L, ".save expects GPR registers");
11481 if (IsVector && !Op.isDPRRegList())
11482 return Error(L, ".vsave expects DPR registers");
11483
11484 getTargetStreamer().emitRegSave(Op.getRegList(), IsVector);
11485 return false;
11486}
11487
11488/// parseDirectiveInst
11489/// ::= .inst opcode [, ...]
11490/// ::= .inst.n opcode [, ...]
11491/// ::= .inst.w opcode [, ...]
11492bool ARMAsmParser::parseDirectiveInst(SMLoc Loc, char Suffix) {
11493 int Width = 4;
11494
11495 if (isThumb()) {
11496 switch (Suffix) {
11497 case 'n':
11498 Width = 2;
11499 break;
11500 case 'w':
11501 break;
11502 default:
11503 Width = 0;
11504 break;
11505 }
11506 } else {
11507 if (Suffix)
11508 return Error(Loc, "width suffixes are invalid in ARM mode");
11509 }
11510
11511 auto parseOne = [&]() -> bool {
11512 const MCExpr *Expr;
11513 if (getParser().parseExpression(Expr))
11514 return true;
11515 const MCConstantExpr *Value = dyn_cast_or_null<MCConstantExpr>(Expr);
11516 if (!Value) {
11517 return Error(Loc, "expected constant expression");
11518 }
11519
11520 char CurSuffix = Suffix;
11521 switch (Width) {
11522 case 2:
11523 if (Value->getValue() > 0xffff)
11524 return Error(Loc, "inst.n operand is too big, use inst.w instead");
11525 break;
11526 case 4:
11527 if (Value->getValue() > 0xffffffff)
11528 return Error(Loc, StringRef(Suffix ? "inst.w" : "inst") +
11529 " operand is too big");
11530 break;
11531 case 0:
11532 // Thumb mode, no width indicated. Guess from the opcode, if possible.
11533 if (Value->getValue() < 0xe800)
11534 CurSuffix = 'n';
11535 else if (Value->getValue() >= 0xe8000000)
11536 CurSuffix = 'w';
11537 else
11538 return Error(Loc, "cannot determine Thumb instruction size, "
11539 "use inst.n/inst.w instead");
11540 break;
11541 default:
11542 llvm_unreachable("only supported widths are 2 and 4");
11543 }
11544
11545 getTargetStreamer().emitInst(Value->getValue(), CurSuffix);
11546 return false;
11547 };
11548
11549 if (parseOptionalToken(AsmToken::EndOfStatement))
11550 return Error(Loc, "expected expression following directive");
11551 if (parseMany(parseOne))
11552 return true;
11553 return false;
11554}
11555
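The Width == 0 branch above encodes the Thumb encoding rule used to guess the .inst suffix: a 16-bit instruction has a leading half-word below 0xe800, while a 32-bit encoding written as a single word starts at 0xe8000000. A condensed sketch of that heuristic (hypothetical standalone function, not part of the parser):

#include <cstdint>

// Hypothetical restatement of the Width == 0 case in parseDirectiveInst:
// guess the .inst suffix from the opcode value, or report ambiguity.
static char guessThumbInstSuffix(uint64_t Opcode) {
  if (Opcode < 0xe800)
    return 'n';  // narrow 16-bit encoding
  if (Opcode >= 0xe8000000)
    return 'w';  // wide 32-bit encoding
  return '\0';   // ambiguous: caller must ask for .inst.n or .inst.w
}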
11556/// parseDirectiveLtorg
11557/// ::= .ltorg | .pool
11558bool ARMAsmParser::parseDirectiveLtorg(SMLoc L) {
11559 if (parseToken(AsmToken::EndOfStatement, "unexpected token in directive"))
11560 return true;
11561 getTargetStreamer().emitCurrentConstantPool();
11562 return false;
11563}
11564
11565bool ARMAsmParser::parseDirectiveEven(SMLoc L) {
11566 const MCSection *Section = getStreamer().getCurrentSectionOnly();
11567
11568 if (parseToken(AsmToken::EndOfStatement, "unexpected token in directive"))
11569 return true;
11570
11571 if (!Section) {
11572 getStreamer().InitSections(false);
11573 Section = getStreamer().getCurrentSectionOnly();
11574 }
11575
11576 assert(Section && "must have section to emit alignment");
11577 if (Section->UseCodeAlign())
11578 getStreamer().emitCodeAlignment(2);
11579 else
11580 getStreamer().emitValueToAlignment(2);
11581
11582 return false;
11583}
11584
11585/// parseDirectivePersonalityIndex
11586/// ::= .personalityindex index
11587bool ARMAsmParser::parseDirectivePersonalityIndex(SMLoc L) {
11588 MCAsmParser &Parser = getParser();
11589 bool HasExistingPersonality = UC.hasPersonality();
11590
11591 const MCExpr *IndexExpression;
11592 SMLoc IndexLoc = Parser.getTok().getLoc();
11593 if (Parser.parseExpression(IndexExpression) ||
11594 parseToken(AsmToken::EndOfStatement,
11595 "unexpected token in '.personalityindex' directive")) {
11596 return true;
11597 }
11598
11599 UC.recordPersonalityIndex(L);
11600
11601 if (!UC.hasFnStart()) {
11602 return Error(L, ".fnstart must precede .personalityindex directive");
11603 }
11604 if (UC.cantUnwind()) {
11605 Error(L, ".personalityindex cannot be used with .cantunwind");
11606 UC.emitCantUnwindLocNotes();
11607 return true;
11608 }
11609 if (UC.hasHandlerData()) {
11610 Error(L, ".personalityindex must precede .handlerdata directive");
11611 UC.emitHandlerDataLocNotes();
11612 return true;
11613 }
11614 if (HasExistingPersonality) {
11615 Error(L, "multiple personality directives");
11616 UC.emitPersonalityLocNotes();
11617 return true;
11618 }
11619
11620 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(IndexExpression);
11621 if (!CE)
11622 return Error(IndexLoc, "index must be a constant number");
11623 if (CE->getValue() < 0 || CE->getValue() >= ARM::EHABI::NUM_PERSONALITY_INDEX)
11624 return Error(IndexLoc,
11625 "personality routine index should be in range [0-3]");
11626
11627 getTargetStreamer().emitPersonalityIndex(CE->getValue());
11628 return false;
11629}
11630
11631/// parseDirectiveUnwindRaw
11632/// ::= .unwind_raw offset, opcode [, opcode...]
11633bool ARMAsmParser::parseDirectiveUnwindRaw(SMLoc L) {
11634 MCAsmParser &Parser = getParser();
11635 int64_t StackOffset;
11636 const MCExpr *OffsetExpr;
11637 SMLoc OffsetLoc = getLexer().getLoc();
11638
11639 if (!UC.hasFnStart())
11640 return Error(L, ".fnstart must precede .unwind_raw directives");
11641 if (getParser().parseExpression(OffsetExpr))
11642 return Error(OffsetLoc, "expected expression");
11643
11644 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(OffsetExpr);
11645 if (!CE)
11646 return Error(OffsetLoc, "offset must be a constant");
11647
11648 StackOffset = CE->getValue();
11649
11650 if (Parser.parseToken(AsmToken::Comma, "expected comma"))
11651 return true;
11652
11653 SmallVector<uint8_t, 16> Opcodes;
11654
11655 auto parseOne = [&]() -> bool {
11656 const MCExpr *OE = nullptr;
11657 SMLoc OpcodeLoc = getLexer().getLoc();
11658 if (check(getLexer().is(AsmToken::EndOfStatement) ||
11659 Parser.parseExpression(OE),
11660 OpcodeLoc, "expected opcode expression"))
11661 return true;
11662 const MCConstantExpr *OC = dyn_cast<MCConstantExpr>(OE);
11663 if (!OC)
11664 return Error(OpcodeLoc, "opcode value must be a constant");
11665 const int64_t Opcode = OC->getValue();
11666 if (Opcode & ~0xff)
11667 return Error(OpcodeLoc, "invalid opcode");
11668 Opcodes.push_back(uint8_t(Opcode));
11669 return false;
11670 };
11671
11672 // Must have at least 1 element
11673 SMLoc OpcodeLoc = getLexer().getLoc();
11674 if (parseOptionalToken(AsmToken::EndOfStatement))
11675 return Error(OpcodeLoc, "expected opcode expression");
11676 if (parseMany(parseOne))
11677 return true;
11678
11679 getTargetStreamer().emitUnwindRaw(StackOffset, Opcodes);
11680 return false;
11681}
11682
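Both .inst and .unwind_raw hand a per-element callback to parseMany, which consumes a comma-separated list up to the end of the statement. The sketch below is a rough, hypothetical restatement of that contract using the same parseToken/parseOptionalToken calls seen elsewhere in this file; it is not the library implementation.

#include "llvm/ADT/STLExtras.h"
#include "llvm/MC/MCParser/MCAsmParser.h"

// Hypothetical sketch of the comma-separated-list loop that parseMany
// provides: call ParseOne for each element, then require end of statement.
static bool parseCommaSeparatedList(llvm::MCAsmParser &Parser,
                                    llvm::function_ref<bool()> ParseOne) {
  while (true) {
    if (ParseOne())
      return true;                                      // element failed
    if (!Parser.parseOptionalToken(llvm::AsmToken::Comma))
      break;                                            // list is finished
  }
  return Parser.parseToken(llvm::AsmToken::EndOfStatement,
                           "unexpected token in directive");
}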
11683/// parseDirectiveTLSDescSeq
11684/// ::= .tlsdescseq tls-variable
11685bool ARMAsmParser::parseDirectiveTLSDescSeq(SMLoc L) {
11686 MCAsmParser &Parser = getParser();
11687
11688 if (getLexer().isNot(AsmToken::Identifier))
11689 return TokError("expected variable after '.tlsdescseq' directive");
11690
11691 const MCSymbolRefExpr *SRE =
11692 MCSymbolRefExpr::create(Parser.getTok().getIdentifier(),
11693 MCSymbolRefExpr::VK_ARM_TLSDESCSEQ, getContext());
11694 Lex();
11695
11696 if (parseToken(AsmToken::EndOfStatement,
11697 "unexpected token in '.tlsdescseq' directive"))
11698 return true;
11699
11700 getTargetStreamer().AnnotateTLSDescriptorSequence(SRE);
11701 return false;
11702}
11703
11704/// parseDirectiveMovSP
11705/// ::= .movsp reg [, #offset]
11706bool ARMAsmParser::parseDirectiveMovSP(SMLoc L) {
11707 MCAsmParser &Parser = getParser();
11708 if (!UC.hasFnStart())
11709 return Error(L, ".fnstart must precede .movsp directives");
11710 if (UC.getFPReg() != ARM::SP)
11711 return Error(L, "unexpected .movsp directive");
11712
11713 SMLoc SPRegLoc = Parser.getTok().getLoc();
11714 int SPReg = tryParseRegister();
11715 if (SPReg == -1)
11716 return Error(SPRegLoc, "register expected");
11717 if (SPReg == ARM::SP || SPReg == ARM::PC)
11718 return Error(SPRegLoc, "sp and pc are not permitted in .movsp directive");
11719
11720 int64_t Offset = 0;
11721 if (Parser.parseOptionalToken(AsmToken::Comma)) {
11722 if (Parser.parseToken(AsmToken::Hash, "expected #constant"))
11723 return true;
11724
11725 const MCExpr *OffsetExpr;
11726 SMLoc OffsetLoc = Parser.getTok().getLoc();
11727
11728 if (Parser.parseExpression(OffsetExpr))
11729 return Error(OffsetLoc, "malformed offset expression");
11730
11731 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(OffsetExpr);
11732 if (!CE)
11733 return Error(OffsetLoc, "offset must be an immediate constant");
11734
11735 Offset = CE->getValue();
11736 }
11737
11738 if (parseToken(AsmToken::EndOfStatement,
11739 "unexpected token in '.movsp' directive"))
11740 return true;
11741
11742 getTargetStreamer().emitMovSP(SPReg, Offset);
11743 UC.saveFPReg(SPReg);
11744
11745 return false;
11746}
11747
11748/// parseDirectiveObjectArch
11749/// ::= .object_arch name
11750bool ARMAsmParser::parseDirectiveObjectArch(SMLoc L) {
11751 MCAsmParser &Parser = getParser();
11752 if (getLexer().isNot(AsmToken::Identifier))
11753 return Error(getLexer().getLoc(), "unexpected token");
11754
11755 StringRef Arch = Parser.getTok().getString();
11756 SMLoc ArchLoc = Parser.getTok().getLoc();
11757 Lex();
11758
11759 ARM::ArchKind ID = ARM::parseArch(Arch);
11760
11761 if (ID == ARM::ArchKind::INVALID)
11762 return Error(ArchLoc, "unknown architecture '" + Arch + "'");
11763 if (parseToken(AsmToken::EndOfStatement))
11764 return true;
11765
11766 getTargetStreamer().emitObjectArch(ID);
11767 return false;
11768}
11769
11770/// parseDirectiveAlign
11771/// ::= .align
11772bool ARMAsmParser::parseDirectiveAlign(SMLoc L) {
11773 // NOTE: if this is not the end of the statement, fall back to the target
11774 // agnostic handling for this directive which will correctly handle this.
11775 if (parseOptionalToken(AsmToken::EndOfStatement)) {
11776 // '.align' is target specifically handled to mean 2**2 byte alignment.
11777 const MCSection *Section = getStreamer().getCurrentSectionOnly();
11778 assert(Section && "must have section to emit alignment");
11779 if (Section->UseCodeAlign())
11780 getStreamer().emitCodeAlignment(4, 0);
11781 else
11782 getStreamer().emitValueToAlignment(4, 0, 1, 0);
11783 return false;
11784 }
11785 return true;
11786}
11787
11788/// parseDirectiveThumbSet
11789/// ::= .thumb_set name, value
11790bool ARMAsmParser::parseDirectiveThumbSet(SMLoc L) {
11791 MCAsmParser &Parser = getParser();
11792
11793 StringRef Name;
11794 if (check(Parser.parseIdentifier(Name),
11795 "expected identifier after '.thumb_set'") ||
11796 parseToken(AsmToken::Comma, "expected comma after name '" + Name + "'"))
11797 return true;
11798
11799 MCSymbol *Sym;
11800 const MCExpr *Value;
11801 if (MCParserUtils::parseAssignmentExpression(Name, /* allow_redef */ true,
11802 Parser, Sym, Value))
11803 return true;
11804
11805 getTargetStreamer().emitThumbSet(Sym, Value);
11806 return false;
11807}
11808
11809/// Force static initialization.
11810extern "C" LLVM_EXTERNAL_VISIBILITY__attribute__ ((visibility("default"))) void LLVMInitializeARMAsmParser() {
11811 RegisterMCAsmParser<ARMAsmParser> X(getTheARMLETarget());
11812 RegisterMCAsmParser<ARMAsmParser> Y(getTheARMBETarget());
11813 RegisterMCAsmParser<ARMAsmParser> A(getTheThumbLETarget());
11814 RegisterMCAsmParser<ARMAsmParser> B(getTheThumbBETarget());
11815}
11816
11817#define GET_REGISTER_MATCHER
11818#define GET_SUBTARGET_FEATURE_NAME
11819#define GET_MATCHER_IMPLEMENTATION
11820#define GET_MNEMONIC_SPELL_CHECKER
11821#include "ARMGenAsmMatcher.inc"
11822
11823// Some diagnostics need to vary with subtarget features, so they are handled
11824// here. For example, the DPR class has either 16 or 32 registers, depending
11825// on the FPU available.
11826const char *
11827ARMAsmParser::getCustomOperandDiag(ARMMatchResultTy MatchError) {
11828 switch (MatchError) {
11829 // rGPR contains sp starting with ARMv8.
11830 case Match_rGPR:
11831 return hasV8Ops() ? "operand must be a register in range [r0, r14]"
11832 : "operand must be a register in range [r0, r12] or r14";
11833 // DPR contains 16 registers for some FPUs, and 32 for others.
11834 case Match_DPR:
11835 return hasD32() ? "operand must be a register in range [d0, d31]"
11836 : "operand must be a register in range [d0, d15]";
11837 case Match_DPR_RegList:
11838 return hasD32() ? "operand must be a list of registers in range [d0, d31]"
11839 : "operand must be a list of registers in range [d0, d15]";
11840
11841 // For all other diags, use the static string from tablegen.
11842 default:
11843 return getMatchKindDiag(MatchError);
11844 }
11845}
11846
11847// Process the list of near-misses, throwing away ones we don't want to report
11848// to the user, and converting the rest to a source location and string that
11849// should be reported.
11850void
11851ARMAsmParser::FilterNearMisses(SmallVectorImpl<NearMissInfo> &NearMissesIn,
11852 SmallVectorImpl<NearMissMessage> &NearMissesOut,
11853 SMLoc IDLoc, OperandVector &Operands) {
11854 // TODO: If operand didn't match, sub in a dummy one and run target
11855 // predicate, so that we can avoid reporting near-misses that are invalid?
11856 // TODO: Many operand types don't have SuperClasses set, so we report
11857 // redundant ones.
11858 // TODO: Some operands are superclasses of registers (e.g.
11859 // MCK_RegShiftedImm), we don't have any way to represent that currently.
11860 // TODO: This is not all ARM-specific, can some of it be factored out?
11861
11862 // Record some information about near-misses that we have already seen, so
11863 // that we can avoid reporting redundant ones. For example, if there are
11864 // variants of an instruction that take 8- and 16-bit immediates, we want
11865 // to only report the widest one.
11866 std::multimap<unsigned, unsigned> OperandMissesSeen;
11867 SmallSet<FeatureBitset, 4> FeatureMissesSeen;
11868 bool ReportedTooFewOperands = false;
11869
11870 // Process the near-misses in reverse order, so that we see more general ones
11871 // first, and so can avoid emitting more specific ones.
11872 for (NearMissInfo &I : reverse(NearMissesIn)) {
11873 switch (I.getKind()) {
11874 case NearMissInfo::NearMissOperand: {
11875 SMLoc OperandLoc =
11876 ((ARMOperand &)*Operands[I.getOperandIndex()]).getStartLoc();
11877 const char *OperandDiag =
11878 getCustomOperandDiag((ARMMatchResultTy)I.getOperandError());
11879
11880 // If we have already emitted a message for a superclass, don't also report
11881 // the sub-class. We consider all operand classes that we don't have a
11882 // specialised diagnostic for to be equal for the purpose of this check,
11883 // so that we don't report the generic error multiple times on the same
11884 // operand.
11885 unsigned DupCheckMatchClass = OperandDiag ? I.getOperandClass() : ~0U;
11886 auto PrevReports = OperandMissesSeen.equal_range(I.getOperandIndex());
11887 if (std::any_of(PrevReports.first, PrevReports.second,
11888 [DupCheckMatchClass](
11889 const std::pair<unsigned, unsigned> Pair) {
11890 if (DupCheckMatchClass == ~0U || Pair.second == ~0U)
11891 return Pair.second == DupCheckMatchClass;
11892 else
11893 return isSubclass((MatchClassKind)DupCheckMatchClass,
11894 (MatchClassKind)Pair.second);
11895 }))
11896 break;
11897 OperandMissesSeen.insert(
11898 std::make_pair(I.getOperandIndex(), DupCheckMatchClass));
11899
11900 NearMissMessage Message;
11901 Message.Loc = OperandLoc;
11902 if (OperandDiag) {
11903 Message.Message = OperandDiag;
11904 } else if (I.getOperandClass() == InvalidMatchClass) {
11905 Message.Message = "too many operands for instruction";
11906 } else {
11907 Message.Message = "invalid operand for instruction";
11908 LLVM_DEBUG(
11909 dbgs() << "Missing diagnostic string for operand class "
11910 << getMatchClassName((MatchClassKind)I.getOperandClass())
11911 << I.getOperandClass() << ", error " << I.getOperandError()
11912 << ", opcode " << MII.getName(I.getOpcode()) << "\n");
11913 }
11914 NearMissesOut.emplace_back(Message);
11915 break;
11916 }
11917 case NearMissInfo::NearMissFeature: {
11918 const FeatureBitset &MissingFeatures = I.getFeatures();
11919 // Don't report the same set of features twice.
11920 if (FeatureMissesSeen.count(MissingFeatures))
11921 break;
11922 FeatureMissesSeen.insert(MissingFeatures);
11923
11924 // Special case: don't report a feature set which includes arm-mode for
11925 // targets that don't have ARM mode.
11926 if (MissingFeatures.test(Feature_IsARMBit) && !hasARM())
11927 break;
11928 // Don't report any near-misses that both require switching instruction
11929 // set, and adding other subtarget features.
11930 if (isThumb() && MissingFeatures.test(Feature_IsARMBit) &&
11931 MissingFeatures.count() > 1)
11932 break;
11933 if (!isThumb() && MissingFeatures.test(Feature_IsThumbBit) &&
11934 MissingFeatures.count() > 1)
11935 break;
11936 if (!isThumb() && MissingFeatures.test(Feature_IsThumb2Bit) &&
11937 (MissingFeatures & ~FeatureBitset({Feature_IsThumb2Bit,
11938 Feature_IsThumbBit})).any())
11939 break;
11940 if (isMClass() && MissingFeatures.test(Feature_HasNEONBit))
11941 break;
11942
11943 NearMissMessage Message;
11944 Message.Loc = IDLoc;
11945 raw_svector_ostream OS(Message.Message);
11946
11947 OS << "instruction requires:";
11948 for (unsigned i = 0, e = MissingFeatures.size(); i != e; ++i)
11949 if (MissingFeatures.test(i))
11950 OS << ' ' << getSubtargetFeatureName(i);
11951
11952 NearMissesOut.emplace_back(Message);
11953
11954 break;
11955 }
11956 case NearMissInfo::NearMissPredicate: {
11957 NearMissMessage Message;
11958 Message.Loc = IDLoc;
11959 switch (I.getPredicateError()) {
11960 case Match_RequiresNotITBlock:
11961 Message.Message = "flag setting instruction only valid outside IT block";
11962 break;
11963 case Match_RequiresITBlock:
11964 Message.Message = "instruction only valid inside IT block";
11965 break;
11966 case Match_RequiresV6:
11967 Message.Message = "instruction variant requires ARMv6 or later";
11968 break;
11969 case Match_RequiresThumb2:
11970 Message.Message = "instruction variant requires Thumb2";
11971 break;
11972 case Match_RequiresV8:
11973 Message.Message = "instruction variant requires ARMv8 or later";
11974 break;
11975 case Match_RequiresFlagSetting:
11976 Message.Message = "no flag-preserving variant of this instruction available";
11977 break;
11978 case Match_InvalidOperand:
11979 Message.Message = "invalid operand for instruction";
11980 break;
11981 default:
11982 llvm_unreachable("Unhandled target predicate error");
11983 break;
11984 }
11985 NearMissesOut.emplace_back(Message);
11986 break;
11987 }
11988 case NearMissInfo::NearMissTooFewOperands: {
11989 if (!ReportedTooFewOperands) {
11990 SMLoc EndLoc = ((ARMOperand &)*Operands.back()).getEndLoc();
11991 NearMissesOut.emplace_back(NearMissMessage{
11992 EndLoc, StringRef("too few operands for instruction")});
11993 ReportedTooFewOperands = true;
11994 }
11995 break;
11996 }
11997 case NearMissInfo::NoNearMiss:
11998 // This should never leave the matcher.
11999 llvm_unreachable("not a near-miss");
12000 break;
12001 }
12002 }
12003}
12004
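The duplicate-suppression logic in FilterNearMisses keys a multimap on the operand index and records which match class has already been reported there, so a subclass of an already-reported class is skipped. Below is a stripped-down sketch of that idiom with hypothetical names, ignoring the isSubclass refinement shown above.

#include <algorithm>
#include <map>
#include <utility>

// Hypothetical reduction of the OperandMissesSeen check: report a given
// (operand index, match class) pair at most once.
static bool shouldReport(std::multimap<unsigned, unsigned> &Seen,
                         unsigned OpIdx, unsigned MatchClass) {
  auto Range = Seen.equal_range(OpIdx);
  bool AlreadySeen =
      std::any_of(Range.first, Range.second,
                  [MatchClass](const std::pair<const unsigned, unsigned> &P) {
                    return P.second == MatchClass;
                  });
  if (AlreadySeen)
    return false;          // a diagnostic for this pair was already emitted
  Seen.insert({OpIdx, MatchClass});
  return true;             // first time we see this pair: report it
}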
12005void ARMAsmParser::ReportNearMisses(SmallVectorImpl<NearMissInfo> &NearMisses,
12006 SMLoc IDLoc, OperandVector &Operands) {
12007 SmallVector<NearMissMessage, 4> Messages;
12008 FilterNearMisses(NearMisses, Messages, IDLoc, Operands);
12009
12010 if (Messages.size() == 0) {
12011 // No near-misses were found, so the best we can do is "invalid
12012 // instruction".
12013 Error(IDLoc, "invalid instruction");
12014 } else if (Messages.size() == 1) {
12015 // One near miss was found, report it as the sole error.
12016 Error(Messages[0].Loc, Messages[0].Message);
12017 } else {
12018 // More than one near miss, so report a generic "invalid instruction"
12019 // error, followed by notes for each of the near-misses.
12020 Error(IDLoc, "invalid instruction, any one of the following would fix this:");
12021 for (auto &M : Messages) {
12022 Note(M.Loc, M.Message);
12023 }
12024 }
12025}
12026
12027/// parseDirectiveArchExtension
12028/// ::= .arch_extension [no]feature
12029bool ARMAsmParser::parseDirectiveArchExtension(SMLoc L) {
12030 // FIXME: This structure should be moved inside ARMTargetParser
12031 // when we start to table-generate them, and we can use the ARM
12032 // flags below, that were generated by table-gen.
12033 static const struct {
12034 const uint64_t Kind;
12035 const FeatureBitset ArchCheck;
12036 const FeatureBitset Features;
12037 } Extensions[] = {
12038 { ARM::AEK_CRC, {Feature_HasV8Bit}, {ARM::FeatureCRC} },
12039 { ARM::AEK_CRYPTO, {Feature_HasV8Bit},
12040 {ARM::FeatureCrypto, ARM::FeatureNEON, ARM::FeatureFPARMv8} },
12041 { ARM::AEK_FP, {Feature_HasV8Bit},
12042 {ARM::FeatureVFP2_SP, ARM::FeatureFPARMv8} },
12043 { (ARM::AEK_HWDIVTHUMB | ARM::AEK_HWDIVARM),
12044 {Feature_HasV7Bit, Feature_IsNotMClassBit},
12045 {ARM::FeatureHWDivThumb, ARM::FeatureHWDivARM} },
12046 { ARM::AEK_MP, {Feature_HasV7Bit, Feature_IsNotMClassBit},
12047 {ARM::FeatureMP} },
12048 { ARM::AEK_SIMD, {Feature_HasV8Bit},
12049 {ARM::FeatureNEON, ARM::FeatureVFP2_SP, ARM::FeatureFPARMv8} },
12050 { ARM::AEK_SEC, {Feature_HasV6KBit}, {ARM::FeatureTrustZone} },
12051 // FIXME: Only available in A-class, isel not predicated
12052 { ARM::AEK_VIRT, {Feature_HasV7Bit}, {ARM::FeatureVirtualization} },
12053 { ARM::AEK_FP16, {Feature_HasV8_2aBit},
12054 {ARM::FeatureFPARMv8, ARM::FeatureFullFP16} },
12055 { ARM::AEK_RAS, {Feature_HasV8Bit}, {ARM::FeatureRAS} },
12056 { ARM::AEK_LOB, {Feature_HasV8_1MMainlineBit}, {ARM::FeatureLOB} },
12057 // FIXME: Unsupported extensions.
12058 { ARM::AEK_OS, {}, {} },
12059 { ARM::AEK_IWMMXT, {}, {} },
12060 { ARM::AEK_IWMMXT2, {}, {} },
12061 { ARM::AEK_MAVERICK, {}, {} },
12062 { ARM::AEK_XSCALE, {}, {} },
12063 };
12064
12065 MCAsmParser &Parser = getParser();
12066
12067 if (getLexer().isNot(AsmToken::Identifier))
12068 return Error(getLexer().getLoc(), "expected architecture extension name");
12069
12070 StringRef Name = Parser.getTok().getString();
12071 SMLoc ExtLoc = Parser.getTok().getLoc();
12072 Lex();
12073
12074 if (parseToken(AsmToken::EndOfStatement,
12075 "unexpected token in '.arch_extension' directive"))
12076 return true;
12077
12078 bool EnableFeature = true;
12079 if (Name.startswith_lower("no")) {
12080 EnableFeature = false;
12081 Name = Name.substr(2);
12082 }
12083 uint64_t FeatureKind = ARM::parseArchExt(Name);
12084 if (FeatureKind == ARM::AEK_INVALID)
12085 return Error(ExtLoc, "unknown architectural extension: " + Name);
12086
12087 for (const auto &Extension : Extensions) {
12088 if (Extension.Kind != FeatureKind)
12089 continue;
12090
12091 if (Extension.Features.none())
12092 return Error(ExtLoc, "unsupported architectural extension: " + Name);
12093
12094 if ((getAvailableFeatures() & Extension.ArchCheck) != Extension.ArchCheck)
12095 return Error(ExtLoc, "architectural extension '" + Name +
12096 "' is not "
12097 "allowed for the current base architecture");
12098
12099 MCSubtargetInfo &STI = copySTI();
12100 if (EnableFeature) {
12101 STI.SetFeatureBitsTransitively(Extension.Features);
12102 } else {
12103 STI.ClearFeatureBitsTransitively(Extension.Features);
12104 }
12105 FeatureBitset Features = ComputeAvailableFeatures(STI.getFeatureBits());
12106 setAvailableFeatures(Features);
12107 return false;
12108 }
12109
12110 return Error(ExtLoc, "unknown architectural extension: " + Name);
12111}
12112
12113// Define this matcher function after the auto-generated include so we
12114// have the match class enum definitions.
12115unsigned ARMAsmParser::validateTargetOperandClass(MCParsedAsmOperand &AsmOp,
12116 unsigned Kind) {
12117 ARMOperand &Op = static_cast<ARMOperand &>(AsmOp);
12118 // If the kind is a token for a literal immediate, check if our asm
12119 // operand matches. This is for InstAliases which have a fixed-value
12120 // immediate in the syntax.
12121 switch (Kind) {
12122 default: break;
12123 case MCK__HASH_0:
12124 if (Op.isImm())
12125 if (const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Op.getImm()))
12126 if (CE->getValue() == 0)
12127 return Match_Success;
12128 break;
12129 case MCK__HASH_8:
12130 if (Op.isImm())
12131 if (const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Op.getImm()))
12132 if (CE->getValue() == 8)
12133 return Match_Success;
12134 break;
12135 case MCK__HASH_16:
12136 if (Op.isImm())
12137 if (const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Op.getImm()))
12138 if (CE->getValue() == 16)
12139 return Match_Success;
12140 break;
12141 case MCK_ModImm:
12142 if (Op.isImm()) {
12143 const MCExpr *SOExpr = Op.getImm();
12144 int64_t Value;
12145 if (!SOExpr->evaluateAsAbsolute(Value))
12146 return Match_Success;
12147 assert((Value >= std::numeric_limits<int32_t>::min() &&
12148 Value <= std::numeric_limits<uint32_t>::max()) &&
12149 "expression value must be representable in 32 bits");
12150 }
12151 break;
12152 case MCK_rGPR:
12153 if (hasV8Ops() && Op.isReg() && Op.getReg() == ARM::SP)
12154 return Match_Success;
12155 return Match_rGPR;
12156 case MCK_GPRPair:
12157 if (Op.isReg() &&
12158 MRI->getRegClass(ARM::GPRRegClassID).contains(Op.getReg()))
12159 return Match_Success;
12160 break;
12161 }
12162 return Match_InvalidOperand;
12163}
12164
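The MCK__HASH_0/8/16 cases above all perform the same test: an InstAlias with a literal '#N' in its syntax only matches when the parsed operand folds to exactly that constant. A condensed sketch of that check follows; the helper is hypothetical and presumes the surrounding file's context, since ARMOperand is local to ARMAsmParser.cpp.

// Hypothetical condensation of the MCK__HASH_<N> cases in
// validateTargetOperandClass: accept only an immediate equal to Expected.
static bool matchesFixedImmediate(const ARMOperand &Op, int64_t Expected) {
  if (!Op.isImm())
    return false;
  const auto *CE = llvm::dyn_cast<llvm::MCConstantExpr>(Op.getImm());
  return CE && CE->getValue() == Expected;
}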
12165bool ARMAsmParser::isMnemonicVPTPredicable(StringRef Mnemonic,
12166 StringRef ExtraToken) {
12167 if (!hasMVE())
12168 return false;
12169
12170 return Mnemonic.startswith("vabav") || Mnemonic.startswith("vaddv") ||
12171 Mnemonic.startswith("vaddlv") || Mnemonic.startswith("vminnmv") ||
12172 Mnemonic.startswith("vminnmav") || Mnemonic.startswith("vminv") ||
12173 Mnemonic.startswith("vminav") || Mnemonic.startswith("vmaxnmv") ||
12174 Mnemonic.startswith("vmaxnmav") || Mnemonic.startswith("vmaxv") ||
12175 Mnemonic.startswith("vmaxav") || Mnemonic.startswith("vmladav") ||
12176 Mnemonic.startswith("vrmlaldavh") || Mnemonic.startswith("vrmlalvh") ||
12177 Mnemonic.startswith("vmlsdav") || Mnemonic.startswith("vmlav") ||
12178 Mnemonic.startswith("vmlaldav") || Mnemonic.startswith("vmlalv") ||
12179 Mnemonic.startswith("vmaxnm") || Mnemonic.startswith("vminnm") ||
12180 Mnemonic.startswith("vmax") || Mnemonic.startswith("vmin") ||
12181 Mnemonic.startswith("vshlc") || Mnemonic.startswith("vmovlt") ||
12182 Mnemonic.startswith("vmovlb") || Mnemonic.startswith("vshll") ||
12183 Mnemonic.startswith("vrshrn") || Mnemonic.startswith("vshrn") ||
12184 Mnemonic.startswith("vqrshrun") || Mnemonic.startswith("vqshrun") ||
12185 Mnemonic.startswith("vqrshrn") || Mnemonic.startswith("vqshrn") ||
12186 Mnemonic.startswith("vbic") || Mnemonic.startswith("vrev64") ||
12187 Mnemonic.startswith("vrev32") || Mnemonic.startswith("vrev16") ||
12188 Mnemonic.startswith("vmvn") || Mnemonic.startswith("veor") ||
12189 Mnemonic.startswith("vorn") || Mnemonic.startswith("vorr") ||
12190 Mnemonic.startswith("vand") || Mnemonic.startswith("vmul") ||
12191 Mnemonic.startswith("vqrdmulh") || Mnemonic.startswith("vqdmulh") ||
12192 Mnemonic.startswith("vsub") || Mnemonic.startswith("vadd") ||
12193 Mnemonic.startswith("vqsub") || Mnemonic.startswith("vqadd") ||
12194 Mnemonic.startswith("vabd") || Mnemonic.startswith("vrhadd") ||
12195 Mnemonic.startswith("vhsub") || Mnemonic.startswith("vhadd") ||
12196 Mnemonic.startswith("vdup") || Mnemonic.startswith("vcls") ||
12197 Mnemonic.startswith("vclz") || Mnemonic.startswith("vneg") ||
12198 Mnemonic.startswith("vabs") || Mnemonic.startswith("vqneg") ||
12199 Mnemonic.startswith("vqabs") ||
12200 (Mnemonic.startswith("vrint") && Mnemonic != "vrintr") ||
12201 Mnemonic.startswith("vcmla") || Mnemonic.startswith("vfma") ||
12202 Mnemonic.startswith("vfms") || Mnemonic.startswith("vcadd") ||
12203 Mnemonic.startswith("vadd") || Mnemonic.startswith("vsub") ||
12204 Mnemonic.startswith("vshl") || Mnemonic.startswith("vqshl") ||
12205 Mnemonic.startswith("vqrshl") || Mnemonic.startswith("vrshl") ||
12206 Mnemonic.startswith("vsri") || Mnemonic.startswith("vsli") ||
12207 Mnemonic.startswith("vrshr") || Mnemonic.startswith("vshr") ||
12208 Mnemonic.startswith("vpsel") || Mnemonic.startswith("vcmp") ||
12209 Mnemonic.startswith("vqdmladh") || Mnemonic.startswith("vqrdmladh") ||
12210 Mnemonic.startswith("vqdmlsdh") || Mnemonic.startswith("vqrdmlsdh") ||
12211 Mnemonic.startswith("vcmul") || Mnemonic.startswith("vrmulh") ||
12212 Mnemonic.startswith("vqmovn") || Mnemonic.startswith("vqmovun") ||
12213 Mnemonic.startswith("vmovnt") || Mnemonic.startswith("vmovnb") ||
12214 Mnemonic.startswith("vmaxa") || Mnemonic.startswith("vmaxnma") ||
12215 Mnemonic.startswith("vhcadd") || Mnemonic.startswith("vadc") ||
12216 Mnemonic.startswith("vsbc") || Mnemonic.startswith("vrshr") ||
12217 Mnemonic.startswith("vshr") || Mnemonic.startswith("vstrb") ||
12218 Mnemonic.startswith("vldrb") ||
12219 (Mnemonic.startswith("vstrh") && Mnemonic != "vstrhi") ||
12220 (Mnemonic.startswith("vldrh") && Mnemonic != "vldrhi") ||
12221 Mnemonic.startswith("vstrw") || Mnemonic.startswith("vldrw") ||
12222 Mnemonic.startswith("vldrd") || Mnemonic.startswith("vstrd") ||
12223 Mnemonic.startswith("vqdmull") || Mnemonic.startswith("vbrsr") ||
12224 Mnemonic.startswith("vfmas") || Mnemonic.startswith("vmlas") ||
12225 Mnemonic.startswith("vmla") || Mnemonic.startswith("vqdmlash") ||
12226 Mnemonic.startswith("vqdmlah") || Mnemonic.startswith("vqrdmlash") ||
12227 Mnemonic.startswith("vqrdmlah") || Mnemonic.startswith("viwdup") ||
12228 Mnemonic.startswith("vdwdup") || Mnemonic.startswith("vidup") ||
12229 Mnemonic.startswith("vddup") || Mnemonic.startswith("vctp") ||
12230 Mnemonic.startswith("vpnot") || Mnemonic.startswith("vbic") ||
12231 Mnemonic.startswith("vrmlsldavh") || Mnemonic.startswith("vmlsldav") ||
12232 Mnemonic.startswith("vcvt") ||
12233 MS.isVPTPredicableCDEInstr(Mnemonic) ||
12234 (Mnemonic.startswith("vmov") &&
12235 !(ExtraToken == ".f16" || ExtraToken == ".32" ||
12236 ExtraToken == ".16" || ExtraToken == ".8"));
12237}

/build/llvm-toolchain-snapshot-11~++20200309111110+2c36c23f347/llvm/include/llvm/MC/MCAsmMacro.h

1//===- MCAsmMacro.h - Assembly Macros ---------------------------*- C++ -*-===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8
9#ifndef LLVM_MC_MCASMMACRO_H
10#define LLVM_MC_MCASMMACRO_H
11
12#include "llvm/ADT/APInt.h"
13#include "llvm/ADT/StringRef.h"
14#include "llvm/Support/Debug.h"
15#include "llvm/Support/SMLoc.h"
16#include <vector>
17
18namespace llvm {
19
20/// Target independent representation for an assembler token.
21class AsmToken {
22public:
23 enum TokenKind {
24 // Markers
25 Eof, Error,
26
27 // String values.
28 Identifier,
29 String,
30
31 // Integer values.
32 Integer,
33 BigNum, // larger than 64 bits
34
35 // Real values.
36 Real,
37
38 // Comments
39 Comment,
40 HashDirective,
41 // No-value.
42 EndOfStatement,
43 Colon,
44 Space,
45 Plus, Minus, Tilde,
46 Slash, // '/'
47 BackSlash, // '\'
48 LParen, RParen, LBrac, RBrac, LCurly, RCurly,
49 Star, Dot, Comma, Dollar, Equal, EqualEqual,
50
51 Pipe, PipePipe, Caret,
52 Amp, AmpAmp, Exclaim, ExclaimEqual, Percent, Hash,
53 Less, LessEqual, LessLess, LessGreater,
54 Greater, GreaterEqual, GreaterGreater, At, MinusGreater,
55
56 // MIPS unary expression operators such as %neg.
57 PercentCall16, PercentCall_Hi, PercentCall_Lo, PercentDtprel_Hi,
58 PercentDtprel_Lo, PercentGot, PercentGot_Disp, PercentGot_Hi, PercentGot_Lo,
59 PercentGot_Ofst, PercentGot_Page, PercentGottprel, PercentGp_Rel, PercentHi,
60 PercentHigher, PercentHighest, PercentLo, PercentNeg, PercentPcrel_Hi,
61 PercentPcrel_Lo, PercentTlsgd, PercentTlsldm, PercentTprel_Hi,
62 PercentTprel_Lo
63 };
64
65private:
66 TokenKind Kind;
67
68 /// A reference to the entire token contents; this is always a pointer into
69 /// a memory buffer owned by the source manager.
70 StringRef Str;
71
72 APInt IntVal;
73
74public:
75 AsmToken() = default;
76 AsmToken(TokenKind Kind, StringRef Str, APInt IntVal)
77 : Kind(Kind), Str(Str), IntVal(std::move(IntVal)) {}
78 AsmToken(TokenKind Kind, StringRef Str, int64_t IntVal = 0)
79 : Kind(Kind), Str(Str), IntVal(64, IntVal, true) {}
80
81 TokenKind getKind() const { return Kind; }
82 bool is(TokenKind K) const { return Kind == K; }
36. Assuming 'K' is not equal to field 'Kind'
37. Returning zero, which participates in a condition later
83 bool isNot(TokenKind K) const { return Kind != K; }
84
85 SMLoc getLoc() const;
86 SMLoc getEndLoc() const;
87 SMRange getLocRange() const;
88
89 /// Get the contents of a string token (without quotes).
90 StringRef getStringContents() const {
91 assert(Kind == String && "This token isn't a string!");
92 return Str.slice(1, Str.size() - 1);
93 }
94
95 /// Get the identifier string for the current token, which should be an
96 /// identifier or a string. This gets the portion of the string which should
97 /// be used as the identifier, e.g., it does not include the quotes on
98 /// strings.
99 StringRef getIdentifier() const {
100 if (Kind == Identifier)
101 return getString();
102 return getStringContents();
103 }
104
105 /// Get the string for the current token, this includes all characters (for
106 /// example, the quotes on strings) in the token.
107 ///
108 /// The returned StringRef points into the source manager's memory buffer, and
109 /// is safe to store across calls to Lex().
110 StringRef getString() const { return Str; }
111
112 // FIXME: Don't compute this in advance, it makes every token larger, and is
113 // also not generally what we want (it is nicer for recovery etc. to lex 123br
114 // as a single token, then diagnose as an invalid number).
115 int64_t getIntVal() const {
116 assert(Kind == Integer && "This token isn't an integer!");
117 return IntVal.getZExtValue();
118 }
119
120 APInt getAPIntVal() const {
121 assert((Kind == Integer || Kind == BigNum) &&
122 "This token isn't an integer!");
123 return IntVal;
124 }
125
126 void dump(raw_ostream &OS) const;
127};
128
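The directive parsers earlier in this report use AsmToken mainly through is()/isNot() and getIdentifier(), peeking at the current token before deciding whether to consume it. A minimal sketch of that usage pattern (hypothetical helper, assuming an MCAsmParser instance as in the parsers above):

#include "llvm/ADT/StringRef.h"
#include "llvm/MC/MCParser/MCAsmParser.h"

// Hypothetical sketch of the token-peeking pattern used by the ARM directive
// parsers: inspect the current token's kind without consuming it, then read
// its identifier text and advance the lexer.
static bool takeIdentifier(llvm::MCAsmParser &Parser, llvm::StringRef &Name) {
  const llvm::AsmToken &Tok = Parser.getTok();
  if (Tok.isNot(llvm::AsmToken::Identifier))
    return false;            // caller reports "unexpected token"
  Name = Tok.getIdentifier();
  Parser.Lex();              // consume the identifier
  return true;
}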
129struct MCAsmMacroParameter {
130 StringRef Name;
131 std::vector<AsmToken> Value;
132 bool Required = false;
133 bool Vararg = false;
134
135#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
136 void dump() const { dump(dbgs()); }
137 LLVM_DUMP_METHOD void dump(raw_ostream &OS) const;
138#endif
139};
140
141typedef std::vector<MCAsmMacroParameter> MCAsmMacroParameters;
142struct MCAsmMacro {
143 StringRef Name;
144 StringRef Body;
145 MCAsmMacroParameters Parameters;
146
147public:
148 MCAsmMacro(StringRef N, StringRef B, MCAsmMacroParameters P)
149 : Name(N), Body(B), Parameters(std::move(P)) {}
150
151#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
152 void dump() const { dump(dbgs()); }
153 LLVM_DUMP_METHOD void dump(raw_ostream &OS) const;
154#endif
155};
156} // namespace llvm
157
158#endif