Bug Summary

File: lib/Target/ARM/AsmParser/ARMAsmParser.cpp
Warning: line 271, column 36
The result of the left shift is undefined due to shifting by '32', which is greater than or equal to the width of type 'int'

Annotated Source Code

[?] Use j/k keys for keyboard navigation

1//===- ARMAsmParser.cpp - Parse ARM assembly to MCInst instructions -------===//
2//
3// The LLVM Compiler Infrastructure
4//
5// This file is distributed under the University of Illinois Open Source
6// License. See LICENSE.TXT for details.
7//
8//===----------------------------------------------------------------------===//
9
10#include "ARMFeatures.h"
11#include "Utils/ARMBaseInfo.h"
12#include "MCTargetDesc/ARMAddressingModes.h"
13#include "MCTargetDesc/ARMBaseInfo.h"
14#include "MCTargetDesc/ARMMCExpr.h"
15#include "MCTargetDesc/ARMMCTargetDesc.h"
16#include "llvm/ADT/APFloat.h"
17#include "llvm/ADT/APInt.h"
18#include "llvm/ADT/None.h"
19#include "llvm/ADT/STLExtras.h"
20#include "llvm/ADT/SmallSet.h"
21#include "llvm/ADT/SmallVector.h"
22#include "llvm/ADT/StringMap.h"
23#include "llvm/ADT/StringRef.h"
24#include "llvm/ADT/StringSwitch.h"
25#include "llvm/ADT/Triple.h"
26#include "llvm/ADT/Twine.h"
27#include "llvm/MC/MCContext.h"
28#include "llvm/MC/MCExpr.h"
29#include "llvm/MC/MCInst.h"
30#include "llvm/MC/MCInstrDesc.h"
31#include "llvm/MC/MCInstrInfo.h"
32#include "llvm/MC/MCObjectFileInfo.h"
33#include "llvm/MC/MCParser/MCAsmLexer.h"
34#include "llvm/MC/MCParser/MCAsmParser.h"
35#include "llvm/MC/MCParser/MCAsmParserExtension.h"
36#include "llvm/MC/MCParser/MCAsmParserUtils.h"
37#include "llvm/MC/MCParser/MCParsedAsmOperand.h"
38#include "llvm/MC/MCParser/MCTargetAsmParser.h"
39#include "llvm/MC/MCRegisterInfo.h"
40#include "llvm/MC/MCSection.h"
41#include "llvm/MC/MCStreamer.h"
42#include "llvm/MC/MCSubtargetInfo.h"
43#include "llvm/MC/MCSymbol.h"
44#include "llvm/MC/SubtargetFeature.h"
45#include "llvm/Support/ARMBuildAttributes.h"
46#include "llvm/Support/ARMEHABI.h"
47#include "llvm/Support/Casting.h"
48#include "llvm/Support/CommandLine.h"
49#include "llvm/Support/Compiler.h"
50#include "llvm/Support/ErrorHandling.h"
51#include "llvm/Support/MathExtras.h"
52#include "llvm/Support/SMLoc.h"
53#include "llvm/Support/TargetParser.h"
54#include "llvm/Support/TargetRegistry.h"
55#include "llvm/Support/raw_ostream.h"
56#include <algorithm>
57#include <cassert>
58#include <cstddef>
59#include <cstdint>
60#include <iterator>
61#include <limits>
62#include <memory>
63#include <string>
64#include <utility>
65#include <vector>
66
67#define DEBUG_TYPE "asm-parser"
68
69using namespace llvm;
70
71namespace {
72
73enum class ImplicitItModeTy { Always, Never, ARMOnly, ThumbOnly };
74
75static cl::opt<ImplicitItModeTy> ImplicitItMode(
76 "arm-implicit-it", cl::init(ImplicitItModeTy::ARMOnly),
77 cl::desc("Allow conditional instructions outdside of an IT block"),
78 cl::values(clEnumValN(ImplicitItModeTy::Always, "always",llvm::cl::OptionEnumValue { "always", int(ImplicitItModeTy::Always
), "Accept in both ISAs, emit implicit ITs in Thumb" }
79 "Accept in both ISAs, emit implicit ITs in Thumb")llvm::cl::OptionEnumValue { "always", int(ImplicitItModeTy::Always
), "Accept in both ISAs, emit implicit ITs in Thumb" }
,
80 clEnumValN(ImplicitItModeTy::Never, "never",llvm::cl::OptionEnumValue { "never", int(ImplicitItModeTy::Never
), "Warn in ARM, reject in Thumb" }
81 "Warn in ARM, reject in Thumb")llvm::cl::OptionEnumValue { "never", int(ImplicitItModeTy::Never
), "Warn in ARM, reject in Thumb" }
,
82 clEnumValN(ImplicitItModeTy::ARMOnly, "arm",llvm::cl::OptionEnumValue { "arm", int(ImplicitItModeTy::ARMOnly
), "Accept in ARM, reject in Thumb" }
83 "Accept in ARM, reject in Thumb")llvm::cl::OptionEnumValue { "arm", int(ImplicitItModeTy::ARMOnly
), "Accept in ARM, reject in Thumb" }
,
84 clEnumValN(ImplicitItModeTy::ThumbOnly, "thumb",llvm::cl::OptionEnumValue { "thumb", int(ImplicitItModeTy::ThumbOnly
), "Warn in ARM, emit implicit ITs in Thumb" }
85 "Warn in ARM, emit implicit ITs in Thumb")llvm::cl::OptionEnumValue { "thumb", int(ImplicitItModeTy::ThumbOnly
), "Warn in ARM, emit implicit ITs in Thumb" }
));
86
87static cl::opt<bool> AddBuildAttributes("arm-add-build-attributes",
88 cl::init(false));
89
90enum VectorLaneTy { NoLanes, AllLanes, IndexedLane };
91
92class UnwindContext {
93 using Locs = SmallVector<SMLoc, 4>;
94
95 MCAsmParser &Parser;
96 Locs FnStartLocs;
97 Locs CantUnwindLocs;
98 Locs PersonalityLocs;
99 Locs PersonalityIndexLocs;
100 Locs HandlerDataLocs;
101 int FPReg;
102
103public:
104 UnwindContext(MCAsmParser &P) : Parser(P), FPReg(ARM::SP) {}
105
106 bool hasFnStart() const { return !FnStartLocs.empty(); }
107 bool cantUnwind() const { return !CantUnwindLocs.empty(); }
108 bool hasHandlerData() const { return !HandlerDataLocs.empty(); }
109
110 bool hasPersonality() const {
111 return !(PersonalityLocs.empty() && PersonalityIndexLocs.empty());
112 }
113
114 void recordFnStart(SMLoc L) { FnStartLocs.push_back(L); }
115 void recordCantUnwind(SMLoc L) { CantUnwindLocs.push_back(L); }
116 void recordPersonality(SMLoc L) { PersonalityLocs.push_back(L); }
117 void recordHandlerData(SMLoc L) { HandlerDataLocs.push_back(L); }
118 void recordPersonalityIndex(SMLoc L) { PersonalityIndexLocs.push_back(L); }
119
120 void saveFPReg(int Reg) { FPReg = Reg; }
121 int getFPReg() const { return FPReg; }
122
123 void emitFnStartLocNotes() const {
124 for (Locs::const_iterator FI = FnStartLocs.begin(), FE = FnStartLocs.end();
125 FI != FE; ++FI)
126 Parser.Note(*FI, ".fnstart was specified here");
127 }
128
129 void emitCantUnwindLocNotes() const {
130 for (Locs::const_iterator UI = CantUnwindLocs.begin(),
131 UE = CantUnwindLocs.end(); UI != UE; ++UI)
132 Parser.Note(*UI, ".cantunwind was specified here");
133 }
134
135 void emitHandlerDataLocNotes() const {
136 for (Locs::const_iterator HI = HandlerDataLocs.begin(),
137 HE = HandlerDataLocs.end(); HI != HE; ++HI)
138 Parser.Note(*HI, ".handlerdata was specified here");
139 }
140
141 void emitPersonalityLocNotes() const {
142 for (Locs::const_iterator PI = PersonalityLocs.begin(),
143 PE = PersonalityLocs.end(),
144 PII = PersonalityIndexLocs.begin(),
145 PIE = PersonalityIndexLocs.end();
146 PI != PE || PII != PIE;) {
147 if (PI != PE && (PII == PIE || PI->getPointer() < PII->getPointer()))
148 Parser.Note(*PI++, ".personality was specified here");
149 else if (PII != PIE && (PI == PE || PII->getPointer() < PI->getPointer()))
150 Parser.Note(*PII++, ".personalityindex was specified here");
151 else
152 llvm_unreachable(".personality and .personalityindex cannot be "::llvm::llvm_unreachable_internal(".personality and .personalityindex cannot be "
"at the same location", "/build/llvm-toolchain-snapshot-6.0~svn321639/lib/Target/ARM/AsmParser/ARMAsmParser.cpp"
, 153)
153 "at the same location")::llvm::llvm_unreachable_internal(".personality and .personalityindex cannot be "
"at the same location", "/build/llvm-toolchain-snapshot-6.0~svn321639/lib/Target/ARM/AsmParser/ARMAsmParser.cpp"
, 153)
;
154 }
155 }
156
157 void reset() {
158 FnStartLocs = Locs();
159 CantUnwindLocs = Locs();
160 PersonalityLocs = Locs();
161 HandlerDataLocs = Locs();
162 PersonalityIndexLocs = Locs();
163 FPReg = ARM::SP;
164 }
165};
166
167class ARMAsmParser : public MCTargetAsmParser {
168 const MCRegisterInfo *MRI;
169 UnwindContext UC;
170
171 ARMTargetStreamer &getTargetStreamer() {
172 assert(getParser().getStreamer().getTargetStreamer() &&(static_cast <bool> (getParser().getStreamer().getTargetStreamer
() && "do not have a target streamer") ? void (0) : __assert_fail
("getParser().getStreamer().getTargetStreamer() && \"do not have a target streamer\""
, "/build/llvm-toolchain-snapshot-6.0~svn321639/lib/Target/ARM/AsmParser/ARMAsmParser.cpp"
, 173, __extension__ __PRETTY_FUNCTION__))
173 "do not have a target streamer")(static_cast <bool> (getParser().getStreamer().getTargetStreamer
() && "do not have a target streamer") ? void (0) : __assert_fail
("getParser().getStreamer().getTargetStreamer() && \"do not have a target streamer\""
, "/build/llvm-toolchain-snapshot-6.0~svn321639/lib/Target/ARM/AsmParser/ARMAsmParser.cpp"
, 173, __extension__ __PRETTY_FUNCTION__))
;
174 MCTargetStreamer &TS = *getParser().getStreamer().getTargetStreamer();
175 return static_cast<ARMTargetStreamer &>(TS);
176 }
177
178 // Map of register aliases registers via the .req directive.
179 StringMap<unsigned> RegisterReqs;
180
181 bool NextSymbolIsThumb;
182
183 bool useImplicitITThumb() const {
184 return ImplicitItMode == ImplicitItModeTy::Always ||
185 ImplicitItMode == ImplicitItModeTy::ThumbOnly;
186 }
187
188 bool useImplicitITARM() const {
189 return ImplicitItMode == ImplicitItModeTy::Always ||
190 ImplicitItMode == ImplicitItModeTy::ARMOnly;
191 }
192
193 struct {
194 ARMCC::CondCodes Cond; // Condition for IT block.
195 unsigned Mask:4; // Condition mask for instructions.
196 // Starting at first 1 (from lsb).
197 // '1' condition as indicated in IT.
198 // '0' inverse of condition (else).
199 // Count of instructions in IT block is
200 // 4 - trailingzeroes(mask)
201 // Note that this does not have the same encoding
202 // as in the IT instruction, which also depends
203 // on the low bit of the condition code.
204
205 unsigned CurPosition; // Current position in parsing of IT
206 // block. In range [0,4], with 0 being the IT
207 // instruction itself. Initialized according to
208 // count of instructions in block. ~0U if no
209 // active IT block.
210
211 bool IsExplicit; // true - The IT instruction was present in the
212 // input, we should not modify it.
213 // false - The IT instruction was added
214 // implicitly, we can extend it if that
215 // would be legal.
216 } ITState;
217
218 SmallVector<MCInst, 4> PendingConditionalInsts;
219
220 void flushPendingInstructions(MCStreamer &Out) override {
221 if (!inImplicitITBlock()) {
222 assert(PendingConditionalInsts.size() == 0)(static_cast <bool> (PendingConditionalInsts.size() == 0
) ? void (0) : __assert_fail ("PendingConditionalInsts.size() == 0"
, "/build/llvm-toolchain-snapshot-6.0~svn321639/lib/Target/ARM/AsmParser/ARMAsmParser.cpp"
, 222, __extension__ __PRETTY_FUNCTION__))
;
223 return;
224 }
225
226 // Emit the IT instruction
227 unsigned Mask = getITMaskEncoding();
228 MCInst ITInst;
229 ITInst.setOpcode(ARM::t2IT);
230 ITInst.addOperand(MCOperand::createImm(ITState.Cond));
231 ITInst.addOperand(MCOperand::createImm(Mask));
232 Out.EmitInstruction(ITInst, getSTI());
233
234 // Emit the conditonal instructions
235 assert(PendingConditionalInsts.size() <= 4)(static_cast <bool> (PendingConditionalInsts.size() <=
4) ? void (0) : __assert_fail ("PendingConditionalInsts.size() <= 4"
, "/build/llvm-toolchain-snapshot-6.0~svn321639/lib/Target/ARM/AsmParser/ARMAsmParser.cpp"
, 235, __extension__ __PRETTY_FUNCTION__))
;
236 for (const MCInst &Inst : PendingConditionalInsts) {
237 Out.EmitInstruction(Inst, getSTI());
238 }
239 PendingConditionalInsts.clear();
240
241 // Clear the IT state
242 ITState.Mask = 0;
243 ITState.CurPosition = ~0U;
244 }
245
246 bool inITBlock() { return ITState.CurPosition != ~0U; }
247 bool inExplicitITBlock() { return inITBlock() && ITState.IsExplicit; }
248 bool inImplicitITBlock() { return inITBlock() && !ITState.IsExplicit; }
249
250 bool lastInITBlock() {
251 return ITState.CurPosition == 4 - countTrailingZeros(ITState.Mask);
252 }
253
254 void forwardITPosition() {
255 if (!inITBlock()) return;
256 // Move to the next instruction in the IT block, if there is one. If not,
257 // mark the block as done, except for implicit IT blocks, which we leave
258 // open until we find an instruction that can't be added to it.
259 unsigned TZ = countTrailingZeros(ITState.Mask);
260 if (++ITState.CurPosition == 5 - TZ && ITState.IsExplicit)
261 ITState.CurPosition = ~0U; // Done with the IT block after this.
262 }
263
264 // Rewind the state of the current IT block, removing the last slot from it.
265 void rewindImplicitITPosition() {
266 assert(inImplicitITBlock())(static_cast <bool> (inImplicitITBlock()) ? void (0) : __assert_fail
("inImplicitITBlock()", "/build/llvm-toolchain-snapshot-6.0~svn321639/lib/Target/ARM/AsmParser/ARMAsmParser.cpp"
, 266, __extension__ __PRETTY_FUNCTION__))
;
267 assert(ITState.CurPosition > 1)(static_cast <bool> (ITState.CurPosition > 1) ? void
(0) : __assert_fail ("ITState.CurPosition > 1", "/build/llvm-toolchain-snapshot-6.0~svn321639/lib/Target/ARM/AsmParser/ARMAsmParser.cpp"
, 267, __extension__ __PRETTY_FUNCTION__))
;
268 ITState.CurPosition--;
269 unsigned TZ = countTrailingZeros(ITState.Mask);
270 unsigned NewMask = 0;
271 NewMask |= ITState.Mask & (0xC << TZ);
The result of the left shift is undefined due to shifting by '32', which is greater or equal to the width of type 'int'
272 NewMask |= 0x2 << TZ;
273 ITState.Mask = NewMask;
274 }
275
276 // Rewind the state of the current IT block, removing the last slot from it.
277 // If we were at the first slot, this closes the IT block.
278 void discardImplicitITBlock() {
279 assert(inImplicitITBlock())(static_cast <bool> (inImplicitITBlock()) ? void (0) : __assert_fail
("inImplicitITBlock()", "/build/llvm-toolchain-snapshot-6.0~svn321639/lib/Target/ARM/AsmParser/ARMAsmParser.cpp"
, 279, __extension__ __PRETTY_FUNCTION__))
;
280 assert(ITState.CurPosition == 1)(static_cast <bool> (ITState.CurPosition == 1) ? void (
0) : __assert_fail ("ITState.CurPosition == 1", "/build/llvm-toolchain-snapshot-6.0~svn321639/lib/Target/ARM/AsmParser/ARMAsmParser.cpp"
, 280, __extension__ __PRETTY_FUNCTION__))
;
281 ITState.CurPosition = ~0U;
282 }
283
284 // Return the low-subreg of a given Q register.
285 unsigned getDRegFromQReg(unsigned QReg) const {
286 return MRI->getSubReg(QReg, ARM::dsub_0);
287 }
288
289 // Get the encoding of the IT mask, as it will appear in an IT instruction.
290 unsigned getITMaskEncoding() {
291 assert(inITBlock())(static_cast <bool> (inITBlock()) ? void (0) : __assert_fail
("inITBlock()", "/build/llvm-toolchain-snapshot-6.0~svn321639/lib/Target/ARM/AsmParser/ARMAsmParser.cpp"
, 291, __extension__ __PRETTY_FUNCTION__))
;
292 unsigned Mask = ITState.Mask;
293 unsigned TZ = countTrailingZeros(Mask);
294 if ((ITState.Cond & 1) == 0) {
295 assert(Mask && TZ <= 3 && "illegal IT mask value!")(static_cast <bool> (Mask && TZ <= 3 &&
"illegal IT mask value!") ? void (0) : __assert_fail ("Mask && TZ <= 3 && \"illegal IT mask value!\""
, "/build/llvm-toolchain-snapshot-6.0~svn321639/lib/Target/ARM/AsmParser/ARMAsmParser.cpp"
, 295, __extension__ __PRETTY_FUNCTION__))
;
296 Mask ^= (0xE << TZ) & 0xF;
297 }
298 return Mask;
299 }
300
301 // Get the condition code corresponding to the current IT block slot.
302 ARMCC::CondCodes currentITCond() {
303 unsigned MaskBit;
304 if (ITState.CurPosition == 1)
305 MaskBit = 1;
306 else
307 MaskBit = (ITState.Mask >> (5 - ITState.CurPosition)) & 1;
308
309 return MaskBit ? ITState.Cond : ARMCC::getOppositeCondition(ITState.Cond);
310 }
311
312 // Invert the condition of the current IT block slot without changing any
313 // other slots in the same block.
314 void invertCurrentITCondition() {
315 if (ITState.CurPosition == 1) {
316 ITState.Cond = ARMCC::getOppositeCondition(ITState.Cond);
317 } else {
318 ITState.Mask ^= 1 << (5 - ITState.CurPosition);
319 }
320 }
321
322 // Returns true if the current IT block is full (all 4 slots used).
323 bool isITBlockFull() {
324 return inITBlock() && (ITState.Mask & 1);
325 }
326
327 // Extend the current implicit IT block to have one more slot with the given
328 // condition code.
329 void extendImplicitITBlock(ARMCC::CondCodes Cond) {
330 assert(inImplicitITBlock())(static_cast <bool> (inImplicitITBlock()) ? void (0) : __assert_fail
("inImplicitITBlock()", "/build/llvm-toolchain-snapshot-6.0~svn321639/lib/Target/ARM/AsmParser/ARMAsmParser.cpp"
, 330, __extension__ __PRETTY_FUNCTION__))
;
331 assert(!isITBlockFull())(static_cast <bool> (!isITBlockFull()) ? void (0) : __assert_fail
("!isITBlockFull()", "/build/llvm-toolchain-snapshot-6.0~svn321639/lib/Target/ARM/AsmParser/ARMAsmParser.cpp"
, 331, __extension__ __PRETTY_FUNCTION__))
;
332 assert(Cond == ITState.Cond ||(static_cast <bool> (Cond == ITState.Cond || Cond == ARMCC
::getOppositeCondition(ITState.Cond)) ? void (0) : __assert_fail
("Cond == ITState.Cond || Cond == ARMCC::getOppositeCondition(ITState.Cond)"
, "/build/llvm-toolchain-snapshot-6.0~svn321639/lib/Target/ARM/AsmParser/ARMAsmParser.cpp"
, 333, __extension__ __PRETTY_FUNCTION__))
333 Cond == ARMCC::getOppositeCondition(ITState.Cond))(static_cast <bool> (Cond == ITState.Cond || Cond == ARMCC
::getOppositeCondition(ITState.Cond)) ? void (0) : __assert_fail
("Cond == ITState.Cond || Cond == ARMCC::getOppositeCondition(ITState.Cond)"
, "/build/llvm-toolchain-snapshot-6.0~svn321639/lib/Target/ARM/AsmParser/ARMAsmParser.cpp"
, 333, __extension__ __PRETTY_FUNCTION__))
;
334 unsigned TZ = countTrailingZeros(ITState.Mask);
335 unsigned NewMask = 0;
336 // Keep any existing condition bits.
337 NewMask |= ITState.Mask & (0xE << TZ);
338 // Insert the new condition bit.
339 NewMask |= (Cond == ITState.Cond) << TZ;
340 // Move the trailing 1 down one bit.
341 NewMask |= 1 << (TZ - 1);
342 ITState.Mask = NewMask;
343 }
344
345 // Create a new implicit IT block with a dummy condition code.
346 void startImplicitITBlock() {
347 assert(!inITBlock())(static_cast <bool> (!inITBlock()) ? void (0) : __assert_fail
("!inITBlock()", "/build/llvm-toolchain-snapshot-6.0~svn321639/lib/Target/ARM/AsmParser/ARMAsmParser.cpp"
, 347, __extension__ __PRETTY_FUNCTION__))
;
348 ITState.Cond = ARMCC::AL;
349 ITState.Mask = 8;
350 ITState.CurPosition = 1;
351 ITState.IsExplicit = false;
352 }
353
354 // Create a new explicit IT block with the given condition and mask. The mask
355 // should be in the parsed format, with a 1 implying 't', regardless of the
356 // low bit of the condition.
357 void startExplicitITBlock(ARMCC::CondCodes Cond, unsigned Mask) {
358 assert(!inITBlock())(static_cast <bool> (!inITBlock()) ? void (0) : __assert_fail
("!inITBlock()", "/build/llvm-toolchain-snapshot-6.0~svn321639/lib/Target/ARM/AsmParser/ARMAsmParser.cpp"
, 358, __extension__ __PRETTY_FUNCTION__))
;
359 ITState.Cond = Cond;
360 ITState.Mask = Mask;
361 ITState.CurPosition = 0;
362 ITState.IsExplicit = true;
363 }
364
365 void Note(SMLoc L, const Twine &Msg, SMRange Range = None) {
366 return getParser().Note(L, Msg, Range);
367 }
368
369 bool Warning(SMLoc L, const Twine &Msg, SMRange Range = None) {
370 return getParser().Warning(L, Msg, Range);
371 }
372
373 bool Error(SMLoc L, const Twine &Msg, SMRange Range = None) {
374 return getParser().Error(L, Msg, Range);
375 }
376
377 bool validatetLDMRegList(const MCInst &Inst, const OperandVector &Operands,
378 unsigned ListNo, bool IsARPop = false);
379 bool validatetSTMRegList(const MCInst &Inst, const OperandVector &Operands,
380 unsigned ListNo);
381
382 int tryParseRegister();
383 bool tryParseRegisterWithWriteBack(OperandVector &);
384 int tryParseShiftRegister(OperandVector &);
385 bool parseRegisterList(OperandVector &);
386 bool parseMemory(OperandVector &);
387 bool parseOperand(OperandVector &, StringRef Mnemonic);
388 bool parsePrefix(ARMMCExpr::VariantKind &RefKind);
389 bool parseMemRegOffsetShift(ARM_AM::ShiftOpc &ShiftType,
390 unsigned &ShiftAmount);
391 bool parseLiteralValues(unsigned Size, SMLoc L);
392 bool parseDirectiveThumb(SMLoc L);
393 bool parseDirectiveARM(SMLoc L);
394 bool parseDirectiveThumbFunc(SMLoc L);
395 bool parseDirectiveCode(SMLoc L);
396 bool parseDirectiveSyntax(SMLoc L);
397 bool parseDirectiveReq(StringRef Name, SMLoc L);
398 bool parseDirectiveUnreq(SMLoc L);
399 bool parseDirectiveArch(SMLoc L);
400 bool parseDirectiveEabiAttr(SMLoc L);
401 bool parseDirectiveCPU(SMLoc L);
402 bool parseDirectiveFPU(SMLoc L);
403 bool parseDirectiveFnStart(SMLoc L);
404 bool parseDirectiveFnEnd(SMLoc L);
405 bool parseDirectiveCantUnwind(SMLoc L);
406 bool parseDirectivePersonality(SMLoc L);
407 bool parseDirectiveHandlerData(SMLoc L);
408 bool parseDirectiveSetFP(SMLoc L);
409 bool parseDirectivePad(SMLoc L);
410 bool parseDirectiveRegSave(SMLoc L, bool IsVector);
411 bool parseDirectiveInst(SMLoc L, char Suffix = '\0');
412 bool parseDirectiveLtorg(SMLoc L);
413 bool parseDirectiveEven(SMLoc L);
414 bool parseDirectivePersonalityIndex(SMLoc L);
415 bool parseDirectiveUnwindRaw(SMLoc L);
416 bool parseDirectiveTLSDescSeq(SMLoc L);
417 bool parseDirectiveMovSP(SMLoc L);
418 bool parseDirectiveObjectArch(SMLoc L);
419 bool parseDirectiveArchExtension(SMLoc L);
420 bool parseDirectiveAlign(SMLoc L);
421 bool parseDirectiveThumbSet(SMLoc L);
422
423 StringRef splitMnemonic(StringRef Mnemonic, unsigned &PredicationCode,
424 bool &CarrySetting, unsigned &ProcessorIMod,
425 StringRef &ITMask);
426 void getMnemonicAcceptInfo(StringRef Mnemonic, StringRef FullInst,
427 bool &CanAcceptCarrySet,
428 bool &CanAcceptPredicationCode);
429
430 void tryConvertingToTwoOperandForm(StringRef Mnemonic, bool CarrySetting,
431 OperandVector &Operands);
432 bool isThumb() const {
433 // FIXME: Can tablegen auto-generate this?
434 return getSTI().getFeatureBits()[ARM::ModeThumb];
435 }
436
437 bool isThumbOne() const {
438 return isThumb() && !getSTI().getFeatureBits()[ARM::FeatureThumb2];
439 }
440
441 bool isThumbTwo() const {
442 return isThumb() && getSTI().getFeatureBits()[ARM::FeatureThumb2];
443 }
444
445 bool hasThumb() const {
446 return getSTI().getFeatureBits()[ARM::HasV4TOps];
447 }
448
449 bool hasThumb2() const {
450 return getSTI().getFeatureBits()[ARM::FeatureThumb2];
451 }
452
453 bool hasV6Ops() const {
454 return getSTI().getFeatureBits()[ARM::HasV6Ops];
455 }
456
457 bool hasV6T2Ops() const {
458 return getSTI().getFeatureBits()[ARM::HasV6T2Ops];
459 }
460
461 bool hasV6MOps() const {
462 return getSTI().getFeatureBits()[ARM::HasV6MOps];
463 }
464
465 bool hasV7Ops() const {
466 return getSTI().getFeatureBits()[ARM::HasV7Ops];
467 }
468
469 bool hasV8Ops() const {
470 return getSTI().getFeatureBits()[ARM::HasV8Ops];
471 }
472
473 bool hasV8MBaseline() const {
474 return getSTI().getFeatureBits()[ARM::HasV8MBaselineOps];
475 }
476
477 bool hasV8MMainline() const {
478 return getSTI().getFeatureBits()[ARM::HasV8MMainlineOps];
479 }
480
481 bool has8MSecExt() const {
482 return getSTI().getFeatureBits()[ARM::Feature8MSecExt];
483 }
484
485 bool hasARM() const {
486 return !getSTI().getFeatureBits()[ARM::FeatureNoARM];
487 }
488
489 bool hasDSP() const {
490 return getSTI().getFeatureBits()[ARM::FeatureDSP];
491 }
492
493 bool hasD16() const {
494 return getSTI().getFeatureBits()[ARM::FeatureD16];
495 }
496
497 bool hasV8_1aOps() const {
498 return getSTI().getFeatureBits()[ARM::HasV8_1aOps];
499 }
500
501 bool hasRAS() const {
502 return getSTI().getFeatureBits()[ARM::FeatureRAS];
503 }
504
505 void SwitchMode() {
506 MCSubtargetInfo &STI = copySTI();
507 uint64_t FB = ComputeAvailableFeatures(STI.ToggleFeature(ARM::ModeThumb));
508 setAvailableFeatures(FB);
509 }
510
511 void FixModeAfterArchChange(bool WasThumb, SMLoc Loc);
512
513 bool isMClass() const {
514 return getSTI().getFeatureBits()[ARM::FeatureMClass];
515 }
516
517 /// @name Auto-generated Match Functions
518 /// {
519
520#define GET_ASSEMBLER_HEADER
521#include "ARMGenAsmMatcher.inc"
522
523 /// }
524
525 OperandMatchResultTy parseITCondCode(OperandVector &);
526 OperandMatchResultTy parseCoprocNumOperand(OperandVector &);
527 OperandMatchResultTy parseCoprocRegOperand(OperandVector &);
528 OperandMatchResultTy parseCoprocOptionOperand(OperandVector &);
529 OperandMatchResultTy parseMemBarrierOptOperand(OperandVector &);
530 OperandMatchResultTy parseInstSyncBarrierOptOperand(OperandVector &);
531 OperandMatchResultTy parseProcIFlagsOperand(OperandVector &);
532 OperandMatchResultTy parseMSRMaskOperand(OperandVector &);
533 OperandMatchResultTy parseBankedRegOperand(OperandVector &);
534 OperandMatchResultTy parsePKHImm(OperandVector &O, StringRef Op, int Low,
535 int High);
536 OperandMatchResultTy parsePKHLSLImm(OperandVector &O) {
537 return parsePKHImm(O, "lsl", 0, 31);
538 }
539 OperandMatchResultTy parsePKHASRImm(OperandVector &O) {
540 return parsePKHImm(O, "asr", 1, 32);
541 }
542 OperandMatchResultTy parseSetEndImm(OperandVector &);
543 OperandMatchResultTy parseShifterImm(OperandVector &);
544 OperandMatchResultTy parseRotImm(OperandVector &);
545 OperandMatchResultTy parseModImm(OperandVector &);
546 OperandMatchResultTy parseBitfield(OperandVector &);
547 OperandMatchResultTy parsePostIdxReg(OperandVector &);
548 OperandMatchResultTy parseAM3Offset(OperandVector &);
549 OperandMatchResultTy parseFPImm(OperandVector &);
550 OperandMatchResultTy parseVectorList(OperandVector &);
551 OperandMatchResultTy parseVectorLane(VectorLaneTy &LaneKind, unsigned &Index,
552 SMLoc &EndLoc);
553
554 // Asm Match Converter Methods
555 void cvtThumbMultiply(MCInst &Inst, const OperandVector &);
556 void cvtThumbBranches(MCInst &Inst, const OperandVector &);
557
558 bool validateInstruction(MCInst &Inst, const OperandVector &Ops);
559 bool processInstruction(MCInst &Inst, const OperandVector &Ops, MCStreamer &Out);
560 bool shouldOmitCCOutOperand(StringRef Mnemonic, OperandVector &Operands);
561 bool shouldOmitPredicateOperand(StringRef Mnemonic, OperandVector &Operands);
562 bool isITBlockTerminator(MCInst &Inst) const;
563 void fixupGNULDRDAlias(StringRef Mnemonic, OperandVector &Operands);
564
565public:
566 enum ARMMatchResultTy {
567 Match_RequiresITBlock = FIRST_TARGET_MATCH_RESULT_TY,
568 Match_RequiresNotITBlock,
569 Match_RequiresV6,
570 Match_RequiresThumb2,
571 Match_RequiresV8,
572 Match_RequiresFlagSetting,
573#define GET_OPERAND_DIAGNOSTIC_TYPES
574#include "ARMGenAsmMatcher.inc"
575
576 };
577
578 ARMAsmParser(const MCSubtargetInfo &STI, MCAsmParser &Parser,
579 const MCInstrInfo &MII, const MCTargetOptions &Options)
580 : MCTargetAsmParser(Options, STI, MII), UC(Parser) {
581 MCAsmParserExtension::Initialize(Parser);
582
583 // Cache the MCRegisterInfo.
584 MRI = getContext().getRegisterInfo();
585
586 // Initialize the set of available features.
587 setAvailableFeatures(ComputeAvailableFeatures(STI.getFeatureBits()));
588
589 // Add build attributes based on the selected target.
590 if (AddBuildAttributes)
591 getTargetStreamer().emitTargetAttributes(STI);
592
593 // Not in an ITBlock to start with.
594 ITState.CurPosition = ~0U;
595
596 NextSymbolIsThumb = false;
597 }
598
599 // Implementation of the MCTargetAsmParser interface:
600 bool ParseRegister(unsigned &RegNo, SMLoc &StartLoc, SMLoc &EndLoc) override;
601 bool ParseInstruction(ParseInstructionInfo &Info, StringRef Name,
602 SMLoc NameLoc, OperandVector &Operands) override;
603 bool ParseDirective(AsmToken DirectiveID) override;
604
605 unsigned validateTargetOperandClass(MCParsedAsmOperand &Op,
606 unsigned Kind) override;
607 unsigned checkTargetMatchPredicate(MCInst &Inst) override;
608
609 bool MatchAndEmitInstruction(SMLoc IDLoc, unsigned &Opcode,
610 OperandVector &Operands, MCStreamer &Out,
611 uint64_t &ErrorInfo,
612 bool MatchingInlineAsm) override;
613 unsigned MatchInstruction(OperandVector &Operands, MCInst &Inst,
614 SmallVectorImpl<NearMissInfo> &NearMisses,
615 bool MatchingInlineAsm, bool &EmitInITBlock,
616 MCStreamer &Out);
617
618 struct NearMissMessage {
619 SMLoc Loc;
620 SmallString<128> Message;
621 };
622
623 const char *getCustomOperandDiag(ARMMatchResultTy MatchError);
624
625 void FilterNearMisses(SmallVectorImpl<NearMissInfo> &NearMissesIn,
626 SmallVectorImpl<NearMissMessage> &NearMissesOut,
627 SMLoc IDLoc, OperandVector &Operands);
628 void ReportNearMisses(SmallVectorImpl<NearMissInfo> &NearMisses, SMLoc IDLoc,
629 OperandVector &Operands);
630
631 void onLabelParsed(MCSymbol *Symbol) override;
632};
633
634/// ARMOperand - Instances of this class represent a parsed ARM machine
635/// operand.
636class ARMOperand : public MCParsedAsmOperand {
637 enum KindTy {
638 k_CondCode,
639 k_CCOut,
640 k_ITCondMask,
641 k_CoprocNum,
642 k_CoprocReg,
643 k_CoprocOption,
644 k_Immediate,
645 k_MemBarrierOpt,
646 k_InstSyncBarrierOpt,
647 k_Memory,
648 k_PostIndexRegister,
649 k_MSRMask,
650 k_BankedReg,
651 k_ProcIFlags,
652 k_VectorIndex,
653 k_Register,
654 k_RegisterList,
655 k_DPRRegisterList,
656 k_SPRRegisterList,
657 k_VectorList,
658 k_VectorListAllLanes,
659 k_VectorListIndexed,
660 k_ShiftedRegister,
661 k_ShiftedImmediate,
662 k_ShifterImmediate,
663 k_RotateImmediate,
664 k_ModifiedImmediate,
665 k_ConstantPoolImmediate,
666 k_BitfieldDescriptor,
667 k_Token,
668 } Kind;
669
670 SMLoc StartLoc, EndLoc, AlignmentLoc;
671 SmallVector<unsigned, 8> Registers;
672
673 struct CCOp {
674 ARMCC::CondCodes Val;
675 };
676
677 struct CopOp {
678 unsigned Val;
679 };
680
681 struct CoprocOptionOp {
682 unsigned Val;
683 };
684
685 struct ITMaskOp {
686 unsigned Mask:4;
687 };
688
689 struct MBOptOp {
690 ARM_MB::MemBOpt Val;
691 };
692
693 struct ISBOptOp {
694 ARM_ISB::InstSyncBOpt Val;
695 };
696
697 struct IFlagsOp {
698 ARM_PROC::IFlags Val;
699 };
700
701 struct MMaskOp {
702 unsigned Val;
703 };
704
705 struct BankedRegOp {
706 unsigned Val;
707 };
708
709 struct TokOp {
710 const char *Data;
711 unsigned Length;
712 };
713
714 struct RegOp {
715 unsigned RegNum;
716 };
717
718 // A vector register list is a sequential list of 1 to 4 registers.
719 struct VectorListOp {
720 unsigned RegNum;
721 unsigned Count;
722 unsigned LaneIndex;
723 bool isDoubleSpaced;
724 };
725
726 struct VectorIndexOp {
727 unsigned Val;
728 };
729
730 struct ImmOp {
731 const MCExpr *Val;
732 };
733
734 /// Combined record for all forms of ARM address expressions.
735 struct MemoryOp {
736 unsigned BaseRegNum;
737 // Offset is in OffsetReg or OffsetImm. If both are zero, no offset
738 // was specified.
739 const MCConstantExpr *OffsetImm; // Offset immediate value
740 unsigned OffsetRegNum; // Offset register num, when OffsetImm == NULL
741 ARM_AM::ShiftOpc ShiftType; // Shift type for OffsetReg
742 unsigned ShiftImm; // shift for OffsetReg.
743 unsigned Alignment; // 0 = no alignment specified
744 // n = alignment in bytes (2, 4, 8, 16, or 32)
745 unsigned isNegative : 1; // Negated OffsetReg? (~'U' bit)
746 };
747
748 struct PostIdxRegOp {
749 unsigned RegNum;
750 bool isAdd;
751 ARM_AM::ShiftOpc ShiftTy;
752 unsigned ShiftImm;
753 };
754
755 struct ShifterImmOp {
756 bool isASR;
757 unsigned Imm;
758 };
759
760 struct RegShiftedRegOp {
761 ARM_AM::ShiftOpc ShiftTy;
762 unsigned SrcReg;
763 unsigned ShiftReg;
764 unsigned ShiftImm;
765 };
766
767 struct RegShiftedImmOp {
768 ARM_AM::ShiftOpc ShiftTy;
769 unsigned SrcReg;
770 unsigned ShiftImm;
771 };
772
773 struct RotImmOp {
774 unsigned Imm;
775 };
776
777 struct ModImmOp {
778 unsigned Bits;
779 unsigned Rot;
780 };
781
782 struct BitfieldOp {
783 unsigned LSB;
784 unsigned Width;
785 };
786
787 union {
788 struct CCOp CC;
789 struct CopOp Cop;
790 struct CoprocOptionOp CoprocOption;
791 struct MBOptOp MBOpt;
792 struct ISBOptOp ISBOpt;
793 struct ITMaskOp ITMask;
794 struct IFlagsOp IFlags;
795 struct MMaskOp MMask;
796 struct BankedRegOp BankedReg;
797 struct TokOp Tok;
798 struct RegOp Reg;
799 struct VectorListOp VectorList;
800 struct VectorIndexOp VectorIndex;
801 struct ImmOp Imm;
802 struct MemoryOp Memory;
803 struct PostIdxRegOp PostIdxReg;
804 struct ShifterImmOp ShifterImm;
805 struct RegShiftedRegOp RegShiftedReg;
806 struct RegShiftedImmOp RegShiftedImm;
807 struct RotImmOp RotImm;
808 struct ModImmOp ModImm;
809 struct BitfieldOp Bitfield;
810 };
811
812public:
813 ARMOperand(KindTy K) : MCParsedAsmOperand(), Kind(K) {}
814
815 /// getStartLoc - Get the location of the first token of this operand.
816 SMLoc getStartLoc() const override { return StartLoc; }
817
818 /// getEndLoc - Get the location of the last token of this operand.
819 SMLoc getEndLoc() const override { return EndLoc; }
820
821 /// getLocRange - Get the range between the first and last token of this
822 /// operand.
823 SMRange getLocRange() const { return SMRange(StartLoc, EndLoc); }
824
825 /// getAlignmentLoc - Get the location of the Alignment token of this operand.
826 SMLoc getAlignmentLoc() const {
827 assert(Kind == k_Memory && "Invalid access!")(static_cast <bool> (Kind == k_Memory && "Invalid access!"
) ? void (0) : __assert_fail ("Kind == k_Memory && \"Invalid access!\""
, "/build/llvm-toolchain-snapshot-6.0~svn321639/lib/Target/ARM/AsmParser/ARMAsmParser.cpp"
, 827, __extension__ __PRETTY_FUNCTION__))
;
828 return AlignmentLoc;
829 }
830
831 ARMCC::CondCodes getCondCode() const {
832 assert(Kind == k_CondCode && "Invalid access!")(static_cast <bool> (Kind == k_CondCode && "Invalid access!"
) ? void (0) : __assert_fail ("Kind == k_CondCode && \"Invalid access!\""
, "/build/llvm-toolchain-snapshot-6.0~svn321639/lib/Target/ARM/AsmParser/ARMAsmParser.cpp"
, 832, __extension__ __PRETTY_FUNCTION__))
;
833 return CC.Val;
834 }
835
836 unsigned getCoproc() const {
837 assert((Kind == k_CoprocNum || Kind == k_CoprocReg) && "Invalid access!")(static_cast <bool> ((Kind == k_CoprocNum || Kind == k_CoprocReg
) && "Invalid access!") ? void (0) : __assert_fail ("(Kind == k_CoprocNum || Kind == k_CoprocReg) && \"Invalid access!\""
, "/build/llvm-toolchain-snapshot-6.0~svn321639/lib/Target/ARM/AsmParser/ARMAsmParser.cpp"
, 837, __extension__ __PRETTY_FUNCTION__))
;
838 return Cop.Val;
839 }
840
841 StringRef getToken() const {
842 assert(Kind == k_Token && "Invalid access!")(static_cast <bool> (Kind == k_Token && "Invalid access!"
) ? void (0) : __assert_fail ("Kind == k_Token && \"Invalid access!\""
, "/build/llvm-toolchain-snapshot-6.0~svn321639/lib/Target/ARM/AsmParser/ARMAsmParser.cpp"
, 842, __extension__ __PRETTY_FUNCTION__))
;
843 return StringRef(Tok.Data, Tok.Length);
844 }
845
846 unsigned getReg() const override {
847 assert((Kind == k_Register || Kind == k_CCOut) && "Invalid access!")(static_cast <bool> ((Kind == k_Register || Kind == k_CCOut
) && "Invalid access!") ? void (0) : __assert_fail ("(Kind == k_Register || Kind == k_CCOut) && \"Invalid access!\""
, "/build/llvm-toolchain-snapshot-6.0~svn321639/lib/Target/ARM/AsmParser/ARMAsmParser.cpp"
, 847, __extension__ __PRETTY_FUNCTION__))
;
848 return Reg.RegNum;
849 }
850
851 const SmallVectorImpl<unsigned> &getRegList() const {
852 assert((Kind == k_RegisterList || Kind == k_DPRRegisterList ||(static_cast <bool> ((Kind == k_RegisterList || Kind ==
k_DPRRegisterList || Kind == k_SPRRegisterList) && "Invalid access!"
) ? void (0) : __assert_fail ("(Kind == k_RegisterList || Kind == k_DPRRegisterList || Kind == k_SPRRegisterList) && \"Invalid access!\""
, "/build/llvm-toolchain-snapshot-6.0~svn321639/lib/Target/ARM/AsmParser/ARMAsmParser.cpp"
, 853, __extension__ __PRETTY_FUNCTION__))
853 Kind == k_SPRRegisterList) && "Invalid access!")(static_cast <bool> ((Kind == k_RegisterList || Kind ==
k_DPRRegisterList || Kind == k_SPRRegisterList) && "Invalid access!"
) ? void (0) : __assert_fail ("(Kind == k_RegisterList || Kind == k_DPRRegisterList || Kind == k_SPRRegisterList) && \"Invalid access!\""
, "/build/llvm-toolchain-snapshot-6.0~svn321639/lib/Target/ARM/AsmParser/ARMAsmParser.cpp"
, 853, __extension__ __PRETTY_FUNCTION__))
;
854 return Registers;
855 }
856
857 const MCExpr *getImm() const {
858 assert(isImm() && "Invalid access!")(static_cast <bool> (isImm() && "Invalid access!"
) ? void (0) : __assert_fail ("isImm() && \"Invalid access!\""
, "/build/llvm-toolchain-snapshot-6.0~svn321639/lib/Target/ARM/AsmParser/ARMAsmParser.cpp"
, 858, __extension__ __PRETTY_FUNCTION__))
;
859 return Imm.Val;
860 }
861
862 const MCExpr *getConstantPoolImm() const {
863 assert(isConstantPoolImm() && "Invalid access!")(static_cast <bool> (isConstantPoolImm() && "Invalid access!"
) ? void (0) : __assert_fail ("isConstantPoolImm() && \"Invalid access!\""
, "/build/llvm-toolchain-snapshot-6.0~svn321639/lib/Target/ARM/AsmParser/ARMAsmParser.cpp"
, 863, __extension__ __PRETTY_FUNCTION__))
;
864 return Imm.Val;
865 }
866
867 unsigned getVectorIndex() const {
868 assert(Kind == k_VectorIndex && "Invalid access!")(static_cast <bool> (Kind == k_VectorIndex && "Invalid access!"
) ? void (0) : __assert_fail ("Kind == k_VectorIndex && \"Invalid access!\""
, "/build/llvm-toolchain-snapshot-6.0~svn321639/lib/Target/ARM/AsmParser/ARMAsmParser.cpp"
, 868, __extension__ __PRETTY_FUNCTION__))
;
869 return VectorIndex.Val;
870 }
871
872 ARM_MB::MemBOpt getMemBarrierOpt() const {
873 assert(Kind == k_MemBarrierOpt && "Invalid access!")(static_cast <bool> (Kind == k_MemBarrierOpt &&
"Invalid access!") ? void (0) : __assert_fail ("Kind == k_MemBarrierOpt && \"Invalid access!\""
, "/build/llvm-toolchain-snapshot-6.0~svn321639/lib/Target/ARM/AsmParser/ARMAsmParser.cpp"
, 873, __extension__ __PRETTY_FUNCTION__))
;
874 return MBOpt.Val;
875 }
876
877 ARM_ISB::InstSyncBOpt getInstSyncBarrierOpt() const {
878 assert(Kind == k_InstSyncBarrierOpt && "Invalid access!")(static_cast <bool> (Kind == k_InstSyncBarrierOpt &&
"Invalid access!") ? void (0) : __assert_fail ("Kind == k_InstSyncBarrierOpt && \"Invalid access!\""
, "/build/llvm-toolchain-snapshot-6.0~svn321639/lib/Target/ARM/AsmParser/ARMAsmParser.cpp"
, 878, __extension__ __PRETTY_FUNCTION__))
;
879 return ISBOpt.Val;
880 }
881
882 ARM_PROC::IFlags getProcIFlags() const {
883 assert(Kind == k_ProcIFlags && "Invalid access!")(static_cast <bool> (Kind == k_ProcIFlags && "Invalid access!"
) ? void (0) : __assert_fail ("Kind == k_ProcIFlags && \"Invalid access!\""
, "/build/llvm-toolchain-snapshot-6.0~svn321639/lib/Target/ARM/AsmParser/ARMAsmParser.cpp"
, 883, __extension__ __PRETTY_FUNCTION__))
;
884 return IFlags.Val;
885 }
886
887 unsigned getMSRMask() const {
888 assert(Kind == k_MSRMask && "Invalid access!")(static_cast <bool> (Kind == k_MSRMask && "Invalid access!"
) ? void (0) : __assert_fail ("Kind == k_MSRMask && \"Invalid access!\""
, "/build/llvm-toolchain-snapshot-6.0~svn321639/lib/Target/ARM/AsmParser/ARMAsmParser.cpp"
, 888, __extension__ __PRETTY_FUNCTION__))
;
889 return MMask.Val;
890 }
891
892 unsigned getBankedReg() const {
893 assert(Kind == k_BankedReg && "Invalid access!")(static_cast <bool> (Kind == k_BankedReg && "Invalid access!"
) ? void (0) : __assert_fail ("Kind == k_BankedReg && \"Invalid access!\""
, "/build/llvm-toolchain-snapshot-6.0~svn321639/lib/Target/ARM/AsmParser/ARMAsmParser.cpp"
, 893, __extension__ __PRETTY_FUNCTION__))
;
894 return BankedReg.Val;
895 }
896
897 bool isCoprocNum() const { return Kind == k_CoprocNum; }
898 bool isCoprocReg() const { return Kind == k_CoprocReg; }
899 bool isCoprocOption() const { return Kind == k_CoprocOption; }
900 bool isCondCode() const { return Kind == k_CondCode; }
901 bool isCCOut() const { return Kind == k_CCOut; }
902 bool isITMask() const { return Kind == k_ITCondMask; }
903 bool isITCondCode() const { return Kind == k_CondCode; }
904 bool isImm() const override {
905 return Kind == k_Immediate;
906 }
907
908 bool isARMBranchTarget() const {
909 if (!isImm()) return false;
910
911 if (const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm()))
912 return CE->getValue() % 4 == 0;
913 return true;
914 }
915
916
917 bool isThumbBranchTarget() const {
918 if (!isImm()) return false;
919
920 if (const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm()))
921 return CE->getValue() % 2 == 0;
922 return true;
923 }
924
925 // checks whether this operand is an unsigned offset which fits is a field
926 // of specified width and scaled by a specific number of bits
927 template<unsigned width, unsigned scale>
928 bool isUnsignedOffset() const {
929 if (!isImm()) return false;
930 if (isa<MCSymbolRefExpr>(Imm.Val)) return true;
931 if (const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Imm.Val)) {
932 int64_t Val = CE->getValue();
933 int64_t Align = 1LL << scale;
934 int64_t Max = Align * ((1LL << width) - 1);
935 return ((Val % Align) == 0) && (Val >= 0) && (Val <= Max);
936 }
937 return false;
938 }
939
940 // checks whether this operand is an signed offset which fits is a field
941 // of specified width and scaled by a specific number of bits
942 template<unsigned width, unsigned scale>
943 bool isSignedOffset() const {
944 if (!isImm()) return false;
945 if (isa<MCSymbolRefExpr>(Imm.Val)) return true;
946 if (const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Imm.Val)) {
947 int64_t Val = CE->getValue();
948 int64_t Align = 1LL << scale;
949 int64_t Max = Align * ((1LL << (width-1)) - 1);
950 int64_t Min = -Align * (1LL << (width-1));
951 return ((Val % Align) == 0) && (Val >= Min) && (Val <= Max);
952 }
953 return false;
954 }
955
956 // checks whether this operand is a memory operand computed as an offset
957 // applied to PC. the offset may have 8 bits of magnitude and is represented
958 // with two bits of shift. textually it may be either [pc, #imm], #imm or
959 // relocable expression...
960 bool isThumbMemPC() const {
961 int64_t Val = 0;
962 if (isImm()) {
963 if (isa<MCSymbolRefExpr>(Imm.Val)) return true;
964 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Imm.Val);
965 if (!CE) return false;
966 Val = CE->getValue();
967 }
968 else if (isMem()) {
969 if(!Memory.OffsetImm || Memory.OffsetRegNum) return false;
970 if(Memory.BaseRegNum != ARM::PC) return false;
971 Val = Memory.OffsetImm->getValue();
972 }
973 else return false;
974 return ((Val % 4) == 0) && (Val >= 0) && (Val <= 1020);
975 }
976
977 bool isFPImm() const {
978 if (!isImm()) return false;
979 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
980 if (!CE) return false;
981 int Val = ARM_AM::getFP32Imm(APInt(32, CE->getValue()));
982 return Val != -1;
983 }
984
985 template<int64_t N, int64_t M>
986 bool isImmediate() const {
987 if (!isImm()) return false;
988 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
989 if (!CE) return false;
990 int64_t Value = CE->getValue();
991 return Value >= N && Value <= M;
992 }
993
994 template<int64_t N, int64_t M>
995 bool isImmediateS4() const {
996 if (!isImm()) return false;
997 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
998 if (!CE) return false;
999 int64_t Value = CE->getValue();
1000 return ((Value & 3) == 0) && Value >= N && Value <= M;
1001 }
1002
1003 bool isFBits16() const {
1004 return isImmediate<0, 17>();
1005 }
1006 bool isFBits32() const {
1007 return isImmediate<1, 33>();
1008 }
1009 bool isImm8s4() const {
1010 return isImmediateS4<-1020, 1020>();
1011 }
1012 bool isImm0_1020s4() const {
1013 return isImmediateS4<0, 1020>();
1014 }
1015 bool isImm0_508s4() const {
1016 return isImmediateS4<0, 508>();
1017 }
1018 bool isImm0_508s4Neg() const {
1019 if (!isImm()) return false;
1020 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1021 if (!CE) return false;
1022 int64_t Value = -CE->getValue();
1023 // explicitly exclude zero. we want that to use the normal 0_508 version.
1024 return ((Value & 3) == 0) && Value > 0 && Value <= 508;
1025 }
1026
1027 bool isImm0_4095Neg() const {
1028 if (!isImm()) return false;
1029 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1030 if (!CE) return false;
1031 int64_t Value = -CE->getValue();
1032 return Value > 0 && Value < 4096;
1033 }
1034
1035 bool isImm0_7() const {
1036 return isImmediate<0, 7>();
1037 }
1038
1039 bool isImm1_16() const {
1040 return isImmediate<1, 16>();
1041 }
1042
1043 bool isImm1_32() const {
1044 return isImmediate<1, 32>();
1045 }
1046
1047 bool isImm8_255() const {
1048 return isImmediate<8, 255>();
1049 }
1050
1051 bool isImm256_65535Expr() const {
1052 if (!isImm()) return false;
1053 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1054 // If it's not a constant expression, it'll generate a fixup and be
1055 // handled later.
1056 if (!CE) return true;
1057 int64_t Value = CE->getValue();
1058 return Value >= 256 && Value < 65536;
1059 }
1060
1061 bool isImm0_65535Expr() const {
1062 if (!isImm()) return false;
1063 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1064 // If it's not a constant expression, it'll generate a fixup and be
1065 // handled later.
1066 if (!CE) return true;
1067 int64_t Value = CE->getValue();
1068 return Value >= 0 && Value < 65536;
1069 }
1070
1071 bool isImm24bit() const {
1072 return isImmediate<0, 0xffffff + 1>();
1073 }
1074
1075 bool isImmThumbSR() const {
1076 return isImmediate<1, 33>();
1077 }
1078
1079 bool isPKHLSLImm() const {
1080 return isImmediate<0, 32>();
1081 }
1082
1083 bool isPKHASRImm() const {
1084 return isImmediate<0, 33>();
1085 }
1086
1087 bool isAdrLabel() const {
1088 // If we have an immediate that's not a constant, treat it as a label
1089 // reference needing a fixup.
1090 if (isImm() && !isa<MCConstantExpr>(getImm()))
1091 return true;
1092
1093 // If it is a constant, it must fit into a modified immediate encoding.
1094 if (!isImm()) return false;
1095 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1096 if (!CE) return false;
1097 int64_t Value = CE->getValue();
1098 return (ARM_AM::getSOImmVal(Value) != -1 ||
1099 ARM_AM::getSOImmVal(-Value) != -1);
1100 }
1101
1102 bool isT2SOImm() const {
1103 // If we have an immediate that's not a constant, treat it as an expression
1104 // needing a fixup.
1105 if (isImm() && !isa<MCConstantExpr>(getImm())) {
1106 // We want to avoid matching :upper16: and :lower16: as we want these
1107 // expressions to match in isImm0_65535Expr()
1108 const ARMMCExpr *ARM16Expr = dyn_cast<ARMMCExpr>(getImm());
1109 return (!ARM16Expr || (ARM16Expr->getKind() != ARMMCExpr::VK_ARM_HI16 &&
1110 ARM16Expr->getKind() != ARMMCExpr::VK_ARM_LO16));
1111 }
1112 if (!isImm()) return false;
1113 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1114 if (!CE) return false;
1115 int64_t Value = CE->getValue();
1116 return ARM_AM::getT2SOImmVal(Value) != -1;
1117 }
1118
1119 bool isT2SOImmNot() const {
1120 if (!isImm()) return false;
1121 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1122 if (!CE) return false;
1123 int64_t Value = CE->getValue();
1124 return ARM_AM::getT2SOImmVal(Value) == -1 &&
1125 ARM_AM::getT2SOImmVal(~Value) != -1;
1126 }
1127
1128 bool isT2SOImmNeg() const {
1129 if (!isImm()) return false;
1130 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1131 if (!CE) return false;
1132 int64_t Value = CE->getValue();
1133 // Only use this when not representable as a plain so_imm.
1134 return ARM_AM::getT2SOImmVal(Value) == -1 &&
1135 ARM_AM::getT2SOImmVal(-Value) != -1;
1136 }
1137
1138 bool isSetEndImm() const {
1139 if (!isImm()) return false;
1140 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1141 if (!CE) return false;
1142 int64_t Value = CE->getValue();
1143 return Value == 1 || Value == 0;
1144 }
1145
1146 bool isReg() const override { return Kind == k_Register; }
1147 bool isRegList() const { return Kind == k_RegisterList; }
1148 bool isDPRRegList() const { return Kind == k_DPRRegisterList; }
1149 bool isSPRRegList() const { return Kind == k_SPRRegisterList; }
1150 bool isToken() const override { return Kind == k_Token; }
1151 bool isMemBarrierOpt() const { return Kind == k_MemBarrierOpt; }
1152 bool isInstSyncBarrierOpt() const { return Kind == k_InstSyncBarrierOpt; }
1153 bool isMem() const override { return Kind == k_Memory; }
1154 bool isShifterImm() const { return Kind == k_ShifterImmediate; }
1155 bool isRegShiftedReg() const { return Kind == k_ShiftedRegister; }
1156 bool isRegShiftedImm() const { return Kind == k_ShiftedImmediate; }
1157 bool isRotImm() const { return Kind == k_RotateImmediate; }
1158 bool isModImm() const { return Kind == k_ModifiedImmediate; }
1159
1160 bool isModImmNot() const {
1161 if (!isImm()) return false;
1162 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1163 if (!CE) return false;
1164 int64_t Value = CE->getValue();
1165 return ARM_AM::getSOImmVal(~Value) != -1;
1166 }
1167
1168 bool isModImmNeg() const {
1169 if (!isImm()) return false;
1170 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1171 if (!CE) return false;
1172 int64_t Value = CE->getValue();
1173 return ARM_AM::getSOImmVal(Value) == -1 &&
1174 ARM_AM::getSOImmVal(-Value) != -1;
1175 }
1176
1177 bool isThumbModImmNeg1_7() const {
1178 if (!isImm()) return false;
1179 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1180 if (!CE) return false;
1181 int32_t Value = -(int32_t)CE->getValue();
1182 return 0 < Value && Value < 8;
1183 }
1184
1185 bool isThumbModImmNeg8_255() const {
1186 if (!isImm()) return false;
1187 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1188 if (!CE) return false;
1189 int32_t Value = -(int32_t)CE->getValue();
1190 return 7 < Value && Value < 256;
1191 }
1192
1193 bool isConstantPoolImm() const { return Kind == k_ConstantPoolImmediate; }
1194 bool isBitfield() const { return Kind == k_BitfieldDescriptor; }
1195 bool isPostIdxRegShifted() const { return Kind == k_PostIndexRegister; }
1196 bool isPostIdxReg() const {
1197 return Kind == k_PostIndexRegister && PostIdxReg.ShiftTy ==ARM_AM::no_shift;
1198 }
1199 bool isMemNoOffset(bool alignOK = false, unsigned Alignment = 0) const {
1200 if (!isMem())
1201 return false;
1202 // No offset of any kind.
1203 return Memory.OffsetRegNum == 0 && Memory.OffsetImm == nullptr &&
1204 (alignOK || Memory.Alignment == Alignment);
1205 }
1206 bool isMemPCRelImm12() const {
1207 if (!isMem() || Memory.OffsetRegNum != 0 || Memory.Alignment != 0)
1208 return false;
1209 // Base register must be PC.
1210 if (Memory.BaseRegNum != ARM::PC)
1211 return false;
1212 // Immediate offset in range [-4095, 4095].
1213 if (!Memory.OffsetImm) return true;
1214 int64_t Val = Memory.OffsetImm->getValue();
1215 return (Val > -4096 && Val < 4096) ||
1216 (Val == std::numeric_limits<int32_t>::min());
1217 }
1218
1219 bool isAlignedMemory() const {
1220 return isMemNoOffset(true);
1221 }
1222
1223 bool isAlignedMemoryNone() const {
1224 return isMemNoOffset(false, 0);
1225 }
1226
1227 bool isDupAlignedMemoryNone() const {
1228 return isMemNoOffset(false, 0);
1229 }
1230
1231 bool isAlignedMemory16() const {
1232 if (isMemNoOffset(false, 2)) // alignment in bytes for 16-bits is 2.
1233 return true;
1234 return isMemNoOffset(false, 0);
1235 }
1236
1237 bool isDupAlignedMemory16() const {
1238 if (isMemNoOffset(false, 2)) // alignment in bytes for 16-bits is 2.
1239 return true;
1240 return isMemNoOffset(false, 0);
1241 }
1242
1243 bool isAlignedMemory32() const {
1244 if (isMemNoOffset(false, 4)) // alignment in bytes for 32-bits is 4.
1245 return true;
1246 return isMemNoOffset(false, 0);
1247 }
1248
1249 bool isDupAlignedMemory32() const {
1250 if (isMemNoOffset(false, 4)) // alignment in bytes for 32-bits is 4.
1251 return true;
1252 return isMemNoOffset(false, 0);
1253 }
1254
1255 bool isAlignedMemory64() const {
1256 if (isMemNoOffset(false, 8)) // alignment in bytes for 64-bits is 8.
1257 return true;
1258 return isMemNoOffset(false, 0);
1259 }
1260
1261 bool isDupAlignedMemory64() const {
1262 if (isMemNoOffset(false, 8)) // alignment in bytes for 64-bits is 8.
1263 return true;
1264 return isMemNoOffset(false, 0);
1265 }
1266
1267 bool isAlignedMemory64or128() const {
1268 if (isMemNoOffset(false, 8)) // alignment in bytes for 64-bits is 8.
1269 return true;
1270 if (isMemNoOffset(false, 16)) // alignment in bytes for 128-bits is 16.
1271 return true;
1272 return isMemNoOffset(false, 0);
1273 }
1274
1275 bool isDupAlignedMemory64or128() const {
1276 if (isMemNoOffset(false, 8)) // alignment in bytes for 64-bits is 8.
1277 return true;
1278 if (isMemNoOffset(false, 16)) // alignment in bytes for 128-bits is 16.
1279 return true;
1280 return isMemNoOffset(false, 0);
1281 }
1282
1283 bool isAlignedMemory64or128or256() const {
1284 if (isMemNoOffset(false, 8)) // alignment in bytes for 64-bits is 8.
1285 return true;
1286 if (isMemNoOffset(false, 16)) // alignment in bytes for 128-bits is 16.
1287 return true;
1288 if (isMemNoOffset(false, 32)) // alignment in bytes for 256-bits is 32.
1289 return true;
1290 return isMemNoOffset(false, 0);
1291 }
1292
1293 bool isAddrMode2() const {
1294 if (!isMem() || Memory.Alignment != 0) return false;
1295 // Check for register offset.
1296 if (Memory.OffsetRegNum) return true;
1297 // Immediate offset in range [-4095, 4095].
1298 if (!Memory.OffsetImm) return true;
1299 int64_t Val = Memory.OffsetImm->getValue();
1300 return Val > -4096 && Val < 4096;
1301 }
1302
1303 bool isAM2OffsetImm() const {
1304 if (!isImm()) return false;
1305 // Immediate offset in range [-4095, 4095].
1306 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1307 if (!CE) return false;
1308 int64_t Val = CE->getValue();
1309 return (Val == std::numeric_limits<int32_t>::min()) ||
1310 (Val > -4096 && Val < 4096);
1311 }
1312
1313 bool isAddrMode3() const {
1314 // If we have an immediate that's not a constant, treat it as a label
1315 // reference needing a fixup. If it is a constant, it's something else
1316 // and we reject it.
1317 if (isImm() && !isa<MCConstantExpr>(getImm()))
1318 return true;
1319 if (!isMem() || Memory.Alignment != 0) return false;
1320 // No shifts are legal for AM3.
1321 if (Memory.ShiftType != ARM_AM::no_shift) return false;
1322 // Check for register offset.
1323 if (Memory.OffsetRegNum) return true;
1324 // Immediate offset in range [-255, 255].
1325 if (!Memory.OffsetImm) return true;
1326 int64_t Val = Memory.OffsetImm->getValue();
1327 // The #-0 offset is encoded as std::numeric_limits<int32_t>::min(), and we
1328 // have to check for this too.
1329 return (Val > -256 && Val < 256) ||
1330 Val == std::numeric_limits<int32_t>::min();
1331 }
1332
1333 bool isAM3Offset() const {
1334 if (Kind != k_Immediate && Kind != k_PostIndexRegister)
1335 return false;
1336 if (Kind == k_PostIndexRegister)
1337 return PostIdxReg.ShiftTy == ARM_AM::no_shift;
1338 // Immediate offset in range [-255, 255].
1339 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1340 if (!CE) return false;
1341 int64_t Val = CE->getValue();
1342 // Special case, #-0 is std::numeric_limits<int32_t>::min().
1343 return (Val > -256 && Val < 256) ||
1344 Val == std::numeric_limits<int32_t>::min();
1345 }
1346
1347 bool isAddrMode5() const {
1348 // If we have an immediate that's not a constant, treat it as a label
1349 // reference needing a fixup. If it is a constant, it's something else
1350 // and we reject it.
1351 if (isImm() && !isa<MCConstantExpr>(getImm()))
1352 return true;
1353 if (!isMem() || Memory.Alignment != 0) return false;
1354 // Check for register offset.
1355 if (Memory.OffsetRegNum) return false;
1356 // Immediate offset in range [-1020, 1020] and a multiple of 4.
1357 if (!Memory.OffsetImm) return true;
1358 int64_t Val = Memory.OffsetImm->getValue();
1359 return (Val >= -1020 && Val <= 1020 && ((Val & 3) == 0)) ||
1360 Val == std::numeric_limits<int32_t>::min();
1361 }
1362
1363 bool isAddrMode5FP16() const {
1364 // If we have an immediate that's not a constant, treat it as a label
1365 // reference needing a fixup. If it is a constant, it's something else
1366 // and we reject it.
1367 if (isImm() && !isa<MCConstantExpr>(getImm()))
1368 return true;
1369 if (!isMem() || Memory.Alignment != 0) return false;
1370 // Check for register offset.
1371 if (Memory.OffsetRegNum) return false;
1372 // Immediate offset in range [-510, 510] and a multiple of 2.
1373 if (!Memory.OffsetImm) return true;
1374 int64_t Val = Memory.OffsetImm->getValue();
1375 return (Val >= -510 && Val <= 510 && ((Val & 1) == 0)) ||
1376 Val == std::numeric_limits<int32_t>::min();
1377 }
1378
1379 bool isMemTBB() const {
1380 if (!isMem() || !Memory.OffsetRegNum || Memory.isNegative ||
1381 Memory.ShiftType != ARM_AM::no_shift || Memory.Alignment != 0)
1382 return false;
1383 return true;
1384 }
1385
1386 bool isMemTBH() const {
1387 if (!isMem() || !Memory.OffsetRegNum || Memory.isNegative ||
1388 Memory.ShiftType != ARM_AM::lsl || Memory.ShiftImm != 1 ||
1389 Memory.Alignment != 0 )
1390 return false;
1391 return true;
1392 }
1393
1394 bool isMemRegOffset() const {
1395 if (!isMem() || !Memory.OffsetRegNum || Memory.Alignment != 0)
1396 return false;
1397 return true;
1398 }
1399
1400 bool isT2MemRegOffset() const {
1401 if (!isMem() || !Memory.OffsetRegNum || Memory.isNegative ||
1402 Memory.Alignment != 0 || Memory.BaseRegNum == ARM::PC)
1403 return false;
1404 // Only lsl #{0, 1, 2, 3} allowed.
1405 if (Memory.ShiftType == ARM_AM::no_shift)
1406 return true;
1407 if (Memory.ShiftType != ARM_AM::lsl || Memory.ShiftImm > 3)
1408 return false;
1409 return true;
1410 }
1411
1412 bool isMemThumbRR() const {
1413 // Thumb reg+reg addressing is simple. Just two registers, a base and
1414 // an offset. No shifts, negations or any other complicating factors.
1415 if (!isMem() || !Memory.OffsetRegNum || Memory.isNegative ||
1416 Memory.ShiftType != ARM_AM::no_shift || Memory.Alignment != 0)
1417 return false;
1418 return isARMLowRegister(Memory.BaseRegNum) &&
1419 (!Memory.OffsetRegNum || isARMLowRegister(Memory.OffsetRegNum));
1420 }
1421
1422 bool isMemThumbRIs4() const {
1423 if (!isMem() || Memory.OffsetRegNum != 0 ||
1424 !isARMLowRegister(Memory.BaseRegNum) || Memory.Alignment != 0)
1425 return false;
1426 // Immediate offset, multiple of 4 in range [0, 124].
1427 if (!Memory.OffsetImm) return true;
1428 int64_t Val = Memory.OffsetImm->getValue();
1429 return Val >= 0 && Val <= 124 && (Val % 4) == 0;
1430 }
1431
1432 bool isMemThumbRIs2() const {
1433 if (!isMem() || Memory.OffsetRegNum != 0 ||
1434 !isARMLowRegister(Memory.BaseRegNum) || Memory.Alignment != 0)
1435 return false;
1436 // Immediate offset, multiple of 4 in range [0, 62].
1437 if (!Memory.OffsetImm) return true;
1438 int64_t Val = Memory.OffsetImm->getValue();
1439 return Val >= 0 && Val <= 62 && (Val % 2) == 0;
1440 }
1441
1442 bool isMemThumbRIs1() const {
1443 if (!isMem() || Memory.OffsetRegNum != 0 ||
1444 !isARMLowRegister(Memory.BaseRegNum) || Memory.Alignment != 0)
1445 return false;
1446 // Immediate offset in range [0, 31].
1447 if (!Memory.OffsetImm) return true;
1448 int64_t Val = Memory.OffsetImm->getValue();
1449 return Val >= 0 && Val <= 31;
1450 }
1451
1452 bool isMemThumbSPI() const {
1453 if (!isMem() || Memory.OffsetRegNum != 0 ||
1454 Memory.BaseRegNum != ARM::SP || Memory.Alignment != 0)
1455 return false;
1456 // Immediate offset, multiple of 4 in range [0, 1020].
1457 if (!Memory.OffsetImm) return true;
1458 int64_t Val = Memory.OffsetImm->getValue();
1459 return Val >= 0 && Val <= 1020 && (Val % 4) == 0;
1460 }
1461
1462 bool isMemImm8s4Offset() const {
1463 // If we have an immediate that's not a constant, treat it as a label
1464 // reference needing a fixup. If it is a constant, it's something else
1465 // and we reject it.
1466 if (isImm() && !isa<MCConstantExpr>(getImm()))
1467 return true;
1468 if (!isMem() || Memory.OffsetRegNum != 0 || Memory.Alignment != 0)
1469 return false;
1470 // Immediate offset a multiple of 4 in range [-1020, 1020].
1471 if (!Memory.OffsetImm) return true;
1472 int64_t Val = Memory.OffsetImm->getValue();
1473 // Special case, #-0 is std::numeric_limits<int32_t>::min().
1474 return (Val >= -1020 && Val <= 1020 && (Val & 3) == 0) ||
1475 Val == std::numeric_limits<int32_t>::min();
1476 }
1477
1478 bool isMemImm0_1020s4Offset() const {
1479 if (!isMem() || Memory.OffsetRegNum != 0 || Memory.Alignment != 0)
1480 return false;
1481 // Immediate offset a multiple of 4 in range [0, 1020].
1482 if (!Memory.OffsetImm) return true;
1483 int64_t Val = Memory.OffsetImm->getValue();
1484 return Val >= 0 && Val <= 1020 && (Val & 3) == 0;
1485 }
1486
1487 bool isMemImm8Offset() const {
1488 if (!isMem() || Memory.OffsetRegNum != 0 || Memory.Alignment != 0)
1489 return false;
1490 // Base reg of PC isn't allowed for these encodings.
1491 if (Memory.BaseRegNum == ARM::PC) return false;
1492 // Immediate offset in range [-255, 255].
1493 if (!Memory.OffsetImm) return true;
1494 int64_t Val = Memory.OffsetImm->getValue();
1495 return (Val == std::numeric_limits<int32_t>::min()) ||
1496 (Val > -256 && Val < 256);
1497 }
1498
1499 bool isMemPosImm8Offset() const {
1500 if (!isMem() || Memory.OffsetRegNum != 0 || Memory.Alignment != 0)
1501 return false;
1502 // Immediate offset in range [0, 255].
1503 if (!Memory.OffsetImm) return true;
1504 int64_t Val = Memory.OffsetImm->getValue();
1505 return Val >= 0 && Val < 256;
1506 }
1507
1508 bool isMemNegImm8Offset() const {
1509 if (!isMem() || Memory.OffsetRegNum != 0 || Memory.Alignment != 0)
1510 return false;
1511 // Base reg of PC isn't allowed for these encodings.
1512 if (Memory.BaseRegNum == ARM::PC) return false;
1513 // Immediate offset in range [-255, -1].
1514 if (!Memory.OffsetImm) return false;
1515 int64_t Val = Memory.OffsetImm->getValue();
1516 return (Val == std::numeric_limits<int32_t>::min()) ||
1517 (Val > -256 && Val < 0);
1518 }
1519
1520 bool isMemUImm12Offset() const {
1521 if (!isMem() || Memory.OffsetRegNum != 0 || Memory.Alignment != 0)
1522 return false;
1523 // Immediate offset in range [0, 4095].
1524 if (!Memory.OffsetImm) return true;
1525 int64_t Val = Memory.OffsetImm->getValue();
1526 return (Val >= 0 && Val < 4096);
1527 }
1528
1529 bool isMemImm12Offset() const {
1530 // If we have an immediate that's not a constant, treat it as a label
1531 // reference needing a fixup. If it is a constant, it's something else
1532 // and we reject it.
1533
1534 if (isImm() && !isa<MCConstantExpr>(getImm()))
1535 return true;
1536
1537 if (!isMem() || Memory.OffsetRegNum != 0 || Memory.Alignment != 0)
1538 return false;
1539 // Immediate offset in range [-4095, 4095].
1540 if (!Memory.OffsetImm) return true;
1541 int64_t Val = Memory.OffsetImm->getValue();
1542 return (Val > -4096 && Val < 4096) ||
1543 (Val == std::numeric_limits<int32_t>::min());
1544 }
1545
1546 bool isConstPoolAsmImm() const {
1547 // Delay processing of Constant Pool Immediate, this will turn into
1548 // a constant. Match no other operand
1549 return (isConstantPoolImm());
1550 }
1551
1552 bool isPostIdxImm8() const {
1553 if (!isImm()) return false;
1554 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1555 if (!CE) return false;
1556 int64_t Val = CE->getValue();
1557 return (Val > -256 && Val < 256) ||
1558 (Val == std::numeric_limits<int32_t>::min());
1559 }
1560
1561 bool isPostIdxImm8s4() const {
1562 if (!isImm()) return false;
1563 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1564 if (!CE) return false;
1565 int64_t Val = CE->getValue();
1566 return ((Val & 3) == 0 && Val >= -1020 && Val <= 1020) ||
1567 (Val == std::numeric_limits<int32_t>::min());
1568 }
1569
1570 bool isMSRMask() const { return Kind == k_MSRMask; }
1571 bool isBankedReg() const { return Kind == k_BankedReg; }
1572 bool isProcIFlags() const { return Kind == k_ProcIFlags; }
1573
1574 // NEON operands.
1575 bool isSingleSpacedVectorList() const {
1576 return Kind == k_VectorList && !VectorList.isDoubleSpaced;
1577 }
1578
1579 bool isDoubleSpacedVectorList() const {
1580 return Kind == k_VectorList && VectorList.isDoubleSpaced;
1581 }
1582
1583 bool isVecListOneD() const {
1584 if (!isSingleSpacedVectorList()) return false;
1585 return VectorList.Count == 1;
1586 }
1587
1588 bool isVecListDPair() const {
1589 if (!isSingleSpacedVectorList()) return false;
1590 return (ARMMCRegisterClasses[ARM::DPairRegClassID]
1591 .contains(VectorList.RegNum));
1592 }
1593
1594 bool isVecListThreeD() const {
1595 if (!isSingleSpacedVectorList()) return false;
1596 return VectorList.Count == 3;
1597 }
1598
1599 bool isVecListFourD() const {
1600 if (!isSingleSpacedVectorList()) return false;
1601 return VectorList.Count == 4;
1602 }
1603
1604 bool isVecListDPairSpaced() const {
1605 if (Kind != k_VectorList) return false;
1606 if (isSingleSpacedVectorList()) return false;
1607 return (ARMMCRegisterClasses[ARM::DPairSpcRegClassID]
1608 .contains(VectorList.RegNum));
1609 }
1610
1611 bool isVecListThreeQ() const {
1612 if (!isDoubleSpacedVectorList()) return false;
1613 return VectorList.Count == 3;
1614 }
1615
1616 bool isVecListFourQ() const {
1617 if (!isDoubleSpacedVectorList()) return false;
1618 return VectorList.Count == 4;
1619 }
1620
1621 bool isSingleSpacedVectorAllLanes() const {
1622 return Kind == k_VectorListAllLanes && !VectorList.isDoubleSpaced;
1623 }
1624
1625 bool isDoubleSpacedVectorAllLanes() const {
1626 return Kind == k_VectorListAllLanes && VectorList.isDoubleSpaced;
1627 }
1628
1629 bool isVecListOneDAllLanes() const {
1630 if (!isSingleSpacedVectorAllLanes()) return false;
1631 return VectorList.Count == 1;
1632 }
1633
1634 bool isVecListDPairAllLanes() const {
1635 if (!isSingleSpacedVectorAllLanes()) return false;
1636 return (ARMMCRegisterClasses[ARM::DPairRegClassID]
1637 .contains(VectorList.RegNum));
1638 }
1639
1640 bool isVecListDPairSpacedAllLanes() const {
1641 if (!isDoubleSpacedVectorAllLanes()) return false;
1642 return VectorList.Count == 2;
1643 }
1644
1645 bool isVecListThreeDAllLanes() const {
1646 if (!isSingleSpacedVectorAllLanes()) return false;
1647 return VectorList.Count == 3;
1648 }
1649
1650 bool isVecListThreeQAllLanes() const {
1651 if (!isDoubleSpacedVectorAllLanes()) return false;
1652 return VectorList.Count == 3;
1653 }
1654
1655 bool isVecListFourDAllLanes() const {
1656 if (!isSingleSpacedVectorAllLanes()) return false;
1657 return VectorList.Count == 4;
1658 }
1659
1660 bool isVecListFourQAllLanes() const {
1661 if (!isDoubleSpacedVectorAllLanes()) return false;
1662 return VectorList.Count == 4;
1663 }
1664
1665 bool isSingleSpacedVectorIndexed() const {
1666 return Kind == k_VectorListIndexed && !VectorList.isDoubleSpaced;
1667 }
1668
1669 bool isDoubleSpacedVectorIndexed() const {
1670 return Kind == k_VectorListIndexed && VectorList.isDoubleSpaced;
1671 }
1672
1673 bool isVecListOneDByteIndexed() const {
1674 if (!isSingleSpacedVectorIndexed()) return false;
1675 return VectorList.Count == 1 && VectorList.LaneIndex <= 7;
1676 }
1677
1678 bool isVecListOneDHWordIndexed() const {
1679 if (!isSingleSpacedVectorIndexed()) return false;
1680 return VectorList.Count == 1 && VectorList.LaneIndex <= 3;
1681 }
1682
1683 bool isVecListOneDWordIndexed() const {
1684 if (!isSingleSpacedVectorIndexed()) return false;
1685 return VectorList.Count == 1 && VectorList.LaneIndex <= 1;
1686 }
1687
1688 bool isVecListTwoDByteIndexed() const {
1689 if (!isSingleSpacedVectorIndexed()) return false;
1690 return VectorList.Count == 2 && VectorList.LaneIndex <= 7;
1691 }
1692
1693 bool isVecListTwoDHWordIndexed() const {
1694 if (!isSingleSpacedVectorIndexed()) return false;
1695 return VectorList.Count == 2 && VectorList.LaneIndex <= 3;
1696 }
1697
1698 bool isVecListTwoQWordIndexed() const {
1699 if (!isDoubleSpacedVectorIndexed()) return false;
1700 return VectorList.Count == 2 && VectorList.LaneIndex <= 1;
1701 }
1702
1703 bool isVecListTwoQHWordIndexed() const {
1704 if (!isDoubleSpacedVectorIndexed()) return false;
1705 return VectorList.Count == 2 && VectorList.LaneIndex <= 3;
1706 }
1707
1708 bool isVecListTwoDWordIndexed() const {
1709 if (!isSingleSpacedVectorIndexed()) return false;
1710 return VectorList.Count == 2 && VectorList.LaneIndex <= 1;
1711 }
1712
1713 bool isVecListThreeDByteIndexed() const {
1714 if (!isSingleSpacedVectorIndexed()) return false;
1715 return VectorList.Count == 3 && VectorList.LaneIndex <= 7;
1716 }
1717
1718 bool isVecListThreeDHWordIndexed() const {
1719 if (!isSingleSpacedVectorIndexed()) return false;
1720 return VectorList.Count == 3 && VectorList.LaneIndex <= 3;
1721 }
1722
1723 bool isVecListThreeQWordIndexed() const {
1724 if (!isDoubleSpacedVectorIndexed()) return false;
1725 return VectorList.Count == 3 && VectorList.LaneIndex <= 1;
1726 }
1727
1728 bool isVecListThreeQHWordIndexed() const {
1729 if (!isDoubleSpacedVectorIndexed()) return false;
1730 return VectorList.Count == 3 && VectorList.LaneIndex <= 3;
1731 }
1732
1733 bool isVecListThreeDWordIndexed() const {
1734 if (!isSingleSpacedVectorIndexed()) return false;
1735 return VectorList.Count == 3 && VectorList.LaneIndex <= 1;
1736 }
1737
1738 bool isVecListFourDByteIndexed() const {
1739 if (!isSingleSpacedVectorIndexed()) return false;
1740 return VectorList.Count == 4 && VectorList.LaneIndex <= 7;
1741 }
1742
1743 bool isVecListFourDHWordIndexed() const {
1744 if (!isSingleSpacedVectorIndexed()) return false;
1745 return VectorList.Count == 4 && VectorList.LaneIndex <= 3;
1746 }
1747
1748 bool isVecListFourQWordIndexed() const {
1749 if (!isDoubleSpacedVectorIndexed()) return false;
1750 return VectorList.Count == 4 && VectorList.LaneIndex <= 1;
1751 }
1752
1753 bool isVecListFourQHWordIndexed() const {
1754 if (!isDoubleSpacedVectorIndexed()) return false;
1755 return VectorList.Count == 4 && VectorList.LaneIndex <= 3;
1756 }
1757
1758 bool isVecListFourDWordIndexed() const {
1759 if (!isSingleSpacedVectorIndexed()) return false;
1760 return VectorList.Count == 4 && VectorList.LaneIndex <= 1;
1761 }
1762
1763 bool isVectorIndex8() const {
1764 if (Kind != k_VectorIndex) return false;
1765 return VectorIndex.Val < 8;
1766 }
1767
1768 bool isVectorIndex16() const {
1769 if (Kind != k_VectorIndex) return false;
1770 return VectorIndex.Val < 4;
1771 }
1772
1773 bool isVectorIndex32() const {
1774 if (Kind != k_VectorIndex) return false;
1775 return VectorIndex.Val < 2;
1776 }
1777 bool isVectorIndex64() const {
1778 if (Kind != k_VectorIndex) return false;
1779 return VectorIndex.Val < 1;
1780 }
1781
1782 bool isNEONi8splat() const {
1783 if (!isImm()) return false;
1784 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1785 // Must be a constant.
1786 if (!CE) return false;
1787 int64_t Value = CE->getValue();
1788 // i8 value splatted across 8 bytes. The immediate is just the 8 byte
1789 // value.
1790 return Value >= 0 && Value < 256;
1791 }
1792
1793 bool isNEONi16splat() const {
1794 if (isNEONByteReplicate(2))
1795 return false; // Leave that for bytes replication and forbid by default.
1796 if (!isImm())
1797 return false;
1798 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1799 // Must be a constant.
1800 if (!CE) return false;
1801 unsigned Value = CE->getValue();
1802 return ARM_AM::isNEONi16splat(Value);
1803 }
1804
1805 bool isNEONi16splatNot() const {
1806 if (!isImm())
1807 return false;
1808 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1809 // Must be a constant.
1810 if (!CE) return false;
1811 unsigned Value = CE->getValue();
1812 return ARM_AM::isNEONi16splat(~Value & 0xffff);
1813 }
1814
1815 bool isNEONi32splat() const {
1816 if (isNEONByteReplicate(4))
1817 return false; // Leave that for bytes replication and forbid by default.
1818 if (!isImm())
1819 return false;
1820 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1821 // Must be a constant.
1822 if (!CE) return false;
1823 unsigned Value = CE->getValue();
1824 return ARM_AM::isNEONi32splat(Value);
1825 }
1826
1827 bool isNEONi32splatNot() const {
1828 if (!isImm())
1829 return false;
1830 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1831 // Must be a constant.
1832 if (!CE) return false;
1833 unsigned Value = CE->getValue();
1834 return ARM_AM::isNEONi32splat(~Value);
1835 }
1836
1837 bool isNEONByteReplicate(unsigned NumBytes) const {
1838 if (!isImm())
1839 return false;
1840 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1841 // Must be a constant.
1842 if (!CE)
1843 return false;
1844 int64_t Value = CE->getValue();
1845 if (!Value)
1846 return false; // Don't bother with zero.
1847
1848 unsigned char B = Value & 0xff;
1849 for (unsigned i = 1; i < NumBytes; ++i) {
1850 Value >>= 8;
1851 if ((Value & 0xff) != B)
1852 return false;
1853 }
1854 return true;
1855 }
1856
1857 bool isNEONi16ByteReplicate() const { return isNEONByteReplicate(2); }
1858 bool isNEONi32ByteReplicate() const { return isNEONByteReplicate(4); }
1859
1860 bool isNEONi32vmov() const {
1861 if (isNEONByteReplicate(4))
1862 return false; // Let it to be classified as byte-replicate case.
1863 if (!isImm())
1864 return false;
1865 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1866 // Must be a constant.
1867 if (!CE)
1868 return false;
1869 int64_t Value = CE->getValue();
1870 // i32 value with set bits only in one byte X000, 0X00, 00X0, or 000X,
1871 // for VMOV/VMVN only, 00Xf or 0Xff are also accepted.
1872 // FIXME: This is probably wrong and a copy and paste from previous example
1873 return (Value >= 0 && Value < 256) ||
1874 (Value >= 0x0100 && Value <= 0xff00) ||
1875 (Value >= 0x010000 && Value <= 0xff0000) ||
1876 (Value >= 0x01000000 && Value <= 0xff000000) ||
1877 (Value >= 0x01ff && Value <= 0xffff && (Value & 0xff) == 0xff) ||
1878 (Value >= 0x01ffff && Value <= 0xffffff && (Value & 0xffff) == 0xffff);
1879 }
1880
1881 bool isNEONi32vmovNeg() const {
1882 if (!isImm()) return false;
1883 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1884 // Must be a constant.
1885 if (!CE) return false;
1886 int64_t Value = ~CE->getValue();
1887 // i32 value with set bits only in one byte X000, 0X00, 00X0, or 000X,
1888 // for VMOV/VMVN only, 00Xf or 0Xff are also accepted.
1889 // FIXME: This is probably wrong and a copy and paste from previous example
1890 return (Value >= 0 && Value < 256) ||
1891 (Value >= 0x0100 && Value <= 0xff00) ||
1892 (Value >= 0x010000 && Value <= 0xff0000) ||
1893 (Value >= 0x01000000 && Value <= 0xff000000) ||
1894 (Value >= 0x01ff && Value <= 0xffff && (Value & 0xff) == 0xff) ||
1895 (Value >= 0x01ffff && Value <= 0xffffff && (Value & 0xffff) == 0xffff);
1896 }
1897
1898 bool isNEONi64splat() const {
1899 if (!isImm()) return false;
1900 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1901 // Must be a constant.
1902 if (!CE) return false;
1903 uint64_t Value = CE->getValue();
1904 // i64 value with each byte being either 0 or 0xff.
1905 for (unsigned i = 0; i < 8; ++i, Value >>= 8)
1906 if ((Value & 0xff) != 0 && (Value & 0xff) != 0xff) return false;
1907 return true;
1908 }
1909
1910 template<int64_t Angle, int64_t Remainder>
1911 bool isComplexRotation() const {
1912 if (!isImm()) return false;
1913
1914 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1915 if (!CE) return false;
1916 uint64_t Value = CE->getValue();
1917
1918 return (Value % Angle == Remainder && Value <= 270);
1919 }
1920
1921 void addExpr(MCInst &Inst, const MCExpr *Expr) const {
1922 // Add as immediates when possible. Null MCExpr = 0.
1923 if (!Expr)
1924 Inst.addOperand(MCOperand::createImm(0));
1925 else if (const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Expr))
1926 Inst.addOperand(MCOperand::createImm(CE->getValue()));
1927 else
1928 Inst.addOperand(MCOperand::createExpr(Expr));
1929 }
1930
1931 void addARMBranchTargetOperands(MCInst &Inst, unsigned N) const {
1932 assert(N == 1 && "Invalid number of operands!")(static_cast <bool> (N == 1 && "Invalid number of operands!"
) ? void (0) : __assert_fail ("N == 1 && \"Invalid number of operands!\""
, "/build/llvm-toolchain-snapshot-6.0~svn321639/lib/Target/ARM/AsmParser/ARMAsmParser.cpp"
, 1932, __extension__ __PRETTY_FUNCTION__))
;
1933 addExpr(Inst, getImm());
1934 }
1935
1936 void addThumbBranchTargetOperands(MCInst &Inst, unsigned N) const {
1937 assert(N == 1 && "Invalid number of operands!")(static_cast <bool> (N == 1 && "Invalid number of operands!"
) ? void (0) : __assert_fail ("N == 1 && \"Invalid number of operands!\""
, "/build/llvm-toolchain-snapshot-6.0~svn321639/lib/Target/ARM/AsmParser/ARMAsmParser.cpp"
, 1937, __extension__ __PRETTY_FUNCTION__))
;
1938 addExpr(Inst, getImm());
1939 }
1940
1941 void addCondCodeOperands(MCInst &Inst, unsigned N) const {
1942 assert(N == 2 && "Invalid number of operands!")(static_cast <bool> (N == 2 && "Invalid number of operands!"
) ? void (0) : __assert_fail ("N == 2 && \"Invalid number of operands!\""
, "/build/llvm-toolchain-snapshot-6.0~svn321639/lib/Target/ARM/AsmParser/ARMAsmParser.cpp"
, 1942, __extension__ __PRETTY_FUNCTION__))
;
1943 Inst.addOperand(MCOperand::createImm(unsigned(getCondCode())));
1944 unsigned RegNum = getCondCode() == ARMCC::AL ? 0: ARM::CPSR;
1945 Inst.addOperand(MCOperand::createReg(RegNum));
1946 }
1947
1948 void addCoprocNumOperands(MCInst &Inst, unsigned N) const {
1949 assert(N == 1 && "Invalid number of operands!")(static_cast <bool> (N == 1 && "Invalid number of operands!"
) ? void (0) : __assert_fail ("N == 1 && \"Invalid number of operands!\""
, "/build/llvm-toolchain-snapshot-6.0~svn321639/lib/Target/ARM/AsmParser/ARMAsmParser.cpp"
, 1949, __extension__ __PRETTY_FUNCTION__))
;
1950 Inst.addOperand(MCOperand::createImm(getCoproc()));
1951 }
1952
1953 void addCoprocRegOperands(MCInst &Inst, unsigned N) const {
1954 assert(N == 1 && "Invalid number of operands!")(static_cast <bool> (N == 1 && "Invalid number of operands!"
) ? void (0) : __assert_fail ("N == 1 && \"Invalid number of operands!\""
, "/build/llvm-toolchain-snapshot-6.0~svn321639/lib/Target/ARM/AsmParser/ARMAsmParser.cpp"
, 1954, __extension__ __PRETTY_FUNCTION__))
;
1955 Inst.addOperand(MCOperand::createImm(getCoproc()));
1956 }
1957
1958 void addCoprocOptionOperands(MCInst &Inst, unsigned N) const {
1959 assert(N == 1 && "Invalid number of operands!")(static_cast <bool> (N == 1 && "Invalid number of operands!"
) ? void (0) : __assert_fail ("N == 1 && \"Invalid number of operands!\""
, "/build/llvm-toolchain-snapshot-6.0~svn321639/lib/Target/ARM/AsmParser/ARMAsmParser.cpp"
, 1959, __extension__ __PRETTY_FUNCTION__))
;
1960 Inst.addOperand(MCOperand::createImm(CoprocOption.Val));
1961 }
1962
1963 void addITMaskOperands(MCInst &Inst, unsigned N) const {
1964 assert(N == 1 && "Invalid number of operands!")(static_cast <bool> (N == 1 && "Invalid number of operands!"
) ? void (0) : __assert_fail ("N == 1 && \"Invalid number of operands!\""
, "/build/llvm-toolchain-snapshot-6.0~svn321639/lib/Target/ARM/AsmParser/ARMAsmParser.cpp"
, 1964, __extension__ __PRETTY_FUNCTION__))
;
1965 Inst.addOperand(MCOperand::createImm(ITMask.Mask));
1966 }
1967
1968 void addITCondCodeOperands(MCInst &Inst, unsigned N) const {
1969 assert(N == 1 && "Invalid number of operands!")(static_cast <bool> (N == 1 && "Invalid number of operands!"
) ? void (0) : __assert_fail ("N == 1 && \"Invalid number of operands!\""
, "/build/llvm-toolchain-snapshot-6.0~svn321639/lib/Target/ARM/AsmParser/ARMAsmParser.cpp"
, 1969, __extension__ __PRETTY_FUNCTION__))
;
1970 Inst.addOperand(MCOperand::createImm(unsigned(getCondCode())));
1971 }
1972
1973 void addCCOutOperands(MCInst &Inst, unsigned N) const {
1974 assert(N == 1 && "Invalid number of operands!")(static_cast <bool> (N == 1 && "Invalid number of operands!"
) ? void (0) : __assert_fail ("N == 1 && \"Invalid number of operands!\""
, "/build/llvm-toolchain-snapshot-6.0~svn321639/lib/Target/ARM/AsmParser/ARMAsmParser.cpp"
, 1974, __extension__ __PRETTY_FUNCTION__))
;
1975 Inst.addOperand(MCOperand::createReg(getReg()));
1976 }
1977
1978 void addRegOperands(MCInst &Inst, unsigned N) const {
1979 assert(N == 1 && "Invalid number of operands!")(static_cast <bool> (N == 1 && "Invalid number of operands!"
) ? void (0) : __assert_fail ("N == 1 && \"Invalid number of operands!\""
, "/build/llvm-toolchain-snapshot-6.0~svn321639/lib/Target/ARM/AsmParser/ARMAsmParser.cpp"
, 1979, __extension__ __PRETTY_FUNCTION__))
;
1980 Inst.addOperand(MCOperand::createReg(getReg()));
1981 }
1982
1983 void addRegShiftedRegOperands(MCInst &Inst, unsigned N) const {
1984 assert(N == 3 && "Invalid number of operands!")(static_cast <bool> (N == 3 && "Invalid number of operands!"
) ? void (0) : __assert_fail ("N == 3 && \"Invalid number of operands!\""
, "/build/llvm-toolchain-snapshot-6.0~svn321639/lib/Target/ARM/AsmParser/ARMAsmParser.cpp"
, 1984, __extension__ __PRETTY_FUNCTION__))
;
1985 assert(isRegShiftedReg() &&(static_cast <bool> (isRegShiftedReg() && "addRegShiftedRegOperands() on non-RegShiftedReg!"
) ? void (0) : __assert_fail ("isRegShiftedReg() && \"addRegShiftedRegOperands() on non-RegShiftedReg!\""
, "/build/llvm-toolchain-snapshot-6.0~svn321639/lib/Target/ARM/AsmParser/ARMAsmParser.cpp"
, 1986, __extension__ __PRETTY_FUNCTION__))
1986 "addRegShiftedRegOperands() on non-RegShiftedReg!")(static_cast <bool> (isRegShiftedReg() && "addRegShiftedRegOperands() on non-RegShiftedReg!"
) ? void (0) : __assert_fail ("isRegShiftedReg() && \"addRegShiftedRegOperands() on non-RegShiftedReg!\""
, "/build/llvm-toolchain-snapshot-6.0~svn321639/lib/Target/ARM/AsmParser/ARMAsmParser.cpp"
, 1986, __extension__ __PRETTY_FUNCTION__))
;
1987 Inst.addOperand(MCOperand::createReg(RegShiftedReg.SrcReg));
1988 Inst.addOperand(MCOperand::createReg(RegShiftedReg.ShiftReg));
1989 Inst.addOperand(MCOperand::createImm(
1990 ARM_AM::getSORegOpc(RegShiftedReg.ShiftTy, RegShiftedReg.ShiftImm)));
1991 }
1992
1993 void addRegShiftedImmOperands(MCInst &Inst, unsigned N) const {
1994 assert(N == 2 && "Invalid number of operands!")(static_cast <bool> (N == 2 && "Invalid number of operands!"
) ? void (0) : __assert_fail ("N == 2 && \"Invalid number of operands!\""
, "/build/llvm-toolchain-snapshot-6.0~svn321639/lib/Target/ARM/AsmParser/ARMAsmParser.cpp"
, 1994, __extension__ __PRETTY_FUNCTION__))
;
1995 assert(isRegShiftedImm() &&(static_cast <bool> (isRegShiftedImm() && "addRegShiftedImmOperands() on non-RegShiftedImm!"
) ? void (0) : __assert_fail ("isRegShiftedImm() && \"addRegShiftedImmOperands() on non-RegShiftedImm!\""
, "/build/llvm-toolchain-snapshot-6.0~svn321639/lib/Target/ARM/AsmParser/ARMAsmParser.cpp"
, 1996, __extension__ __PRETTY_FUNCTION__))
1996 "addRegShiftedImmOperands() on non-RegShiftedImm!")(static_cast <bool> (isRegShiftedImm() && "addRegShiftedImmOperands() on non-RegShiftedImm!"
) ? void (0) : __assert_fail ("isRegShiftedImm() && \"addRegShiftedImmOperands() on non-RegShiftedImm!\""
, "/build/llvm-toolchain-snapshot-6.0~svn321639/lib/Target/ARM/AsmParser/ARMAsmParser.cpp"
, 1996, __extension__ __PRETTY_FUNCTION__))
;
1997 Inst.addOperand(MCOperand::createReg(RegShiftedImm.SrcReg));
1998 // Shift of #32 is encoded as 0 where permitted
1999 unsigned Imm = (RegShiftedImm.ShiftImm == 32 ? 0 : RegShiftedImm.ShiftImm);
2000 Inst.addOperand(MCOperand::createImm(
2001 ARM_AM::getSORegOpc(RegShiftedImm.ShiftTy, Imm)));
2002 }
2003
2004 void addShifterImmOperands(MCInst &Inst, unsigned N) const {
2005 assert(N == 1 && "Invalid number of operands!")(static_cast <bool> (N == 1 && "Invalid number of operands!"
) ? void (0) : __assert_fail ("N == 1 && \"Invalid number of operands!\""
, "/build/llvm-toolchain-snapshot-6.0~svn321639/lib/Target/ARM/AsmParser/ARMAsmParser.cpp"
, 2005, __extension__ __PRETTY_FUNCTION__))
;
2006 Inst.addOperand(MCOperand::createImm((ShifterImm.isASR << 5) |
2007 ShifterImm.Imm));
2008 }
2009
2010 void addRegListOperands(MCInst &Inst, unsigned N) const {
2011 assert(N == 1 && "Invalid number of operands!")(static_cast <bool> (N == 1 && "Invalid number of operands!"
) ? void (0) : __assert_fail ("N == 1 && \"Invalid number of operands!\""
, "/build/llvm-toolchain-snapshot-6.0~svn321639/lib/Target/ARM/AsmParser/ARMAsmParser.cpp"
, 2011, __extension__ __PRETTY_FUNCTION__))
;
2012 const SmallVectorImpl<unsigned> &RegList = getRegList();
2013 for (SmallVectorImpl<unsigned>::const_iterator
2014 I = RegList.begin(), E = RegList.end(); I != E; ++I)
2015 Inst.addOperand(MCOperand::createReg(*I));
2016 }
2017
2018 void addDPRRegListOperands(MCInst &Inst, unsigned N) const {
2019 addRegListOperands(Inst, N);
2020 }
2021
2022 void addSPRRegListOperands(MCInst &Inst, unsigned N) const {
2023 addRegListOperands(Inst, N);
2024 }
2025
2026 void addRotImmOperands(MCInst &Inst, unsigned N) const {
2027 assert(N == 1 && "Invalid number of operands!")(static_cast <bool> (N == 1 && "Invalid number of operands!"
) ? void (0) : __assert_fail ("N == 1 && \"Invalid number of operands!\""
, "/build/llvm-toolchain-snapshot-6.0~svn321639/lib/Target/ARM/AsmParser/ARMAsmParser.cpp"
, 2027, __extension__ __PRETTY_FUNCTION__))
;
2028 // Encoded as val>>3. The printer handles display as 8, 16, 24.
2029 Inst.addOperand(MCOperand::createImm(RotImm.Imm >> 3));
2030 }
2031
2032 void addModImmOperands(MCInst &Inst, unsigned N) const {
2033 assert(N == 1 && "Invalid number of operands!")(static_cast <bool> (N == 1 && "Invalid number of operands!"
) ? void (0) : __assert_fail ("N == 1 && \"Invalid number of operands!\""
, "/build/llvm-toolchain-snapshot-6.0~svn321639/lib/Target/ARM/AsmParser/ARMAsmParser.cpp"
, 2033, __extension__ __PRETTY_FUNCTION__))
;
2034
2035 // Support for fixups (MCFixup)
2036 if (isImm())
2037 return addImmOperands(Inst, N);
2038
2039 Inst.addOperand(MCOperand::createImm(ModImm.Bits | (ModImm.Rot << 7)));
2040 }
2041
2042 void addModImmNotOperands(MCInst &Inst, unsigned N) const {
2043 assert(N == 1 && "Invalid number of operands!")(static_cast <bool> (N == 1 && "Invalid number of operands!"
) ? void (0) : __assert_fail ("N == 1 && \"Invalid number of operands!\""
, "/build/llvm-toolchain-snapshot-6.0~svn321639/lib/Target/ARM/AsmParser/ARMAsmParser.cpp"
, 2043, __extension__ __PRETTY_FUNCTION__))
;
2044 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
2045 uint32_t Enc = ARM_AM::getSOImmVal(~CE->getValue());
2046 Inst.addOperand(MCOperand::createImm(Enc));
2047 }
2048
2049 void addModImmNegOperands(MCInst &Inst, unsigned N) const {
2050 assert(N == 1 && "Invalid number of operands!")(static_cast <bool> (N == 1 && "Invalid number of operands!"
) ? void (0) : __assert_fail ("N == 1 && \"Invalid number of operands!\""
, "/build/llvm-toolchain-snapshot-6.0~svn321639/lib/Target/ARM/AsmParser/ARMAsmParser.cpp"
, 2050, __extension__ __PRETTY_FUNCTION__))
;
2051 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
2052 uint32_t Enc = ARM_AM::getSOImmVal(-CE->getValue());
2053 Inst.addOperand(MCOperand::createImm(Enc));
2054 }
2055
2056 void addThumbModImmNeg8_255Operands(MCInst &Inst, unsigned N) const {
2057 assert(N == 1 && "Invalid number of operands!")(static_cast <bool> (N == 1 && "Invalid number of operands!"
) ? void (0) : __assert_fail ("N == 1 && \"Invalid number of operands!\""
, "/build/llvm-toolchain-snapshot-6.0~svn321639/lib/Target/ARM/AsmParser/ARMAsmParser.cpp"
, 2057, __extension__ __PRETTY_FUNCTION__))
;
2058 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
2059 uint32_t Val = -CE->getValue();
2060 Inst.addOperand(MCOperand::createImm(Val));
2061 }
2062
2063 void addThumbModImmNeg1_7Operands(MCInst &Inst, unsigned N) const {
2064 assert(N == 1 && "Invalid number of operands!")(static_cast <bool> (N == 1 && "Invalid number of operands!"
) ? void (0) : __assert_fail ("N == 1 && \"Invalid number of operands!\""
, "/build/llvm-toolchain-snapshot-6.0~svn321639/lib/Target/ARM/AsmParser/ARMAsmParser.cpp"
, 2064, __extension__ __PRETTY_FUNCTION__))
;
2065 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
2066 uint32_t Val = -CE->getValue();
2067 Inst.addOperand(MCOperand::createImm(Val));
2068 }
2069
2070 void addBitfieldOperands(MCInst &Inst, unsigned N) const {
2071 assert(N == 1 && "Invalid number of operands!")(static_cast <bool> (N == 1 && "Invalid number of operands!"
) ? void (0) : __assert_fail ("N == 1 && \"Invalid number of operands!\""
, "/build/llvm-toolchain-snapshot-6.0~svn321639/lib/Target/ARM/AsmParser/ARMAsmParser.cpp"
, 2071, __extension__ __PRETTY_FUNCTION__))
;
2072 // Munge the lsb/width into a bitfield mask.
2073 unsigned lsb = Bitfield.LSB;
2074 unsigned width = Bitfield.Width;
2075 // Make a 32-bit mask w/ the referenced bits clear and all other bits set.
2076 uint32_t Mask = ~(((uint32_t)0xffffffff >> lsb) << (32 - width) >>
2077 (32 - (lsb + width)));
2078 Inst.addOperand(MCOperand::createImm(Mask));
2079 }
2080
2081 void addImmOperands(MCInst &Inst, unsigned N) const {
2082 assert(N == 1 && "Invalid number of operands!")(static_cast <bool> (N == 1 && "Invalid number of operands!"
) ? void (0) : __assert_fail ("N == 1 && \"Invalid number of operands!\""
, "/build/llvm-toolchain-snapshot-6.0~svn321639/lib/Target/ARM/AsmParser/ARMAsmParser.cpp"
, 2082, __extension__ __PRETTY_FUNCTION__))
;
2083 addExpr(Inst, getImm());
2084 }
2085
2086 void addFBits16Operands(MCInst &Inst, unsigned N) const {
2087 assert(N == 1 && "Invalid number of operands!")(static_cast <bool> (N == 1 && "Invalid number of operands!"
) ? void (0) : __assert_fail ("N == 1 && \"Invalid number of operands!\""
, "/build/llvm-toolchain-snapshot-6.0~svn321639/lib/Target/ARM/AsmParser/ARMAsmParser.cpp"
, 2087, __extension__ __PRETTY_FUNCTION__))
;
2088 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
2089 Inst.addOperand(MCOperand::createImm(16 - CE->getValue()));
2090 }
2091
2092 void addFBits32Operands(MCInst &Inst, unsigned N) const {
2093 assert(N == 1 && "Invalid number of operands!")(static_cast <bool> (N == 1 && "Invalid number of operands!"
) ? void (0) : __assert_fail ("N == 1 && \"Invalid number of operands!\""
, "/build/llvm-toolchain-snapshot-6.0~svn321639/lib/Target/ARM/AsmParser/ARMAsmParser.cpp"
, 2093, __extension__ __PRETTY_FUNCTION__))
;
2094 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
2095 Inst.addOperand(MCOperand::createImm(32 - CE->getValue()));
2096 }
2097
2098 void addFPImmOperands(MCInst &Inst, unsigned N) const {
2099 assert(N == 1 && "Invalid number of operands!")(static_cast <bool> (N == 1 && "Invalid number of operands!"
) ? void (0) : __assert_fail ("N == 1 && \"Invalid number of operands!\""
, "/build/llvm-toolchain-snapshot-6.0~svn321639/lib/Target/ARM/AsmParser/ARMAsmParser.cpp"
, 2099, __extension__ __PRETTY_FUNCTION__))
;
2100 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
2101 int Val = ARM_AM::getFP32Imm(APInt(32, CE->getValue()));
2102 Inst.addOperand(MCOperand::createImm(Val));
2103 }
2104
2105 void addImm8s4Operands(MCInst &Inst, unsigned N) const {
2106 assert(N == 1 && "Invalid number of operands!")(static_cast <bool> (N == 1 && "Invalid number of operands!"
) ? void (0) : __assert_fail ("N == 1 && \"Invalid number of operands!\""
, "/build/llvm-toolchain-snapshot-6.0~svn321639/lib/Target/ARM/AsmParser/ARMAsmParser.cpp"
, 2106, __extension__ __PRETTY_FUNCTION__))
;
2107 // FIXME: We really want to scale the value here, but the LDRD/STRD
2108 // instruction don't encode operands that way yet.
2109 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
2110 Inst.addOperand(MCOperand::createImm(CE->getValue()));
2111 }
2112
2113 void addImm0_1020s4Operands(MCInst &Inst, unsigned N) const {
2114 assert(N == 1 && "Invalid number of operands!")(static_cast <bool> (N == 1 && "Invalid number of operands!"
) ? void (0) : __assert_fail ("N == 1 && \"Invalid number of operands!\""
, "/build/llvm-toolchain-snapshot-6.0~svn321639/lib/Target/ARM/AsmParser/ARMAsmParser.cpp"
, 2114, __extension__ __PRETTY_FUNCTION__))
;
2115 // The immediate is scaled by four in the encoding and is stored
2116 // in the MCInst as such. Lop off the low two bits here.
2117 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
2118 Inst.addOperand(MCOperand::createImm(CE->getValue() / 4));
2119 }
2120
2121 void addImm0_508s4NegOperands(MCInst &Inst, unsigned N) const {
2122 assert(N == 1 && "Invalid number of operands!")(static_cast <bool> (N == 1 && "Invalid number of operands!"
) ? void (0) : __assert_fail ("N == 1 && \"Invalid number of operands!\""
, "/build/llvm-toolchain-snapshot-6.0~svn321639/lib/Target/ARM/AsmParser/ARMAsmParser.cpp"
, 2122, __extension__ __PRETTY_FUNCTION__))
;
2123 // The immediate is scaled by four in the encoding and is stored
2124 // in the MCInst as such. Lop off the low two bits here.
2125 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
2126 Inst.addOperand(MCOperand::createImm(-(CE->getValue() / 4)));
2127 }
2128
2129 void addImm0_508s4Operands(MCInst &Inst, unsigned N) const {
2130 assert(N == 1 && "Invalid number of operands!")(static_cast <bool> (N == 1 && "Invalid number of operands!"
) ? void (0) : __assert_fail ("N == 1 && \"Invalid number of operands!\""
, "/build/llvm-toolchain-snapshot-6.0~svn321639/lib/Target/ARM/AsmParser/ARMAsmParser.cpp"
, 2130, __extension__ __PRETTY_FUNCTION__))
;
2131 // The immediate is scaled by four in the encoding and is stored
2132 // in the MCInst as such. Lop off the low two bits here.
2133 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
2134 Inst.addOperand(MCOperand::createImm(CE->getValue() / 4));
2135 }
2136
2137 void addImm1_16Operands(MCInst &Inst, unsigned N) const {
2138 assert(N == 1 && "Invalid number of operands!")(static_cast <bool> (N == 1 && "Invalid number of operands!"
) ? void (0) : __assert_fail ("N == 1 && \"Invalid number of operands!\""
, "/build/llvm-toolchain-snapshot-6.0~svn321639/lib/Target/ARM/AsmParser/ARMAsmParser.cpp"
, 2138, __extension__ __PRETTY_FUNCTION__))
;
2139 // The constant encodes as the immediate-1, and we store in the instruction
2140 // the bits as encoded, so subtract off one here.
2141 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
2142 Inst.addOperand(MCOperand::createImm(CE->getValue() - 1));
2143 }
2144
2145 void addImm1_32Operands(MCInst &Inst, unsigned N) const {
2146 assert(N == 1 && "Invalid number of operands!")(static_cast <bool> (N == 1 && "Invalid number of operands!"
) ? void (0) : __assert_fail ("N == 1 && \"Invalid number of operands!\""
, "/build/llvm-toolchain-snapshot-6.0~svn321639/lib/Target/ARM/AsmParser/ARMAsmParser.cpp"
, 2146, __extension__ __PRETTY_FUNCTION__))
;
2147 // The constant encodes as the immediate-1, and we store in the instruction
2148 // the bits as encoded, so subtract off one here.
2149 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
2150 Inst.addOperand(MCOperand::createImm(CE->getValue() - 1));
2151 }
2152
2153 void addImmThumbSROperands(MCInst &Inst, unsigned N) const {
2154 assert(N == 1 && "Invalid number of operands!")(static_cast <bool> (N == 1 && "Invalid number of operands!"
) ? void (0) : __assert_fail ("N == 1 && \"Invalid number of operands!\""
, "/build/llvm-toolchain-snapshot-6.0~svn321639/lib/Target/ARM/AsmParser/ARMAsmParser.cpp"
, 2154, __extension__ __PRETTY_FUNCTION__))
;
2155 // The constant encodes as the immediate, except for 32, which encodes as
2156 // zero.
2157 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
2158 unsigned Imm = CE->getValue();
2159 Inst.addOperand(MCOperand::createImm((Imm == 32 ? 0 : Imm)));
2160 }
2161
2162 void addPKHASRImmOperands(MCInst &Inst, unsigned N) const {
2163 assert(N == 1 && "Invalid number of operands!")(static_cast <bool> (N == 1 && "Invalid number of operands!"
) ? void (0) : __assert_fail ("N == 1 && \"Invalid number of operands!\""
, "/build/llvm-toolchain-snapshot-6.0~svn321639/lib/Target/ARM/AsmParser/ARMAsmParser.cpp"
, 2163, __extension__ __PRETTY_FUNCTION__))
;
2164 // An ASR value of 32 encodes as 0, so that's how we want to add it to
2165 // the instruction as well.
2166 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
2167 int Val = CE->getValue();
2168 Inst.addOperand(MCOperand::createImm(Val == 32 ? 0 : Val));
2169 }
2170
2171 void addT2SOImmNotOperands(MCInst &Inst, unsigned N) const {
2172 assert(N == 1 && "Invalid number of operands!")(static_cast <bool> (N == 1 && "Invalid number of operands!"
) ? void (0) : __assert_fail ("N == 1 && \"Invalid number of operands!\""
, "/build/llvm-toolchain-snapshot-6.0~svn321639/lib/Target/ARM/AsmParser/ARMAsmParser.cpp"
, 2172, __extension__ __PRETTY_FUNCTION__))
;
2173 // The operand is actually a t2_so_imm, but we have its bitwise
2174 // negation in the assembly source, so twiddle it here.
2175 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
2176 Inst.addOperand(MCOperand::createImm(~(uint32_t)CE->getValue()));
2177 }
2178
2179 void addT2SOImmNegOperands(MCInst &Inst, unsigned N) const {
2180 assert(N == 1 && "Invalid number of operands!")(static_cast <bool> (N == 1 && "Invalid number of operands!"
) ? void (0) : __assert_fail ("N == 1 && \"Invalid number of operands!\""
, "/build/llvm-toolchain-snapshot-6.0~svn321639/lib/Target/ARM/AsmParser/ARMAsmParser.cpp"
, 2180, __extension__ __PRETTY_FUNCTION__))
;
2181 // The operand is actually a t2_so_imm, but we have its
2182 // negation in the assembly source, so twiddle it here.
2183 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
2184 Inst.addOperand(MCOperand::createImm(-(uint32_t)CE->getValue()));
2185 }
2186
2187 void addImm0_4095NegOperands(MCInst &Inst, unsigned N) const {
2188 assert(N == 1 && "Invalid number of operands!")(static_cast <bool> (N == 1 && "Invalid number of operands!"
) ? void (0) : __assert_fail ("N == 1 && \"Invalid number of operands!\""
, "/build/llvm-toolchain-snapshot-6.0~svn321639/lib/Target/ARM/AsmParser/ARMAsmParser.cpp"
, 2188, __extension__ __PRETTY_FUNCTION__))
;
2189 // The operand is actually an imm0_4095, but we have its
2190 // negation in the assembly source, so twiddle it here.
2191 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
2192 Inst.addOperand(MCOperand::createImm(-CE->getValue()));
2193 }
2194
2195 void addUnsignedOffset_b8s2Operands(MCInst &Inst, unsigned N) const {
2196 if(const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm())) {
2197 Inst.addOperand(MCOperand::createImm(CE->getValue() >> 2));
2198 return;
2199 }
2200
2201 const MCSymbolRefExpr *SR = dyn_cast<MCSymbolRefExpr>(Imm.Val);
2202 assert(SR && "Unknown value type!")(static_cast <bool> (SR && "Unknown value type!"
) ? void (0) : __assert_fail ("SR && \"Unknown value type!\""
, "/build/llvm-toolchain-snapshot-6.0~svn321639/lib/Target/ARM/AsmParser/ARMAsmParser.cpp"
, 2202, __extension__ __PRETTY_FUNCTION__))
;
2203 Inst.addOperand(MCOperand::createExpr(SR));
2204 }
2205
2206 void addThumbMemPCOperands(MCInst &Inst, unsigned N) const {
2207 assert(N == 1 && "Invalid number of operands!")(static_cast <bool> (N == 1 && "Invalid number of operands!"
) ? void (0) : __assert_fail ("N == 1 && \"Invalid number of operands!\""
, "/build/llvm-toolchain-snapshot-6.0~svn321639/lib/Target/ARM/AsmParser/ARMAsmParser.cpp"
, 2207, __extension__ __PRETTY_FUNCTION__))
;
2208 if (isImm()) {
2209 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
2210 if (CE) {
2211 Inst.addOperand(MCOperand::createImm(CE->getValue()));
2212 return;
2213 }
2214
2215 const MCSymbolRefExpr *SR = dyn_cast<MCSymbolRefExpr>(Imm.Val);
2216
2217 assert(SR && "Unknown value type!")(static_cast <bool> (SR && "Unknown value type!"
) ? void (0) : __assert_fail ("SR && \"Unknown value type!\""
, "/build/llvm-toolchain-snapshot-6.0~svn321639/lib/Target/ARM/AsmParser/ARMAsmParser.cpp"
, 2217, __extension__ __PRETTY_FUNCTION__))
;
2218 Inst.addOperand(MCOperand::createExpr(SR));
2219 return;
2220 }
2221
2222 assert(isMem() && "Unknown value type!")(static_cast <bool> (isMem() && "Unknown value type!"
) ? void (0) : __assert_fail ("isMem() && \"Unknown value type!\""
, "/build/llvm-toolchain-snapshot-6.0~svn321639/lib/Target/ARM/AsmParser/ARMAsmParser.cpp"
, 2222, __extension__ __PRETTY_FUNCTION__))
;
2223 assert(isa<MCConstantExpr>(Memory.OffsetImm) && "Unknown value type!")(static_cast <bool> (isa<MCConstantExpr>(Memory.OffsetImm
) && "Unknown value type!") ? void (0) : __assert_fail
("isa<MCConstantExpr>(Memory.OffsetImm) && \"Unknown value type!\""
, "/build/llvm-toolchain-snapshot-6.0~svn321639/lib/Target/ARM/AsmParser/ARMAsmParser.cpp"
, 2223, __extension__ __PRETTY_FUNCTION__))
;
2224 Inst.addOperand(MCOperand::createImm(Memory.OffsetImm->getValue()));
2225 }
2226
2227 void addMemBarrierOptOperands(MCInst &Inst, unsigned N) const {
2228 assert(N == 1 && "Invalid number of operands!")(static_cast <bool> (N == 1 && "Invalid number of operands!"
) ? void (0) : __assert_fail ("N == 1 && \"Invalid number of operands!\""
, "/build/llvm-toolchain-snapshot-6.0~svn321639/lib/Target/ARM/AsmParser/ARMAsmParser.cpp"
, 2228, __extension__ __PRETTY_FUNCTION__))
;
2229 Inst.addOperand(MCOperand::createImm(unsigned(getMemBarrierOpt())));
2230 }
2231
2232 void addInstSyncBarrierOptOperands(MCInst &Inst, unsigned N) const {
2233 assert(N == 1 && "Invalid number of operands!")(static_cast <bool> (N == 1 && "Invalid number of operands!"
) ? void (0) : __assert_fail ("N == 1 && \"Invalid number of operands!\""
, "/build/llvm-toolchain-snapshot-6.0~svn321639/lib/Target/ARM/AsmParser/ARMAsmParser.cpp"
, 2233, __extension__ __PRETTY_FUNCTION__))
;
2234 Inst.addOperand(MCOperand::createImm(unsigned(getInstSyncBarrierOpt())));
2235 }
2236
2237 void addMemNoOffsetOperands(MCInst &Inst, unsigned N) const {
2238 assert(N == 1 && "Invalid number of operands!")(static_cast <bool> (N == 1 && "Invalid number of operands!"
) ? void (0) : __assert_fail ("N == 1 && \"Invalid number of operands!\""
, "/build/llvm-toolchain-snapshot-6.0~svn321639/lib/Target/ARM/AsmParser/ARMAsmParser.cpp"
, 2238, __extension__ __PRETTY_FUNCTION__))
;
2239 Inst.addOperand(MCOperand::createReg(Memory.BaseRegNum));
2240 }
2241
2242 void addMemPCRelImm12Operands(MCInst &Inst, unsigned N) const {
2243 assert(N == 1 && "Invalid number of operands!")(static_cast <bool> (N == 1 && "Invalid number of operands!"
) ? void (0) : __assert_fail ("N == 1 && \"Invalid number of operands!\""
, "/build/llvm-toolchain-snapshot-6.0~svn321639/lib/Target/ARM/AsmParser/ARMAsmParser.cpp"
, 2243, __extension__ __PRETTY_FUNCTION__))
;
2244 int32_t Imm = Memory.OffsetImm->getValue();
2245 Inst.addOperand(MCOperand::createImm(Imm));
2246 }
2247
2248 void addAdrLabelOperands(MCInst &Inst, unsigned N) const {
2249 assert(N == 1 && "Invalid number of operands!")(static_cast <bool> (N == 1 && "Invalid number of operands!"
) ? void (0) : __assert_fail ("N == 1 && \"Invalid number of operands!\""
, "/build/llvm-toolchain-snapshot-6.0~svn321639/lib/Target/ARM/AsmParser/ARMAsmParser.cpp"
, 2249, __extension__ __PRETTY_FUNCTION__))
;
2250 assert(isImm() && "Not an immediate!")(static_cast <bool> (isImm() && "Not an immediate!"
) ? void (0) : __assert_fail ("isImm() && \"Not an immediate!\""
, "/build/llvm-toolchain-snapshot-6.0~svn321639/lib/Target/ARM/AsmParser/ARMAsmParser.cpp"
, 2250, __extension__ __PRETTY_FUNCTION__))
;
2251
2252 // If we have an immediate that's not a constant, treat it as a label
2253 // reference needing a fixup.
2254 if (!isa<MCConstantExpr>(getImm())) {
2255 Inst.addOperand(MCOperand::createExpr(getImm()));
2256 return;
2257 }
2258
2259 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
2260 int Val = CE->getValue();
2261 Inst.addOperand(MCOperand::createImm(Val));
2262 }
2263
2264 void addAlignedMemoryOperands(MCInst &Inst, unsigned N) const {
2265 assert(N == 2 && "Invalid number of operands!")(static_cast <bool> (N == 2 && "Invalid number of operands!"
) ? void (0) : __assert_fail ("N == 2 && \"Invalid number of operands!\""
, "/build/llvm-toolchain-snapshot-6.0~svn321639/lib/Target/ARM/AsmParser/ARMAsmParser.cpp"
, 2265, __extension__ __PRETTY_FUNCTION__))
;
2266 Inst.addOperand(MCOperand::createReg(Memory.BaseRegNum));
2267 Inst.addOperand(MCOperand::createImm(Memory.Alignment));
2268 }
2269
2270 void addDupAlignedMemoryNoneOperands(MCInst &Inst, unsigned N) const {
2271 addAlignedMemoryOperands(Inst, N);
2272 }
2273
2274 void addAlignedMemoryNoneOperands(MCInst &Inst, unsigned N) const {
2275 addAlignedMemoryOperands(Inst, N);
2276 }
2277
2278 void addAlignedMemory16Operands(MCInst &Inst, unsigned N) const {
2279 addAlignedMemoryOperands(Inst, N);
2280 }
2281
2282 void addDupAlignedMemory16Operands(MCInst &Inst, unsigned N) const {
2283 addAlignedMemoryOperands(Inst, N);
2284 }
2285
2286 void addAlignedMemory32Operands(MCInst &Inst, unsigned N) const {
2287 addAlignedMemoryOperands(Inst, N);
2288 }
2289
2290 void addDupAlignedMemory32Operands(MCInst &Inst, unsigned N) const {
2291 addAlignedMemoryOperands(Inst, N);
2292 }
2293
2294 void addAlignedMemory64Operands(MCInst &Inst, unsigned N) const {
2295 addAlignedMemoryOperands(Inst, N);
2296 }
2297
2298 void addDupAlignedMemory64Operands(MCInst &Inst, unsigned N) const {
2299 addAlignedMemoryOperands(Inst, N);
2300 }
2301
2302 void addAlignedMemory64or128Operands(MCInst &Inst, unsigned N) const {
2303 addAlignedMemoryOperands(Inst, N);
2304 }
2305
2306 void addDupAlignedMemory64or128Operands(MCInst &Inst, unsigned N) const {
2307 addAlignedMemoryOperands(Inst, N);
2308 }
2309
2310 void addAlignedMemory64or128or256Operands(MCInst &Inst, unsigned N) const {
2311 addAlignedMemoryOperands(Inst, N);
2312 }
2313
2314 void addAddrMode2Operands(MCInst &Inst, unsigned N) const {
2315 assert(N == 3 && "Invalid number of operands!")(static_cast <bool> (N == 3 && "Invalid number of operands!"
) ? void (0) : __assert_fail ("N == 3 && \"Invalid number of operands!\""
, "/build/llvm-toolchain-snapshot-6.0~svn321639/lib/Target/ARM/AsmParser/ARMAsmParser.cpp"
, 2315, __extension__ __PRETTY_FUNCTION__))
;
2316 int32_t Val = Memory.OffsetImm ? Memory.OffsetImm->getValue() : 0;
2317 if (!Memory.OffsetRegNum) {
2318 ARM_AM::AddrOpc AddSub = Val < 0 ? ARM_AM::sub : ARM_AM::add;
2319 // Special case for #-0
2320 if (Val == std::numeric_limits<int32_t>::min()) Val = 0;
2321 if (Val < 0) Val = -Val;
2322 Val = ARM_AM::getAM2Opc(AddSub, Val, ARM_AM::no_shift);
2323 } else {
2324 // For register offset, we encode the shift type and negation flag
2325 // here.
2326 Val = ARM_AM::getAM2Opc(Memory.isNegative ? ARM_AM::sub : ARM_AM::add,
2327 Memory.ShiftImm, Memory.ShiftType);
2328 }
2329 Inst.addOperand(MCOperand::createReg(Memory.BaseRegNum));
2330 Inst.addOperand(MCOperand::createReg(Memory.OffsetRegNum));
2331 Inst.addOperand(MCOperand::createImm(Val));
2332 }
2333
2334 void addAM2OffsetImmOperands(MCInst &Inst, unsigned N) const {
2335 assert(N == 2 && "Invalid number of operands!")(static_cast <bool> (N == 2 && "Invalid number of operands!"
) ? void (0) : __assert_fail ("N == 2 && \"Invalid number of operands!\""
, "/build/llvm-toolchain-snapshot-6.0~svn321639/lib/Target/ARM/AsmParser/ARMAsmParser.cpp"
, 2335, __extension__ __PRETTY_FUNCTION__))
;
2336 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
2337 assert(CE && "non-constant AM2OffsetImm operand!")(static_cast <bool> (CE && "non-constant AM2OffsetImm operand!"
) ? void (0) : __assert_fail ("CE && \"non-constant AM2OffsetImm operand!\""
, "/build/llvm-toolchain-snapshot-6.0~svn321639/lib/Target/ARM/AsmParser/ARMAsmParser.cpp"
, 2337, __extension__ __PRETTY_FUNCTION__))
;
2338 int32_t Val = CE->getValue();
2339 ARM_AM::AddrOpc AddSub = Val < 0 ? ARM_AM::sub : ARM_AM::add;
2340 // Special case for #-0
2341 if (Val == std::numeric_limits<int32_t>::min()) Val = 0;
2342 if (Val < 0) Val = -Val;
2343 Val = ARM_AM::getAM2Opc(AddSub, Val, ARM_AM::no_shift);
2344 Inst.addOperand(MCOperand::createReg(0));
2345 Inst.addOperand(MCOperand::createImm(Val));
2346 }
2347
2348 void addAddrMode3Operands(MCInst &Inst, unsigned N) const {
2349 assert(N == 3 && "Invalid number of operands!")(static_cast <bool> (N == 3 && "Invalid number of operands!"
) ? void (0) : __assert_fail ("N == 3 && \"Invalid number of operands!\""
, "/build/llvm-toolchain-snapshot-6.0~svn321639/lib/Target/ARM/AsmParser/ARMAsmParser.cpp"
, 2349, __extension__ __PRETTY_FUNCTION__))
;
2350 // If we have an immediate that's not a constant, treat it as a label
2351 // reference needing a fixup. If it is a constant, it's something else
2352 // and we reject it.
2353 if (isImm()) {
2354 Inst.addOperand(MCOperand::createExpr(getImm()));
2355 Inst.addOperand(MCOperand::createReg(0));
2356 Inst.addOperand(MCOperand::createImm(0));
2357 return;
2358 }
2359
2360 int32_t Val = Memory.OffsetImm ? Memory.OffsetImm->getValue() : 0;
2361 if (!Memory.OffsetRegNum) {
2362 ARM_AM::AddrOpc AddSub = Val < 0 ? ARM_AM::sub : ARM_AM::add;
2363 // Special case for #-0
2364 if (Val == std::numeric_limits<int32_t>::min()) Val = 0;
2365 if (Val < 0) Val = -Val;
2366 Val = ARM_AM::getAM3Opc(AddSub, Val);
2367 } else {
2368 // For register offset, we encode the shift type and negation flag
2369 // here.
2370 Val = ARM_AM::getAM3Opc(Memory.isNegative ? ARM_AM::sub : ARM_AM::add, 0);
2371 }
2372 Inst.addOperand(MCOperand::createReg(Memory.BaseRegNum));
2373 Inst.addOperand(MCOperand::createReg(Memory.OffsetRegNum));
2374 Inst.addOperand(MCOperand::createImm(Val));
2375 }
2376
2377 void addAM3OffsetOperands(MCInst &Inst, unsigned N) const {
2378 assert(N == 2 && "Invalid number of operands!")(static_cast <bool> (N == 2 && "Invalid number of operands!"
) ? void (0) : __assert_fail ("N == 2 && \"Invalid number of operands!\""
, "/build/llvm-toolchain-snapshot-6.0~svn321639/lib/Target/ARM/AsmParser/ARMAsmParser.cpp"
, 2378, __extension__ __PRETTY_FUNCTION__))
;
2379 if (Kind == k_PostIndexRegister) {
2380 int32_t Val =
2381 ARM_AM::getAM3Opc(PostIdxReg.isAdd ? ARM_AM::add : ARM_AM::sub, 0);
2382 Inst.addOperand(MCOperand::createReg(PostIdxReg.RegNum));
2383 Inst.addOperand(MCOperand::createImm(Val));
2384 return;
2385 }
2386
2387 // Constant offset.
2388 const MCConstantExpr *CE = static_cast<const MCConstantExpr*>(getImm());
2389 int32_t Val = CE->getValue();
2390 ARM_AM::AddrOpc AddSub = Val < 0 ? ARM_AM::sub : ARM_AM::add;
2391 // Special case for #-0
2392 if (Val == std::numeric_limits<int32_t>::min()) Val = 0;
2393 if (Val < 0) Val = -Val;
2394 Val = ARM_AM::getAM3Opc(AddSub, Val);
2395 Inst.addOperand(MCOperand::createReg(0));
2396 Inst.addOperand(MCOperand::createImm(Val));
2397 }
2398
2399 void addAddrMode5Operands(MCInst &Inst, unsigned N) const {
2400 assert(N == 2 && "Invalid number of operands!")(static_cast <bool> (N == 2 && "Invalid number of operands!"
) ? void (0) : __assert_fail ("N == 2 && \"Invalid number of operands!\""
, "/build/llvm-toolchain-snapshot-6.0~svn321639/lib/Target/ARM/AsmParser/ARMAsmParser.cpp"
, 2400, __extension__ __PRETTY_FUNCTION__))
;
2401 // If we have an immediate that's not a constant, treat it as a label
2402 // reference needing a fixup. If it is a constant, it's something else
2403 // and we reject it.
2404 if (isImm()) {
2405 Inst.addOperand(MCOperand::createExpr(getImm()));
2406 Inst.addOperand(MCOperand::createImm(0));
2407 return;
2408 }
2409
2410 // The lower two bits are always zero and as such are not encoded.
2411 int32_t Val = Memory.OffsetImm ? Memory.OffsetImm->getValue() / 4 : 0;
2412 ARM_AM::AddrOpc AddSub = Val < 0 ? ARM_AM::sub : ARM_AM::add;
2413 // Special case for #-0
2414 if (Val == std::numeric_limits<int32_t>::min()) Val = 0;
2415 if (Val < 0) Val = -Val;
2416 Val = ARM_AM::getAM5Opc(AddSub, Val);
2417 Inst.addOperand(MCOperand::createReg(Memory.BaseRegNum));
2418 Inst.addOperand(MCOperand::createImm(Val));
2419 }
2420
2421 void addAddrMode5FP16Operands(MCInst &Inst, unsigned N) const {
2422 assert(N == 2 && "Invalid number of operands!")(static_cast <bool> (N == 2 && "Invalid number of operands!"
) ? void (0) : __assert_fail ("N == 2 && \"Invalid number of operands!\""
, "/build/llvm-toolchain-snapshot-6.0~svn321639/lib/Target/ARM/AsmParser/ARMAsmParser.cpp"
, 2422, __extension__ __PRETTY_FUNCTION__))
;
2423 // If we have an immediate that's not a constant, treat it as a label
2424 // reference needing a fixup. If it is a constant, it's something else
2425 // and we reject it.
2426 if (isImm()) {
2427 Inst.addOperand(MCOperand::createExpr(getImm()));
2428 Inst.addOperand(MCOperand::createImm(0));
2429 return;
2430 }
2431
2432 // The lower bit is always zero and as such is not encoded.
2433 int32_t Val = Memory.OffsetImm ? Memory.OffsetImm->getValue() / 2 : 0;
2434 ARM_AM::AddrOpc AddSub = Val < 0 ? ARM_AM::sub : ARM_AM::add;
2435 // Special case for #-0
2436 if (Val == std::numeric_limits<int32_t>::min()) Val = 0;
2437 if (Val < 0) Val = -Val;
2438 Val = ARM_AM::getAM5FP16Opc(AddSub, Val);
2439 Inst.addOperand(MCOperand::createReg(Memory.BaseRegNum));
2440 Inst.addOperand(MCOperand::createImm(Val));
2441 }
2442
2443 void addMemImm8s4OffsetOperands(MCInst &Inst, unsigned N) const {
2444 assert(N == 2 && "Invalid number of operands!")(static_cast <bool> (N == 2 && "Invalid number of operands!"
) ? void (0) : __assert_fail ("N == 2 && \"Invalid number of operands!\""
, "/build/llvm-toolchain-snapshot-6.0~svn321639/lib/Target/ARM/AsmParser/ARMAsmParser.cpp"
, 2444, __extension__ __PRETTY_FUNCTION__))
;
2445 // If we have an immediate that's not a constant, treat it as a label
2446 // reference needing a fixup. If it is a constant, it's something else
2447 // and we reject it.
2448 if (isImm()) {
2449 Inst.addOperand(MCOperand::createExpr(getImm()));
2450 Inst.addOperand(MCOperand::createImm(0));
2451 return;
2452 }
2453
2454 int64_t Val = Memory.OffsetImm ? Memory.OffsetImm->getValue() : 0;
2455 Inst.addOperand(MCOperand::createReg(Memory.BaseRegNum));
2456 Inst.addOperand(MCOperand::createImm(Val));
2457 }
2458
2459 void addMemImm0_1020s4OffsetOperands(MCInst &Inst, unsigned N) const {
2460 assert(N == 2 && "Invalid number of operands!")(static_cast <bool> (N == 2 && "Invalid number of operands!"
) ? void (0) : __assert_fail ("N == 2 && \"Invalid number of operands!\""
, "/build/llvm-toolchain-snapshot-6.0~svn321639/lib/Target/ARM/AsmParser/ARMAsmParser.cpp"
, 2460, __extension__ __PRETTY_FUNCTION__))
;
2461 // The lower two bits are always zero and as such are not encoded.
2462 int32_t Val = Memory.OffsetImm ? Memory.OffsetImm->getValue() / 4 : 0;
2463 Inst.addOperand(MCOperand::createReg(Memory.BaseRegNum));
2464 Inst.addOperand(MCOperand::createImm(Val));
2465 }
2466
2467 void addMemImm8OffsetOperands(MCInst &Inst, unsigned N) const {
2468 assert(N == 2 && "Invalid number of operands!")(static_cast <bool> (N == 2 && "Invalid number of operands!"
) ? void (0) : __assert_fail ("N == 2 && \"Invalid number of operands!\""
, "/build/llvm-toolchain-snapshot-6.0~svn321639/lib/Target/ARM/AsmParser/ARMAsmParser.cpp"
, 2468, __extension__ __PRETTY_FUNCTION__))
;
2469 int64_t Val = Memory.OffsetImm ? Memory.OffsetImm->getValue() : 0;
2470 Inst.addOperand(MCOperand::createReg(Memory.BaseRegNum));
2471 Inst.addOperand(MCOperand::createImm(Val));
2472 }
2473
2474 void addMemPosImm8OffsetOperands(MCInst &Inst, unsigned N) const {
2475 addMemImm8OffsetOperands(Inst, N);
2476 }
2477
2478 void addMemNegImm8OffsetOperands(MCInst &Inst, unsigned N) const {
2479 addMemImm8OffsetOperands(Inst, N);
2480 }
2481
2482 void addMemUImm12OffsetOperands(MCInst &Inst, unsigned N) const {
2483 assert(N == 2 && "Invalid number of operands!")(static_cast <bool> (N == 2 && "Invalid number of operands!"
) ? void (0) : __assert_fail ("N == 2 && \"Invalid number of operands!\""
, "/build/llvm-toolchain-snapshot-6.0~svn321639/lib/Target/ARM/AsmParser/ARMAsmParser.cpp"
, 2483, __extension__ __PRETTY_FUNCTION__))
;
2484 // If this is an immediate, it's a label reference.
2485 if (isImm()) {
2486 addExpr(Inst, getImm());
2487 Inst.addOperand(MCOperand::createImm(0));
2488 return;
2489 }
2490
2491 // Otherwise, it's a normal memory reg+offset.
2492 int64_t Val = Memory.OffsetImm ? Memory.OffsetImm->getValue() : 0;
2493 Inst.addOperand(MCOperand::createReg(Memory.BaseRegNum));
2494 Inst.addOperand(MCOperand::createImm(Val));
2495 }
2496
2497 void addMemImm12OffsetOperands(MCInst &Inst, unsigned N) const {
2498 assert(N == 2 && "Invalid number of operands!")(static_cast <bool> (N == 2 && "Invalid number of operands!"
) ? void (0) : __assert_fail ("N == 2 && \"Invalid number of operands!\""
, "/build/llvm-toolchain-snapshot-6.0~svn321639/lib/Target/ARM/AsmParser/ARMAsmParser.cpp"
, 2498, __extension__ __PRETTY_FUNCTION__))
;
2499 // If this is an immediate, it's a label reference.
2500 if (isImm()) {
2501 addExpr(Inst, getImm());
2502 Inst.addOperand(MCOperand::createImm(0));
2503 return;
2504 }
2505
2506 // Otherwise, it's a normal memory reg+offset.
2507 int64_t Val = Memory.OffsetImm ? Memory.OffsetImm->getValue() : 0;
2508 Inst.addOperand(MCOperand::createReg(Memory.BaseRegNum));
2509 Inst.addOperand(MCOperand::createImm(Val));
2510 }
2511
2512 void addConstPoolAsmImmOperands(MCInst &Inst, unsigned N) const {
2513 assert(N == 1 && "Invalid number of operands!")(static_cast <bool> (N == 1 && "Invalid number of operands!"
) ? void (0) : __assert_fail ("N == 1 && \"Invalid number of operands!\""
, "/build/llvm-toolchain-snapshot-6.0~svn321639/lib/Target/ARM/AsmParser/ARMAsmParser.cpp"
, 2513, __extension__ __PRETTY_FUNCTION__))
;
2514 // This is container for the immediate that we will create the constant
2515 // pool from
2516 addExpr(Inst, getConstantPoolImm());
2517 return;
2518 }
2519
2520 void addMemTBBOperands(MCInst &Inst, unsigned N) const {
2521 assert(N == 2 && "Invalid number of operands!")(static_cast <bool> (N == 2 && "Invalid number of operands!"
) ? void (0) : __assert_fail ("N == 2 && \"Invalid number of operands!\""
, "/build/llvm-toolchain-snapshot-6.0~svn321639/lib/Target/ARM/AsmParser/ARMAsmParser.cpp"
, 2521, __extension__ __PRETTY_FUNCTION__))
;
2522 Inst.addOperand(MCOperand::createReg(Memory.BaseRegNum));
2523 Inst.addOperand(MCOperand::createReg(Memory.OffsetRegNum));
2524 }
2525
2526 void addMemTBHOperands(MCInst &Inst, unsigned N) const {
2527 assert(N == 2 && "Invalid number of operands!")(static_cast <bool> (N == 2 && "Invalid number of operands!"
) ? void (0) : __assert_fail ("N == 2 && \"Invalid number of operands!\""
, "/build/llvm-toolchain-snapshot-6.0~svn321639/lib/Target/ARM/AsmParser/ARMAsmParser.cpp"
, 2527, __extension__ __PRETTY_FUNCTION__))
;
2528 Inst.addOperand(MCOperand::createReg(Memory.BaseRegNum));
2529 Inst.addOperand(MCOperand::createReg(Memory.OffsetRegNum));
2530 }
2531
2532 void addMemRegOffsetOperands(MCInst &Inst, unsigned N) const {
2533 assert(N == 3 && "Invalid number of operands!")(static_cast <bool> (N == 3 && "Invalid number of operands!"
) ? void (0) : __assert_fail ("N == 3 && \"Invalid number of operands!\""
, "/build/llvm-toolchain-snapshot-6.0~svn321639/lib/Target/ARM/AsmParser/ARMAsmParser.cpp"
, 2533, __extension__ __PRETTY_FUNCTION__))
;
2534 unsigned Val =
2535 ARM_AM::getAM2Opc(Memory.isNegative ? ARM_AM::sub : ARM_AM::add,
2536 Memory.ShiftImm, Memory.ShiftType);
2537 Inst.addOperand(MCOperand::createReg(Memory.BaseRegNum));
2538 Inst.addOperand(MCOperand::createReg(Memory.OffsetRegNum));
2539 Inst.addOperand(MCOperand::createImm(Val));
2540 }
2541
2542 void addT2MemRegOffsetOperands(MCInst &Inst, unsigned N) const {
2543 assert(N == 3 && "Invalid number of operands!")(static_cast <bool> (N == 3 && "Invalid number of operands!"
) ? void (0) : __assert_fail ("N == 3 && \"Invalid number of operands!\""
, "/build/llvm-toolchain-snapshot-6.0~svn321639/lib/Target/ARM/AsmParser/ARMAsmParser.cpp"
, 2543, __extension__ __PRETTY_FUNCTION__))
;
2544 Inst.addOperand(MCOperand::createReg(Memory.BaseRegNum));
2545 Inst.addOperand(MCOperand::createReg(Memory.OffsetRegNum));
2546 Inst.addOperand(MCOperand::createImm(Memory.ShiftImm));
2547 }
2548
2549 void addMemThumbRROperands(MCInst &Inst, unsigned N) const {
2550 assert(N == 2 && "Invalid number of operands!")(static_cast <bool> (N == 2 && "Invalid number of operands!"
) ? void (0) : __assert_fail ("N == 2 && \"Invalid number of operands!\""
, "/build/llvm-toolchain-snapshot-6.0~svn321639/lib/Target/ARM/AsmParser/ARMAsmParser.cpp"
, 2550, __extension__ __PRETTY_FUNCTION__))
;
2551 Inst.addOperand(MCOperand::createReg(Memory.BaseRegNum));
2552 Inst.addOperand(MCOperand::createReg(Memory.OffsetRegNum));
2553 }
2554
2555 void addMemThumbRIs4Operands(MCInst &Inst, unsigned N) const {
2556 assert(N == 2 && "Invalid number of operands!")(static_cast <bool> (N == 2 && "Invalid number of operands!"
) ? void (0) : __assert_fail ("N == 2 && \"Invalid number of operands!\""
, "/build/llvm-toolchain-snapshot-6.0~svn321639/lib/Target/ARM/AsmParser/ARMAsmParser.cpp"
, 2556, __extension__ __PRETTY_FUNCTION__))
;
2557 int64_t Val = Memory.OffsetImm ? (Memory.OffsetImm->getValue() / 4) : 0;
2558 Inst.addOperand(MCOperand::createReg(Memory.BaseRegNum));
2559 Inst.addOperand(MCOperand::createImm(Val));
2560 }
2561
2562 void addMemThumbRIs2Operands(MCInst &Inst, unsigned N) const {
2563 assert(N == 2 && "Invalid number of operands!")(static_cast <bool> (N == 2 && "Invalid number of operands!"
) ? void (0) : __assert_fail ("N == 2 && \"Invalid number of operands!\""
, "/build/llvm-toolchain-snapshot-6.0~svn321639/lib/Target/ARM/AsmParser/ARMAsmParser.cpp"
, 2563, __extension__ __PRETTY_FUNCTION__))
;
2564 int64_t Val = Memory.OffsetImm ? (Memory.OffsetImm->getValue() / 2) : 0;
2565 Inst.addOperand(MCOperand::createReg(Memory.BaseRegNum));
2566 Inst.addOperand(MCOperand::createImm(Val));
2567 }
2568
2569 void addMemThumbRIs1Operands(MCInst &Inst, unsigned N) const {
2570 assert(N == 2 && "Invalid number of operands!")(static_cast <bool> (N == 2 && "Invalid number of operands!"
) ? void (0) : __assert_fail ("N == 2 && \"Invalid number of operands!\""
, "/build/llvm-toolchain-snapshot-6.0~svn321639/lib/Target/ARM/AsmParser/ARMAsmParser.cpp"
, 2570, __extension__ __PRETTY_FUNCTION__))
;
2571 int64_t Val = Memory.OffsetImm ? (Memory.OffsetImm->getValue()) : 0;
2572 Inst.addOperand(MCOperand::createReg(Memory.BaseRegNum));
2573 Inst.addOperand(MCOperand::createImm(Val));
2574 }
2575
2576 void addMemThumbSPIOperands(MCInst &Inst, unsigned N) const {
2577 assert(N == 2 && "Invalid number of operands!")(static_cast <bool> (N == 2 && "Invalid number of operands!"
) ? void (0) : __assert_fail ("N == 2 && \"Invalid number of operands!\""
, "/build/llvm-toolchain-snapshot-6.0~svn321639/lib/Target/ARM/AsmParser/ARMAsmParser.cpp"
, 2577, __extension__ __PRETTY_FUNCTION__))
;
2578 int64_t Val = Memory.OffsetImm ? (Memory.OffsetImm->getValue() / 4) : 0;
2579 Inst.addOperand(MCOperand::createReg(Memory.BaseRegNum));
2580 Inst.addOperand(MCOperand::createImm(Val));
2581 }
2582
2583 void addPostIdxImm8Operands(MCInst &Inst, unsigned N) const {
2584 assert(N == 1 && "Invalid number of operands!")(static_cast <bool> (N == 1 && "Invalid number of operands!"
) ? void (0) : __assert_fail ("N == 1 && \"Invalid number of operands!\""
, "/build/llvm-toolchain-snapshot-6.0~svn321639/lib/Target/ARM/AsmParser/ARMAsmParser.cpp"
, 2584, __extension__ __PRETTY_FUNCTION__))
;
2585 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
2586 assert(CE && "non-constant post-idx-imm8 operand!")(static_cast <bool> (CE && "non-constant post-idx-imm8 operand!"
) ? void (0) : __assert_fail ("CE && \"non-constant post-idx-imm8 operand!\""
, "/build/llvm-toolchain-snapshot-6.0~svn321639/lib/Target/ARM/AsmParser/ARMAsmParser.cpp"
, 2586, __extension__ __PRETTY_FUNCTION__))
;
2587 int Imm = CE->getValue();
2588 bool isAdd = Imm >= 0;
2589 if (Imm == std::numeric_limits<int32_t>::min()) Imm = 0;
2590 Imm = (Imm < 0 ? -Imm : Imm) | (int)isAdd << 8;
2591 Inst.addOperand(MCOperand::createImm(Imm));
2592 }
2593
2594 void addPostIdxImm8s4Operands(MCInst &Inst, unsigned N) const {
2595 assert(N == 1 && "Invalid number of operands!")(static_cast <bool> (N == 1 && "Invalid number of operands!"
) ? void (0) : __assert_fail ("N == 1 && \"Invalid number of operands!\""
, "/build/llvm-toolchain-snapshot-6.0~svn321639/lib/Target/ARM/AsmParser/ARMAsmParser.cpp"
, 2595, __extension__ __PRETTY_FUNCTION__))
;
2596 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
2597 assert(CE && "non-constant post-idx-imm8s4 operand!")(static_cast <bool> (CE && "non-constant post-idx-imm8s4 operand!"
) ? void (0) : __assert_fail ("CE && \"non-constant post-idx-imm8s4 operand!\""
, "/build/llvm-toolchain-snapshot-6.0~svn321639/lib/Target/ARM/AsmParser/ARMAsmParser.cpp"
, 2597, __extension__ __PRETTY_FUNCTION__))
;
2598 int Imm = CE->getValue();
2599 bool isAdd = Imm >= 0;
2600 if (Imm == std::numeric_limits<int32_t>::min()) Imm = 0;
2601 // Immediate is scaled by 4.
2602 Imm = ((Imm < 0 ? -Imm : Imm) / 4) | (int)isAdd << 8;
2603 Inst.addOperand(MCOperand::createImm(Imm));
2604 }
2605
2606 void addPostIdxRegOperands(MCInst &Inst, unsigned N) const {
2607 assert(N == 2 && "Invalid number of operands!")(static_cast <bool> (N == 2 && "Invalid number of operands!"
) ? void (0) : __assert_fail ("N == 2 && \"Invalid number of operands!\""
, "/build/llvm-toolchain-snapshot-6.0~svn321639/lib/Target/ARM/AsmParser/ARMAsmParser.cpp"
, 2607, __extension__ __PRETTY_FUNCTION__))
;
2608 Inst.addOperand(MCOperand::createReg(PostIdxReg.RegNum));
2609 Inst.addOperand(MCOperand::createImm(PostIdxReg.isAdd));
2610 }
2611
2612 void addPostIdxRegShiftedOperands(MCInst &Inst, unsigned N) const {
2613 assert(N == 2 && "Invalid number of operands!")(static_cast <bool> (N == 2 && "Invalid number of operands!"
) ? void (0) : __assert_fail ("N == 2 && \"Invalid number of operands!\""
, "/build/llvm-toolchain-snapshot-6.0~svn321639/lib/Target/ARM/AsmParser/ARMAsmParser.cpp"
, 2613, __extension__ __PRETTY_FUNCTION__))
;
2614 Inst.addOperand(MCOperand::createReg(PostIdxReg.RegNum));
2615 // The sign, shift type, and shift amount are encoded in a single operand
2616 // using the AM2 encoding helpers.
2617 ARM_AM::AddrOpc opc = PostIdxReg.isAdd ? ARM_AM::add : ARM_AM::sub;
2618 unsigned Imm = ARM_AM::getAM2Opc(opc, PostIdxReg.ShiftImm,
2619 PostIdxReg.ShiftTy);
2620 Inst.addOperand(MCOperand::createImm(Imm));
2621 }
2622
2623 void addMSRMaskOperands(MCInst &Inst, unsigned N) const {
2624 assert(N == 1 && "Invalid number of operands!")(static_cast <bool> (N == 1 && "Invalid number of operands!"
) ? void (0) : __assert_fail ("N == 1 && \"Invalid number of operands!\""
, "/build/llvm-toolchain-snapshot-6.0~svn321639/lib/Target/ARM/AsmParser/ARMAsmParser.cpp"
, 2624, __extension__ __PRETTY_FUNCTION__))
;
2625 Inst.addOperand(MCOperand::createImm(unsigned(getMSRMask())));
2626 }
2627
2628 void addBankedRegOperands(MCInst &Inst, unsigned N) const {
2629 assert(N == 1 && "Invalid number of operands!")(static_cast <bool> (N == 1 && "Invalid number of operands!"
) ? void (0) : __assert_fail ("N == 1 && \"Invalid number of operands!\""
, "/build/llvm-toolchain-snapshot-6.0~svn321639/lib/Target/ARM/AsmParser/ARMAsmParser.cpp"
, 2629, __extension__ __PRETTY_FUNCTION__))
;
2630 Inst.addOperand(MCOperand::createImm(unsigned(getBankedReg())));
2631 }
2632
2633 void addProcIFlagsOperands(MCInst &Inst, unsigned N) const {
2634 assert(N == 1 && "Invalid number of operands!")(static_cast <bool> (N == 1 && "Invalid number of operands!"
) ? void (0) : __assert_fail ("N == 1 && \"Invalid number of operands!\""
, "/build/llvm-toolchain-snapshot-6.0~svn321639/lib/Target/ARM/AsmParser/ARMAsmParser.cpp"
, 2634, __extension__ __PRETTY_FUNCTION__))
;
2635 Inst.addOperand(MCOperand::createImm(unsigned(getProcIFlags())));
2636 }
2637
2638 void addVecListOperands(MCInst &Inst, unsigned N) const {
2639 assert(N == 1 && "Invalid number of operands!")(static_cast <bool> (N == 1 && "Invalid number of operands!"
) ? void (0) : __assert_fail ("N == 1 && \"Invalid number of operands!\""
, "/build/llvm-toolchain-snapshot-6.0~svn321639/lib/Target/ARM/AsmParser/ARMAsmParser.cpp"
, 2639, __extension__ __PRETTY_FUNCTION__))
;
2640 Inst.addOperand(MCOperand::createReg(VectorList.RegNum));
2641 }
2642
2643 void addVecListIndexedOperands(MCInst &Inst, unsigned N) const {
2644 assert(N == 2 && "Invalid number of operands!")(static_cast <bool> (N == 2 && "Invalid number of operands!"
) ? void (0) : __assert_fail ("N == 2 && \"Invalid number of operands!\""
, "/build/llvm-toolchain-snapshot-6.0~svn321639/lib/Target/ARM/AsmParser/ARMAsmParser.cpp"
, 2644, __extension__ __PRETTY_FUNCTION__))
;
2645 Inst.addOperand(MCOperand::createReg(VectorList.RegNum));
2646 Inst.addOperand(MCOperand::createImm(VectorList.LaneIndex));
2647 }
2648
2649 void addVectorIndex8Operands(MCInst &Inst, unsigned N) const {
2650 assert(N == 1 && "Invalid number of operands!")(static_cast <bool> (N == 1 && "Invalid number of operands!"
) ? void (0) : __assert_fail ("N == 1 && \"Invalid number of operands!\""
, "/build/llvm-toolchain-snapshot-6.0~svn321639/lib/Target/ARM/AsmParser/ARMAsmParser.cpp"
, 2650, __extension__ __PRETTY_FUNCTION__))
;
2651 Inst.addOperand(MCOperand::createImm(getVectorIndex()));
2652 }
2653
2654 void addVectorIndex16Operands(MCInst &Inst, unsigned N) const {
2655 assert(N == 1 && "Invalid number of operands!")(static_cast <bool> (N == 1 && "Invalid number of operands!"
) ? void (0) : __assert_fail ("N == 1 && \"Invalid number of operands!\""
, "/build/llvm-toolchain-snapshot-6.0~svn321639/lib/Target/ARM/AsmParser/ARMAsmParser.cpp"
, 2655, __extension__ __PRETTY_FUNCTION__))
;
2656 Inst.addOperand(MCOperand::createImm(getVectorIndex()));
2657 }
2658
2659 void addVectorIndex32Operands(MCInst &Inst, unsigned N) const {
2660 assert(N == 1 && "Invalid number of operands!")(static_cast <bool> (N == 1 && "Invalid number of operands!"
) ? void (0) : __assert_fail ("N == 1 && \"Invalid number of operands!\""
, "/build/llvm-toolchain-snapshot-6.0~svn321639/lib/Target/ARM/AsmParser/ARMAsmParser.cpp"
, 2660, __extension__ __PRETTY_FUNCTION__))
;
2661 Inst.addOperand(MCOperand::createImm(getVectorIndex()));
2662 }
2663
2664 void addVectorIndex64Operands(MCInst &Inst, unsigned N) const {
2665 assert(N == 1 && "Invalid number of operands!")(static_cast <bool> (N == 1 && "Invalid number of operands!"
) ? void (0) : __assert_fail ("N == 1 && \"Invalid number of operands!\""
, "/build/llvm-toolchain-snapshot-6.0~svn321639/lib/Target/ARM/AsmParser/ARMAsmParser.cpp"
, 2665, __extension__ __PRETTY_FUNCTION__))
;
2666 Inst.addOperand(MCOperand::createImm(getVectorIndex()));
2667 }
2668
2669 void addNEONi8splatOperands(MCInst &Inst, unsigned N) const {
2670 assert(N == 1 && "Invalid number of operands!")(static_cast <bool> (N == 1 && "Invalid number of operands!"
) ? void (0) : __assert_fail ("N == 1 && \"Invalid number of operands!\""
, "/build/llvm-toolchain-snapshot-6.0~svn321639/lib/Target/ARM/AsmParser/ARMAsmParser.cpp"
, 2670, __extension__ __PRETTY_FUNCTION__))
;
2671 // The immediate encodes the type of constant as well as the value.
2672 // Mask in that this is an i8 splat.
2673 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
2674 Inst.addOperand(MCOperand::createImm(CE->getValue() | 0xe00));
2675 }
2676
2677 void addNEONi16splatOperands(MCInst &Inst, unsigned N) const {
2678 assert(N == 1 && "Invalid number of operands!")(static_cast <bool> (N == 1 && "Invalid number of operands!"
) ? void (0) : __assert_fail ("N == 1 && \"Invalid number of operands!\""
, "/build/llvm-toolchain-snapshot-6.0~svn321639/lib/Target/ARM/AsmParser/ARMAsmParser.cpp"
, 2678, __extension__ __PRETTY_FUNCTION__))
;
2679 // The immediate encodes the type of constant as well as the value.
2680 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
2681 unsigned Value = CE->getValue();
2682 Value = ARM_AM::encodeNEONi16splat(Value);
2683 Inst.addOperand(MCOperand::createImm(Value));
2684 }
2685
2686 void addNEONi16splatNotOperands(MCInst &Inst, unsigned N) const {
2687 assert(N == 1 && "Invalid number of operands!")(static_cast <bool> (N == 1 && "Invalid number of operands!"
) ? void (0) : __assert_fail ("N == 1 && \"Invalid number of operands!\""
, "/build/llvm-toolchain-snapshot-6.0~svn321639/lib/Target/ARM/AsmParser/ARMAsmParser.cpp"
, 2687, __extension__ __PRETTY_FUNCTION__))
;
2688 // The immediate encodes the type of constant as well as the value.
2689 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
2690 unsigned Value = CE->getValue();
2691 Value = ARM_AM::encodeNEONi16splat(~Value & 0xffff);
2692 Inst.addOperand(MCOperand::createImm(Value));
2693 }
2694
2695 void addNEONi32splatOperands(MCInst &Inst, unsigned N) const {
2696 assert(N == 1 && "Invalid number of operands!")(static_cast <bool> (N == 1 && "Invalid number of operands!"
) ? void (0) : __assert_fail ("N == 1 && \"Invalid number of operands!\""
, "/build/llvm-toolchain-snapshot-6.0~svn321639/lib/Target/ARM/AsmParser/ARMAsmParser.cpp"
, 2696, __extension__ __PRETTY_FUNCTION__))
;
2697 // The immediate encodes the type of constant as well as the value.
2698 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
2699 unsigned Value = CE->getValue();
2700 Value = ARM_AM::encodeNEONi32splat(Value);
2701 Inst.addOperand(MCOperand::createImm(Value));
2702 }
2703
2704 void addNEONi32splatNotOperands(MCInst &Inst, unsigned N) const {
2705 assert(N == 1 && "Invalid number of operands!")(static_cast <bool> (N == 1 && "Invalid number of operands!"
) ? void (0) : __assert_fail ("N == 1 && \"Invalid number of operands!\""
, "/build/llvm-toolchain-snapshot-6.0~svn321639/lib/Target/ARM/AsmParser/ARMAsmParser.cpp"
, 2705, __extension__ __PRETTY_FUNCTION__))
;
2706 // The immediate encodes the type of constant as well as the value.
2707 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
2708 unsigned Value = CE->getValue();
2709 Value = ARM_AM::encodeNEONi32splat(~Value);
2710 Inst.addOperand(MCOperand::createImm(Value));
2711 }
2712
2713 void addNEONinvByteReplicateOperands(MCInst &Inst, unsigned N) const {
2714 assert(N == 1 && "Invalid number of operands!")(static_cast <bool> (N == 1 && "Invalid number of operands!"
) ? void (0) : __assert_fail ("N == 1 && \"Invalid number of operands!\""
, "/build/llvm-toolchain-snapshot-6.0~svn321639/lib/Target/ARM/AsmParser/ARMAsmParser.cpp"
, 2714, __extension__ __PRETTY_FUNCTION__))
;
2715 // The immediate encodes the type of constant as well as the value.
2716 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
2717 unsigned Value = CE->getValue();
2718 assert((Inst.getOpcode() == ARM::VMOVv8i8 ||(static_cast <bool> ((Inst.getOpcode() == ARM::VMOVv8i8
|| Inst.getOpcode() == ARM::VMOVv16i8) && "All vmvn instructions that wants to replicate non-zero byte "
"always must be replaced with VMOVv8i8 or VMOVv16i8.") ? void
(0) : __assert_fail ("(Inst.getOpcode() == ARM::VMOVv8i8 || Inst.getOpcode() == ARM::VMOVv16i8) && \"All vmvn instructions that wants to replicate non-zero byte \" \"always must be replaced with VMOVv8i8 or VMOVv16i8.\""
, "/build/llvm-toolchain-snapshot-6.0~svn321639/lib/Target/ARM/AsmParser/ARMAsmParser.cpp"
, 2721, __extension__ __PRETTY_FUNCTION__))
2719 Inst.getOpcode() == ARM::VMOVv16i8) &&(static_cast <bool> ((Inst.getOpcode() == ARM::VMOVv8i8
|| Inst.getOpcode() == ARM::VMOVv16i8) && "All vmvn instructions that wants to replicate non-zero byte "
"always must be replaced with VMOVv8i8 or VMOVv16i8.") ? void
(0) : __assert_fail ("(Inst.getOpcode() == ARM::VMOVv8i8 || Inst.getOpcode() == ARM::VMOVv16i8) && \"All vmvn instructions that wants to replicate non-zero byte \" \"always must be replaced with VMOVv8i8 or VMOVv16i8.\""
, "/build/llvm-toolchain-snapshot-6.0~svn321639/lib/Target/ARM/AsmParser/ARMAsmParser.cpp"
, 2721, __extension__ __PRETTY_FUNCTION__))
2720 "All vmvn instructions that wants to replicate non-zero byte "(static_cast <bool> ((Inst.getOpcode() == ARM::VMOVv8i8
|| Inst.getOpcode() == ARM::VMOVv16i8) && "All vmvn instructions that wants to replicate non-zero byte "
"always must be replaced with VMOVv8i8 or VMOVv16i8.") ? void
(0) : __assert_fail ("(Inst.getOpcode() == ARM::VMOVv8i8 || Inst.getOpcode() == ARM::VMOVv16i8) && \"All vmvn instructions that wants to replicate non-zero byte \" \"always must be replaced with VMOVv8i8 or VMOVv16i8.\""
, "/build/llvm-toolchain-snapshot-6.0~svn321639/lib/Target/ARM/AsmParser/ARMAsmParser.cpp"
, 2721, __extension__ __PRETTY_FUNCTION__))
2721 "always must be replaced with VMOVv8i8 or VMOVv16i8.")(static_cast <bool> ((Inst.getOpcode() == ARM::VMOVv8i8
|| Inst.getOpcode() == ARM::VMOVv16i8) && "All vmvn instructions that wants to replicate non-zero byte "
"always must be replaced with VMOVv8i8 or VMOVv16i8.") ? void
(0) : __assert_fail ("(Inst.getOpcode() == ARM::VMOVv8i8 || Inst.getOpcode() == ARM::VMOVv16i8) && \"All vmvn instructions that wants to replicate non-zero byte \" \"always must be replaced with VMOVv8i8 or VMOVv16i8.\""
, "/build/llvm-toolchain-snapshot-6.0~svn321639/lib/Target/ARM/AsmParser/ARMAsmParser.cpp"
, 2721, __extension__ __PRETTY_FUNCTION__))
;
2722 unsigned B = ((~Value) & 0xff);
2723 B |= 0xe00; // cmode = 0b1110
2724 Inst.addOperand(MCOperand::createImm(B));
2725 }
2726
2727 void addNEONi32vmovOperands(MCInst &Inst, unsigned N) const {
2728 assert(N == 1 && "Invalid number of operands!")(static_cast <bool> (N == 1 && "Invalid number of operands!"
) ? void (0) : __assert_fail ("N == 1 && \"Invalid number of operands!\""
, "/build/llvm-toolchain-snapshot-6.0~svn321639/lib/Target/ARM/AsmParser/ARMAsmParser.cpp"
, 2728, __extension__ __PRETTY_FUNCTION__))
;
2729 // The immediate encodes the type of constant as well as the value.
2730 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
2731 unsigned Value = CE->getValue();
2732 if (Value >= 256 && Value <= 0xffff)
2733 Value = (Value >> 8) | ((Value & 0xff) ? 0xc00 : 0x200);
2734 else if (Value > 0xffff && Value <= 0xffffff)
2735 Value = (Value >> 16) | ((Value & 0xff) ? 0xd00 : 0x400);
2736 else if (Value > 0xffffff)
2737 Value = (Value >> 24) | 0x600;
2738 Inst.addOperand(MCOperand::createImm(Value));
2739 }
2740
2741 void addNEONvmovByteReplicateOperands(MCInst &Inst, unsigned N) const {
2742 assert(N == 1 && "Invalid number of operands!")(static_cast <bool> (N == 1 && "Invalid number of operands!"
) ? void (0) : __assert_fail ("N == 1 && \"Invalid number of operands!\""
, "/build/llvm-toolchain-snapshot-6.0~svn321639/lib/Target/ARM/AsmParser/ARMAsmParser.cpp"
, 2742, __extension__ __PRETTY_FUNCTION__))
;
2743 // The immediate encodes the type of constant as well as the value.
2744 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
2745 unsigned Value = CE->getValue();
2746 assert((Inst.getOpcode() == ARM::VMOVv8i8 ||(static_cast <bool> ((Inst.getOpcode() == ARM::VMOVv8i8
|| Inst.getOpcode() == ARM::VMOVv16i8) && "All instructions that wants to replicate non-zero byte "
"always must be replaced with VMOVv8i8 or VMOVv16i8.") ? void
(0) : __assert_fail ("(Inst.getOpcode() == ARM::VMOVv8i8 || Inst.getOpcode() == ARM::VMOVv16i8) && \"All instructions that wants to replicate non-zero byte \" \"always must be replaced with VMOVv8i8 or VMOVv16i8.\""
, "/build/llvm-toolchain-snapshot-6.0~svn321639/lib/Target/ARM/AsmParser/ARMAsmParser.cpp"
, 2749, __extension__ __PRETTY_FUNCTION__))
2747 Inst.getOpcode() == ARM::VMOVv16i8) &&(static_cast <bool> ((Inst.getOpcode() == ARM::VMOVv8i8
|| Inst.getOpcode() == ARM::VMOVv16i8) && "All instructions that wants to replicate non-zero byte "
"always must be replaced with VMOVv8i8 or VMOVv16i8.") ? void
(0) : __assert_fail ("(Inst.getOpcode() == ARM::VMOVv8i8 || Inst.getOpcode() == ARM::VMOVv16i8) && \"All instructions that wants to replicate non-zero byte \" \"always must be replaced with VMOVv8i8 or VMOVv16i8.\""
, "/build/llvm-toolchain-snapshot-6.0~svn321639/lib/Target/ARM/AsmParser/ARMAsmParser.cpp"
, 2749, __extension__ __PRETTY_FUNCTION__))
2748 "All instructions that wants to replicate non-zero byte "(static_cast <bool> ((Inst.getOpcode() == ARM::VMOVv8i8
|| Inst.getOpcode() == ARM::VMOVv16i8) && "All instructions that wants to replicate non-zero byte "
"always must be replaced with VMOVv8i8 or VMOVv16i8.") ? void
(0) : __assert_fail ("(Inst.getOpcode() == ARM::VMOVv8i8 || Inst.getOpcode() == ARM::VMOVv16i8) && \"All instructions that wants to replicate non-zero byte \" \"always must be replaced with VMOVv8i8 or VMOVv16i8.\""
, "/build/llvm-toolchain-snapshot-6.0~svn321639/lib/Target/ARM/AsmParser/ARMAsmParser.cpp"
, 2749, __extension__ __PRETTY_FUNCTION__))
2749 "always must be replaced with VMOVv8i8 or VMOVv16i8.")(static_cast <bool> ((Inst.getOpcode() == ARM::VMOVv8i8
|| Inst.getOpcode() == ARM::VMOVv16i8) && "All instructions that wants to replicate non-zero byte "
"always must be replaced with VMOVv8i8 or VMOVv16i8.") ? void
(0) : __assert_fail ("(Inst.getOpcode() == ARM::VMOVv8i8 || Inst.getOpcode() == ARM::VMOVv16i8) && \"All instructions that wants to replicate non-zero byte \" \"always must be replaced with VMOVv8i8 or VMOVv16i8.\""
, "/build/llvm-toolchain-snapshot-6.0~svn321639/lib/Target/ARM/AsmParser/ARMAsmParser.cpp"
, 2749, __extension__ __PRETTY_FUNCTION__))
;
2750 unsigned B = Value & 0xff;
2751 B |= 0xe00; // cmode = 0b1110
2752 Inst.addOperand(MCOperand::createImm(B));
2753 }
2754
2755 void addNEONi32vmovNegOperands(MCInst &Inst, unsigned N) const {
2756 assert(N == 1 && "Invalid number of operands!")(static_cast <bool> (N == 1 && "Invalid number of operands!"
) ? void (0) : __assert_fail ("N == 1 && \"Invalid number of operands!\""
, "/build/llvm-toolchain-snapshot-6.0~svn321639/lib/Target/ARM/AsmParser/ARMAsmParser.cpp"
, 2756, __extension__ __PRETTY_FUNCTION__))
;
2757 // The immediate encodes the type of constant as well as the value.
2758 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
2759 unsigned Value = ~CE->getValue();
2760 if (Value >= 256 && Value <= 0xffff)
2761 Value = (Value >> 8) | ((Value & 0xff) ? 0xc00 : 0x200);
2762 else if (Value > 0xffff && Value <= 0xffffff)
2763 Value = (Value >> 16) | ((Value & 0xff) ? 0xd00 : 0x400);
2764 else if (Value > 0xffffff)
2765 Value = (Value >> 24) | 0x600;
2766 Inst.addOperand(MCOperand::createImm(Value));
2767 }
2768
2769 void addNEONi64splatOperands(MCInst &Inst, unsigned N) const {
2770 assert(N == 1 && "Invalid number of operands!")(static_cast <bool> (N == 1 && "Invalid number of operands!"
) ? void (0) : __assert_fail ("N == 1 && \"Invalid number of operands!\""
, "/build/llvm-toolchain-snapshot-6.0~svn321639/lib/Target/ARM/AsmParser/ARMAsmParser.cpp"
, 2770, __extension__ __PRETTY_FUNCTION__))
;
2771 // The immediate encodes the type of constant as well as the value.
2772 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
2773 uint64_t Value = CE->getValue();
2774 unsigned Imm = 0;
2775 for (unsigned i = 0; i < 8; ++i, Value >>= 8) {
2776 Imm |= (Value & 1) << i;
2777 }
2778 Inst.addOperand(MCOperand::createImm(Imm | 0x1e00));
2779 }
2780
2781 void addComplexRotationEvenOperands(MCInst &Inst, unsigned N) const {
2782 assert(N == 1 && "Invalid number of operands!")(static_cast <bool> (N == 1 && "Invalid number of operands!"
) ? void (0) : __assert_fail ("N == 1 && \"Invalid number of operands!\""
, "/build/llvm-toolchain-snapshot-6.0~svn321639/lib/Target/ARM/AsmParser/ARMAsmParser.cpp"
, 2782, __extension__ __PRETTY_FUNCTION__))
;
2783 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
2784 Inst.addOperand(MCOperand::createImm(CE->getValue() / 90));
2785 }
2786
2787 void addComplexRotationOddOperands(MCInst &Inst, unsigned N) const {
2788 assert(N == 1 && "Invalid number of operands!")(static_cast <bool> (N == 1 && "Invalid number of operands!"
) ? void (0) : __assert_fail ("N == 1 && \"Invalid number of operands!\""
, "/build/llvm-toolchain-snapshot-6.0~svn321639/lib/Target/ARM/AsmParser/ARMAsmParser.cpp"
, 2788, __extension__ __PRETTY_FUNCTION__))
;
2789 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
2790 Inst.addOperand(MCOperand::createImm((CE->getValue() - 90) / 180));
2791 }
2792
2793 void print(raw_ostream &OS) const override;
2794
2795 static std::unique_ptr<ARMOperand> CreateITMask(unsigned Mask, SMLoc S) {
2796 auto Op = make_unique<ARMOperand>(k_ITCondMask);
2797 Op->ITMask.Mask = Mask;
2798 Op->StartLoc = S;
2799 Op->EndLoc = S;
2800 return Op;
2801 }
2802
2803 static std::unique_ptr<ARMOperand> CreateCondCode(ARMCC::CondCodes CC,
2804 SMLoc S) {
2805 auto Op = make_unique<ARMOperand>(k_CondCode);
2806 Op->CC.Val = CC;
2807 Op->StartLoc = S;
2808 Op->EndLoc = S;
2809 return Op;
2810 }
2811
2812 static std::unique_ptr<ARMOperand> CreateCoprocNum(unsigned CopVal, SMLoc S) {
2813 auto Op = make_unique<ARMOperand>(k_CoprocNum);
2814 Op->Cop.Val = CopVal;
2815 Op->StartLoc = S;
2816 Op->EndLoc = S;
2817 return Op;
2818 }
2819
2820 static std::unique_ptr<ARMOperand> CreateCoprocReg(unsigned CopVal, SMLoc S) {
2821 auto Op = make_unique<ARMOperand>(k_CoprocReg);
2822 Op->Cop.Val = CopVal;
2823 Op->StartLoc = S;
2824 Op->EndLoc = S;
2825 return Op;
2826 }
2827
2828 static std::unique_ptr<ARMOperand> CreateCoprocOption(unsigned Val, SMLoc S,
2829 SMLoc E) {
2830 auto Op = make_unique<ARMOperand>(k_CoprocOption);
2831 Op->Cop.Val = Val;
2832 Op->StartLoc = S;
2833 Op->EndLoc = E;
2834 return Op;
2835 }
2836
2837 static std::unique_ptr<ARMOperand> CreateCCOut(unsigned RegNum, SMLoc S) {
2838 auto Op = make_unique<ARMOperand>(k_CCOut);
2839 Op->Reg.RegNum = RegNum;
2840 Op->StartLoc = S;
2841 Op->EndLoc = S;
2842 return Op;
2843 }
2844
2845 static std::unique_ptr<ARMOperand> CreateToken(StringRef Str, SMLoc S) {
2846 auto Op = make_unique<ARMOperand>(k_Token);
2847 Op->Tok.Data = Str.data();
2848 Op->Tok.Length = Str.size();
2849 Op->StartLoc = S;
2850 Op->EndLoc = S;
2851 return Op;
2852 }
2853
2854 static std::unique_ptr<ARMOperand> CreateReg(unsigned RegNum, SMLoc S,
2855 SMLoc E) {
2856 auto Op = make_unique<ARMOperand>(k_Register);
2857 Op->Reg.RegNum = RegNum;
2858 Op->StartLoc = S;
2859 Op->EndLoc = E;
2860 return Op;
2861 }
2862
2863 static std::unique_ptr<ARMOperand>
2864 CreateShiftedRegister(ARM_AM::ShiftOpc ShTy, unsigned SrcReg,
2865 unsigned ShiftReg, unsigned ShiftImm, SMLoc S,
2866 SMLoc E) {
2867 auto Op = make_unique<ARMOperand>(k_ShiftedRegister);
2868 Op->RegShiftedReg.ShiftTy = ShTy;
2869 Op->RegShiftedReg.SrcReg = SrcReg;
2870 Op->RegShiftedReg.ShiftReg = ShiftReg;
2871 Op->RegShiftedReg.ShiftImm = ShiftImm;
2872 Op->StartLoc = S;
2873 Op->EndLoc = E;
2874 return Op;
2875 }
2876
2877 static std::unique_ptr<ARMOperand>
2878 CreateShiftedImmediate(ARM_AM::ShiftOpc ShTy, unsigned SrcReg,
2879 unsigned ShiftImm, SMLoc S, SMLoc E) {
2880 auto Op = make_unique<ARMOperand>(k_ShiftedImmediate);
2881 Op->RegShiftedImm.ShiftTy = ShTy;
2882 Op->RegShiftedImm.SrcReg = SrcReg;
2883 Op->RegShiftedImm.ShiftImm = ShiftImm;
2884 Op->StartLoc = S;
2885 Op->EndLoc = E;
2886 return Op;
2887 }
2888
2889 static std::unique_ptr<ARMOperand> CreateShifterImm(bool isASR, unsigned Imm,
2890 SMLoc S, SMLoc E) {
2891 auto Op = make_unique<ARMOperand>(k_ShifterImmediate);
2892 Op->ShifterImm.isASR = isASR;
2893 Op->ShifterImm.Imm = Imm;
2894 Op->StartLoc = S;
2895 Op->EndLoc = E;
2896 return Op;
2897 }
2898
2899 static std::unique_ptr<ARMOperand> CreateRotImm(unsigned Imm, SMLoc S,
2900 SMLoc E) {
2901 auto Op = make_unique<ARMOperand>(k_RotateImmediate);
2902 Op->RotImm.Imm = Imm;
2903 Op->StartLoc = S;
2904 Op->EndLoc = E;
2905 return Op;
2906 }
2907
2908 static std::unique_ptr<ARMOperand> CreateModImm(unsigned Bits, unsigned Rot,
2909 SMLoc S, SMLoc E) {
2910 auto Op = make_unique<ARMOperand>(k_ModifiedImmediate);
2911 Op->ModImm.Bits = Bits;
2912 Op->ModImm.Rot = Rot;
2913 Op->StartLoc = S;
2914 Op->EndLoc = E;
2915 return Op;
2916 }
2917
2918 static std::unique_ptr<ARMOperand>
2919 CreateConstantPoolImm(const MCExpr *Val, SMLoc S, SMLoc E) {
2920 auto Op = make_unique<ARMOperand>(k_ConstantPoolImmediate);
2921 Op->Imm.Val = Val;
2922 Op->StartLoc = S;
2923 Op->EndLoc = E;
2924 return Op;
2925 }
2926
2927 static std::unique_ptr<ARMOperand>
2928 CreateBitfield(unsigned LSB, unsigned Width, SMLoc S, SMLoc E) {
2929 auto Op = make_unique<ARMOperand>(k_BitfieldDescriptor);
2930 Op->Bitfield.LSB = LSB;
2931 Op->Bitfield.Width = Width;
2932 Op->StartLoc = S;
2933 Op->EndLoc = E;
2934 return Op;
2935 }
2936
2937 static std::unique_ptr<ARMOperand>
2938 CreateRegList(SmallVectorImpl<std::pair<unsigned, unsigned>> &Regs,
2939 SMLoc StartLoc, SMLoc EndLoc) {
2940 assert(Regs.size() > 0 && "RegList contains no registers?")(static_cast <bool> (Regs.size() > 0 && "RegList contains no registers?"
) ? void (0) : __assert_fail ("Regs.size() > 0 && \"RegList contains no registers?\""
, "/build/llvm-toolchain-snapshot-6.0~svn321639/lib/Target/ARM/AsmParser/ARMAsmParser.cpp"
, 2940, __extension__ __PRETTY_FUNCTION__))
;
2941 KindTy Kind = k_RegisterList;
2942
2943 if (ARMMCRegisterClasses[ARM::DPRRegClassID].contains(Regs.front().second))
2944 Kind = k_DPRRegisterList;
2945 else if (ARMMCRegisterClasses[ARM::SPRRegClassID].
2946 contains(Regs.front().second))
2947 Kind = k_SPRRegisterList;
2948
2949 // Sort based on the register encoding values.
2950 array_pod_sort(Regs.begin(), Regs.end());
2951
2952 auto Op = make_unique<ARMOperand>(Kind);
2953 for (SmallVectorImpl<std::pair<unsigned, unsigned>>::const_iterator
2954 I = Regs.begin(), E = Regs.end(); I != E; ++I)
2955 Op->Registers.push_back(I->second);
2956 Op->StartLoc = StartLoc;
2957 Op->EndLoc = EndLoc;
2958 return Op;
2959 }
2960
2961 static std::unique_ptr<ARMOperand> CreateVectorList(unsigned RegNum,
2962 unsigned Count,
2963 bool isDoubleSpaced,
2964 SMLoc S, SMLoc E) {
2965 auto Op = make_unique<ARMOperand>(k_VectorList);
2966 Op->VectorList.RegNum = RegNum;
2967 Op->VectorList.Count = Count;
2968 Op->VectorList.isDoubleSpaced = isDoubleSpaced;
2969 Op->StartLoc = S;
2970 Op->EndLoc = E;
2971 return Op;
2972 }
2973
2974 static std::unique_ptr<ARMOperand>
2975 CreateVectorListAllLanes(unsigned RegNum, unsigned Count, bool isDoubleSpaced,
2976 SMLoc S, SMLoc E) {
2977 auto Op = make_unique<ARMOperand>(k_VectorListAllLanes);
2978 Op->VectorList.RegNum = RegNum;
2979 Op->VectorList.Count = Count;
2980 Op->VectorList.isDoubleSpaced = isDoubleSpaced;
2981 Op->StartLoc = S;
2982 Op->EndLoc = E;
2983 return Op;
2984 }
2985
2986 static std::unique_ptr<ARMOperand>
2987 CreateVectorListIndexed(unsigned RegNum, unsigned Count, unsigned Index,
2988 bool isDoubleSpaced, SMLoc S, SMLoc E) {
2989 auto Op = make_unique<ARMOperand>(k_VectorListIndexed);
2990 Op->VectorList.RegNum = RegNum;
2991 Op->VectorList.Count = Count;
2992 Op->VectorList.LaneIndex = Index;
2993 Op->VectorList.isDoubleSpaced = isDoubleSpaced;
2994 Op->StartLoc = S;
2995 Op->EndLoc = E;
2996 return Op;
2997 }
2998
2999 static std::unique_ptr<ARMOperand>
3000 CreateVectorIndex(unsigned Idx, SMLoc S, SMLoc E, MCContext &Ctx) {
3001 auto Op = make_unique<ARMOperand>(k_VectorIndex);
3002 Op->VectorIndex.Val = Idx;
3003 Op->StartLoc = S;
3004 Op->EndLoc = E;
3005 return Op;
3006 }
3007
3008 static std::unique_ptr<ARMOperand> CreateImm(const MCExpr *Val, SMLoc S,
3009 SMLoc E) {
3010 auto Op = make_unique<ARMOperand>(k_Immediate);
3011 Op->Imm.Val = Val;
3012 Op->StartLoc = S;
3013 Op->EndLoc = E;
3014 return Op;
3015 }
3016
3017 static std::unique_ptr<ARMOperand>
3018 CreateMem(unsigned BaseRegNum, const MCConstantExpr *OffsetImm,
3019 unsigned OffsetRegNum, ARM_AM::ShiftOpc ShiftType,
3020 unsigned ShiftImm, unsigned Alignment, bool isNegative, SMLoc S,
3021 SMLoc E, SMLoc AlignmentLoc = SMLoc()) {
3022 auto Op = make_unique<ARMOperand>(k_Memory);
3023 Op->Memory.BaseRegNum = BaseRegNum;
3024 Op->Memory.OffsetImm = OffsetImm;
3025 Op->Memory.OffsetRegNum = OffsetRegNum;
3026 Op->Memory.ShiftType = ShiftType;
3027 Op->Memory.ShiftImm = ShiftImm;
3028 Op->Memory.Alignment = Alignment;
3029 Op->Memory.isNegative = isNegative;
3030 Op->StartLoc = S;
3031 Op->EndLoc = E;
3032 Op->AlignmentLoc = AlignmentLoc;
3033 return Op;
3034 }
3035
3036 static std::unique_ptr<ARMOperand>
3037 CreatePostIdxReg(unsigned RegNum, bool isAdd, ARM_AM::ShiftOpc ShiftTy,
3038 unsigned ShiftImm, SMLoc S, SMLoc E) {
3039 auto Op = make_unique<ARMOperand>(k_PostIndexRegister);
3040 Op->PostIdxReg.RegNum = RegNum;
3041 Op->PostIdxReg.isAdd = isAdd;
3042 Op->PostIdxReg.ShiftTy = ShiftTy;
3043 Op->PostIdxReg.ShiftImm = ShiftImm;
3044 Op->StartLoc = S;
3045 Op->EndLoc = E;
3046 return Op;
3047 }
3048
3049 static std::unique_ptr<ARMOperand> CreateMemBarrierOpt(ARM_MB::MemBOpt Opt,
3050 SMLoc S) {
3051 auto Op = make_unique<ARMOperand>(k_MemBarrierOpt);
3052 Op->MBOpt.Val = Opt;
3053 Op->StartLoc = S;
3054 Op->EndLoc = S;
3055 return Op;
3056 }
3057
3058 static std::unique_ptr<ARMOperand>
3059 CreateInstSyncBarrierOpt(ARM_ISB::InstSyncBOpt Opt, SMLoc S) {
3060 auto Op = make_unique<ARMOperand>(k_InstSyncBarrierOpt);
3061 Op->ISBOpt.Val = Opt;
3062 Op->StartLoc = S;
3063 Op->EndLoc = S;
3064 return Op;
3065 }
3066
3067 static std::unique_ptr<ARMOperand> CreateProcIFlags(ARM_PROC::IFlags IFlags,
3068 SMLoc S) {
3069 auto Op = make_unique<ARMOperand>(k_ProcIFlags);
3070 Op->IFlags.Val = IFlags;
3071 Op->StartLoc = S;
3072 Op->EndLoc = S;
3073 return Op;
3074 }
3075
3076 static std::unique_ptr<ARMOperand> CreateMSRMask(unsigned MMask, SMLoc S) {
3077 auto Op = make_unique<ARMOperand>(k_MSRMask);
3078 Op->MMask.Val = MMask;
3079 Op->StartLoc = S;
3080 Op->EndLoc = S;
3081 return Op;
3082 }
3083
3084 static std::unique_ptr<ARMOperand> CreateBankedReg(unsigned Reg, SMLoc S) {
3085 auto Op = make_unique<ARMOperand>(k_BankedReg);
3086 Op->BankedReg.Val = Reg;
3087 Op->StartLoc = S;
3088 Op->EndLoc = S;
3089 return Op;
3090 }
3091};
3092
3093} // end anonymous namespace.
3094
// Debug-dump an operand; each kind gets an angle-bracketed rendering.
void ARMOperand::print(raw_ostream &OS) const {
  switch (Kind) {
  case k_CondCode:
    OS << "<ARMCC::" << ARMCondCodeToString(getCondCode()) << ">";
    break;
  case k_CCOut:
    OS << "<ccout " << getReg() << ">";
    break;
  case k_ITCondMask: {
    // Table indexed by the 4-bit IT mask; entries spell out the then/else
    // pattern for the instructions in the IT block.
    // NOTE(review): MaskStr has 15 entries (indices 0-14) but the assert
    // below admits Mask == 15 — confirm masks never reach 15 here.
    static const char *const MaskStr[] = {
      "()", "(t)", "(e)", "(tt)", "(et)", "(te)", "(ee)", "(ttt)", "(ett)",
      "(tet)", "(eet)", "(tte)", "(ete)", "(tee)", "(eee)"
    };
    assert((ITMask.Mask & 0xf) == ITMask.Mask);
    OS << "<it-mask " << MaskStr[ITMask.Mask] << ">";
    break;
  }
  case k_CoprocNum:
    OS << "<coprocessor number: " << getCoproc() << ">";
    break;
  case k_CoprocReg:
    OS << "<coprocessor register: " << getCoproc() << ">";
    break;
  case k_CoprocOption:
    OS << "<coprocessor option: " << CoprocOption.Val << ">";
    break;
  case k_MSRMask:
    OS << "<mask: " << getMSRMask() << ">";
    break;
  case k_BankedReg:
    OS << "<banked reg: " << getBankedReg() << ">";
    break;
  case k_Immediate:
    OS << *getImm();
    break;
  case k_MemBarrierOpt:
    OS << "<ARM_MB::" << MemBOptToString(getMemBarrierOpt(), false) << ">";
    break;
  case k_InstSyncBarrierOpt:
    OS << "<ARM_ISB::" << InstSyncBOptToString(getInstSyncBarrierOpt()) << ">";
    break;
  case k_Memory:
    // Only the base register is dumped; offset/shift/alignment are omitted.
    OS << "<memory "
       << " base:" << Memory.BaseRegNum;
    OS << ">";
    break;
  case k_PostIndexRegister:
    // NOTE(review): prints a closing '>' without a matching opening '<'.
    OS << "post-idx register " << (PostIdxReg.isAdd ? "" : "-")
       << PostIdxReg.RegNum;
    if (PostIdxReg.ShiftTy != ARM_AM::no_shift)
      OS << ARM_AM::getShiftOpcStr(PostIdxReg.ShiftTy) << " "
         << PostIdxReg.ShiftImm;
    OS << ">";
    break;
  case k_ProcIFlags: {
    OS << "<ARM_PROC::";
    unsigned IFlags = getProcIFlags();
    // Print each set flag bit, highest bit first.
    for (int i=2; i >= 0; --i)
      if (IFlags & (1 << i))
        OS << ARM_PROC::IFlagsToString(1 << i);
    OS << ">";
    break;
  }
  case k_Register:
    OS << "<register " << getReg() << ">";
    break;
  case k_ShifterImmediate:
    OS << "<shift " << (ShifterImm.isASR ? "asr" : "lsl")
       << " #" << ShifterImm.Imm << ">";
    break;
  case k_ShiftedRegister:
    OS << "<so_reg_reg "
       << RegShiftedReg.SrcReg << " "
       << ARM_AM::getShiftOpcStr(RegShiftedReg.ShiftTy)
       << " " << RegShiftedReg.ShiftReg << ">";
    break;
  case k_ShiftedImmediate:
    OS << "<so_reg_imm "
       << RegShiftedImm.SrcReg << " "
       << ARM_AM::getShiftOpcStr(RegShiftedImm.ShiftTy)
       << " #" << RegShiftedImm.ShiftImm << ">";
    break;
  case k_RotateImmediate:
    // Stored rotate is in units of 8 bits.
    OS << "<ror " << " #" << (RotImm.Imm * 8) << ">";
    break;
  case k_ModifiedImmediate:
    // NOTE(review): opens with '<mod_imm' but closes with ')>'.
    OS << "<mod_imm #" << ModImm.Bits << ", #"
       <<  ModImm.Rot << ")>";
    break;
  case k_ConstantPoolImmediate:
    // NOTE(review): no closing '>' is emitted for this kind.
    OS << "<constant_pool_imm #" << *getConstantPoolImm();
    break;
  case k_BitfieldDescriptor:
    OS << "<bitfield " << "lsb: " << Bitfield.LSB
       << ", width: " << Bitfield.Width << ">";
    break;
  case k_RegisterList:
  case k_DPRRegisterList:
  case k_SPRRegisterList: {
    OS << "<register_list ";

    const SmallVectorImpl<unsigned> &RegList = getRegList();
    for (SmallVectorImpl<unsigned>::const_iterator
           I = RegList.begin(), E = RegList.end(); I != E; ) {
      OS << *I;
      // Comma between entries but not after the last one.
      if (++I < E) OS << ", ";
    }

    OS << ">";
    break;
  }
  case k_VectorList:
    OS << "<vector_list " << VectorList.Count << " * "
       << VectorList.RegNum << ">";
    break;
  case k_VectorListAllLanes:
    OS << "<vector_list(all lanes) " << VectorList.Count << " * "
       << VectorList.RegNum << ">";
    break;
  case k_VectorListIndexed:
    OS << "<vector_list(lane " << VectorList.LaneIndex << ") "
       << VectorList.Count << " * " << VectorList.RegNum << ">";
    break;
  case k_Token:
    OS << "'" << getToken() << "'";
    break;
  case k_VectorIndex:
    OS << "<vectorindex " << getVectorIndex() << ">";
    break;
  }
}
3226
3227/// @name Auto-generated Match Functions
3228/// {
3229
3230static unsigned MatchRegisterName(StringRef Name);
3231
3232/// }
3233
3234bool ARMAsmParser::ParseRegister(unsigned &RegNo,
3235 SMLoc &StartLoc, SMLoc &EndLoc) {
3236 const AsmToken &Tok = getParser().getTok();
3237 StartLoc = Tok.getLoc();
3238 EndLoc = Tok.getEndLoc();
3239 RegNo = tryParseRegister();
3240
3241 return (RegNo == (unsigned)-1);
3242}
3243
3244/// Try to parse a register name. The token must be an Identifier when called,
3245/// and if it is a register name the token is eaten and the register number is
3246/// returned. Otherwise return -1.
int ARMAsmParser::tryParseRegister() {
  MCAsmParser &Parser = getParser();
  const AsmToken &Tok = Parser.getTok();
  if (Tok.isNot(AsmToken::Identifier)) return -1;

  // Register names are case insensitive; match on the lower-cased spelling.
  std::string lowerCase = Tok.getString().lower();
  unsigned RegNum = MatchRegisterName(lowerCase);
  if (!RegNum) {
    // Fall back to alternate names not covered by the generated matcher.
    RegNum = StringSwitch<unsigned>(lowerCase)
      .Case("r13", ARM::SP)
      .Case("r14", ARM::LR)
      .Case("r15", ARM::PC)
      .Case("ip", ARM::R12)
      // Additional register name aliases for 'gas' compatibility.
      .Case("a1", ARM::R0)
      .Case("a2", ARM::R1)
      .Case("a3", ARM::R2)
      .Case("a4", ARM::R3)
      .Case("v1", ARM::R4)
      .Case("v2", ARM::R5)
      .Case("v3", ARM::R6)
      .Case("v4", ARM::R7)
      .Case("v5", ARM::R8)
      .Case("v6", ARM::R9)
      .Case("v7", ARM::R10)
      .Case("v8", ARM::R11)
      .Case("sb", ARM::R9)
      .Case("sl", ARM::R10)
      .Case("fp", ARM::R11)
      .Default(0);
  }
  if (!RegNum) {
    // Check for aliases registered via .req. Canonicalize to lower case.
    // That's more consistent since register names are case insensitive, and
    // it's how the original entry was passed in from MC/MCParser/AsmParser.
    StringMap<unsigned>::const_iterator Entry = RegisterReqs.find(lowerCase);
    // If no match, return failure.
    if (Entry == RegisterReqs.end())
      return -1;
    Parser.Lex(); // Eat identifier token.
    return Entry->getValue();
  }

  // Some FPUs only have 16 D registers, so D16-D31 are invalid
  if (hasD16() && RegNum >= ARM::D16 && RegNum <= ARM::D31)
    return -1;

  Parser.Lex(); // Eat identifier token.

  return RegNum;
}
3298
3299// Try to parse a shifter (e.g., "lsl <amt>"). On success, return 0.
3300// If a recoverable error occurs, return 1. If an irrecoverable error
3301// occurs, return -1. An irrecoverable error is one where tokens have been
3302// consumed in the process of trying to parse the shifter (i.e., when it is
3303// indeed a shifter operand, but malformed).
int ARMAsmParser::tryParseShiftRegister(OperandVector &Operands) {
  MCAsmParser &Parser = getParser();
  SMLoc S = Parser.getTok().getLoc();
  const AsmToken &Tok = Parser.getTok();
  if (Tok.isNot(AsmToken::Identifier))
    return -1;

  // Match the shift mnemonic. "asl" is accepted as a synonym for "lsl".
  std::string lowerCase = Tok.getString().lower();
  ARM_AM::ShiftOpc ShiftTy = StringSwitch<ARM_AM::ShiftOpc>(lowerCase)
      .Case("asl", ARM_AM::lsl)
      .Case("lsl", ARM_AM::lsl)
      .Case("lsr", ARM_AM::lsr)
      .Case("asr", ARM_AM::asr)
      .Case("ror", ARM_AM::ror)
      .Case("rrx", ARM_AM::rrx)
      .Default(ARM_AM::no_shift);

  // Not a shift mnemonic: recoverable, no tokens have been consumed yet.
  if (ShiftTy == ARM_AM::no_shift)
    return 1;

  Parser.Lex(); // Eat the operator.

  // The source register for the shift has already been added to the
  // operand list, so we need to pop it off and combine it into the shifted
  // register operand instead.
  std::unique_ptr<ARMOperand> PrevOp(
      (ARMOperand *)Operands.pop_back_val().release());
  if (!PrevOp->isReg())
    return Error(PrevOp->getStartLoc(), "shift must be of a register");
  int SrcReg = PrevOp->getReg();

  SMLoc EndLoc;
  int64_t Imm = 0;
  int ShiftReg = 0;
  if (ShiftTy == ARM_AM::rrx) {
    // RRX Doesn't have an explicit shift amount. The encoder expects
    // the shift register to be the same as the source register. Seems odd,
    // but OK.
    // NOTE(review): EndLoc stays default-constructed on this path, so the
    // operand below is created with an invalid end location -- confirm that
    // is intended.
    ShiftReg = SrcReg;
  } else {
    // Figure out if this is shifted by a constant or a register (for non-RRX).
    if (Parser.getTok().is(AsmToken::Hash) ||
        Parser.getTok().is(AsmToken::Dollar)) {
      Parser.Lex(); // Eat hash.
      SMLoc ImmLoc = Parser.getTok().getLoc();
      const MCExpr *ShiftExpr = nullptr;
      if (getParser().parseExpression(ShiftExpr, EndLoc)) {
        Error(ImmLoc, "invalid immediate shift value");
        return -1;
      }
      // The expression must be evaluatable as an immediate.
      const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(ShiftExpr);
      if (!CE) {
        Error(ImmLoc, "invalid immediate shift value");
        return -1;
      }
      // Range check the immediate.
      // lsl, ror: 0 <= imm <= 31
      // lsr, asr: 0 <= imm <= 32
      Imm = CE->getValue();
      if (Imm < 0 ||
          ((ShiftTy == ARM_AM::lsl || ShiftTy == ARM_AM::ror) && Imm > 31) ||
          ((ShiftTy == ARM_AM::lsr || ShiftTy == ARM_AM::asr) && Imm > 32)) {
        Error(ImmLoc, "immediate shift value out of range");
        return -1;
      }
      // shift by zero is a nop. Always send it through as lsl.
      // ('as' compatibility)
      if (Imm == 0)
        ShiftTy = ARM_AM::lsl;
    } else if (Parser.getTok().is(AsmToken::Identifier)) {
      // Register-specified shift amount, e.g. "lsl r3".
      SMLoc L = Parser.getTok().getLoc();
      EndLoc = Parser.getTok().getEndLoc();
      ShiftReg = tryParseRegister();
      if (ShiftReg == -1) {
        Error(L, "expected immediate or register in shift operand");
        return -1;
      }
    } else {
      Error(Parser.getTok().getLoc(),
            "expected immediate or register in shift operand");
      return -1;
    }
  }

  // Register-shifted register and immediate-shifted register are distinct
  // operand kinds; RRX always goes through the immediate form.
  if (ShiftReg && ShiftTy != ARM_AM::rrx)
    Operands.push_back(ARMOperand::CreateShiftedRegister(ShiftTy, SrcReg,
                                                         ShiftReg, Imm,
                                                         S, EndLoc));
  else
    Operands.push_back(ARMOperand::CreateShiftedImmediate(ShiftTy, SrcReg, Imm,
                                                          S, EndLoc));

  return 0;
}
3399
3400/// Try to parse a register name. The token must be an Identifier when called.
3401/// If it's a register, an AsmOperand is created. Another AsmOperand is created
3402/// if there is a "writeback". 'true' if it's not a register.
3403///
3404/// TODO this is likely to change to allow different register types and or to
3405/// parse for a specific register type.
3406bool ARMAsmParser::tryParseRegisterWithWriteBack(OperandVector &Operands) {
3407 MCAsmParser &Parser = getParser();
3408 SMLoc RegStartLoc = Parser.getTok().getLoc();
3409 SMLoc RegEndLoc = Parser.getTok().getEndLoc();
3410 int RegNo = tryParseRegister();
3411 if (RegNo == -1)
3412 return true;
3413
3414 Operands.push_back(ARMOperand::CreateReg(RegNo, RegStartLoc, RegEndLoc));
3415
3416 const AsmToken &ExclaimTok = Parser.getTok();
3417 if (ExclaimTok.is(AsmToken::Exclaim)) {
3418 Operands.push_back(ARMOperand::CreateToken(ExclaimTok.getString(),
3419 ExclaimTok.getLoc()));
3420 Parser.Lex(); // Eat exclaim token
3421 return false;
3422 }
3423
3424 // Also check for an index operand. This is only legal for vector registers,
3425 // but that'll get caught OK in operand matching, so we don't need to
3426 // explicitly filter everything else out here.
3427 if (Parser.getTok().is(AsmToken::LBrac)) {
3428 SMLoc SIdx = Parser.getTok().getLoc();
3429 Parser.Lex(); // Eat left bracket token.
3430
3431 const MCExpr *ImmVal;
3432 if (getParser().parseExpression(ImmVal))
3433 return true;
3434 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(ImmVal);
3435 if (!MCE)
3436 return TokError("immediate value expected for vector index");
3437
3438 if (Parser.getTok().isNot(AsmToken::RBrac))
3439 return Error(Parser.getTok().getLoc(), "']' expected");
3440
3441 SMLoc E = Parser.getTok().getEndLoc();
3442 Parser.Lex(); // Eat right bracket token.
3443
3444 Operands.push_back(ARMOperand::CreateVectorIndex(MCE->getValue(),
3445 SIdx, E,
3446 getContext()));
3447 }
3448
3449 return false;
3450}
3451
/// MatchCoprocessorOperandName - Try to parse a coprocessor-related
/// instruction with a symbolic operand name.
3454/// We accept "crN" syntax for GAS compatibility.
3455/// <operand-name> ::= <prefix><number>
3456/// If CoprocOp is 'c', then:
3457/// <prefix> ::= c | cr
3458/// If CoprocOp is 'p', then :
3459/// <prefix> ::= p
3460/// <number> ::= integer in range [0, 15]
3461static int MatchCoprocessorOperandName(StringRef Name, char CoprocOp) {
3462 // Use the same layout as the tablegen'erated register name matcher. Ugly,
3463 // but efficient.
3464 if (Name.size() < 2 || Name[0] != CoprocOp)
3465 return -1;
3466 Name = (Name[1] == 'r') ? Name.drop_front(2) : Name.drop_front();
3467
3468 switch (Name.size()) {
3469 default: return -1;
3470 case 1:
3471 switch (Name[0]) {
3472 default: return -1;
3473 case '0': return 0;
3474 case '1': return 1;
3475 case '2': return 2;
3476 case '3': return 3;
3477 case '4': return 4;
3478 case '5': return 5;
3479 case '6': return 6;
3480 case '7': return 7;
3481 case '8': return 8;
3482 case '9': return 9;
3483 }
3484 case 2:
3485 if (Name[0] != '1')
3486 return -1;
3487 switch (Name[1]) {
3488 default: return -1;
3489 // CP10 and CP11 are VFP/NEON and so vector instructions should be used.
3490 // However, old cores (v5/v6) did use them in that way.
3491 case '0': return 10;
3492 case '1': return 11;
3493 case '2': return 12;
3494 case '3': return 13;
3495 case '4': return 14;
3496 case '5': return 15;
3497 }
3498 }
3499}
3500
3501/// parseITCondCode - Try to parse a condition code for an IT instruction.
3502OperandMatchResultTy
3503ARMAsmParser::parseITCondCode(OperandVector &Operands) {
3504 MCAsmParser &Parser = getParser();
3505 SMLoc S = Parser.getTok().getLoc();
3506 const AsmToken &Tok = Parser.getTok();
3507 if (!Tok.is(AsmToken::Identifier))
3508 return MatchOperand_NoMatch;
3509 unsigned CC = ARMCondCodeFromString(Tok.getString());
3510 if (CC == ~0U)
3511 return MatchOperand_NoMatch;
3512 Parser.Lex(); // Eat the token.
3513
3514 Operands.push_back(ARMOperand::CreateCondCode(ARMCC::CondCodes(CC), S));
3515
3516 return MatchOperand_Success;
3517}
3518
/// parseCoprocNumOperand - Try to parse a coprocessor number operand. The
3520/// token must be an Identifier when called, and if it is a coprocessor
3521/// number, the token is eaten and the operand is added to the operand list.
3522OperandMatchResultTy
3523ARMAsmParser::parseCoprocNumOperand(OperandVector &Operands) {
3524 MCAsmParser &Parser = getParser();
3525 SMLoc S = Parser.getTok().getLoc();
3526 const AsmToken &Tok = Parser.getTok();
3527 if (Tok.isNot(AsmToken::Identifier))
3528 return MatchOperand_NoMatch;
3529
3530 int Num = MatchCoprocessorOperandName(Tok.getString(), 'p');
3531 if (Num == -1)
3532 return MatchOperand_NoMatch;
3533 // ARMv7 and v8 don't allow cp10/cp11 due to VFP/NEON specific instructions
3534 if ((hasV7Ops() || hasV8Ops()) && (Num == 10 || Num == 11))
3535 return MatchOperand_NoMatch;
3536
3537 Parser.Lex(); // Eat identifier token.
3538 Operands.push_back(ARMOperand::CreateCoprocNum(Num, S));
3539 return MatchOperand_Success;
3540}
3541
/// parseCoprocRegOperand - Try to parse a coprocessor register operand. The
3543/// token must be an Identifier when called, and if it is a coprocessor
3544/// number, the token is eaten and the operand is added to the operand list.
3545OperandMatchResultTy
3546ARMAsmParser::parseCoprocRegOperand(OperandVector &Operands) {
3547 MCAsmParser &Parser = getParser();
3548 SMLoc S = Parser.getTok().getLoc();
3549 const AsmToken &Tok = Parser.getTok();
3550 if (Tok.isNot(AsmToken::Identifier))
3551 return MatchOperand_NoMatch;
3552
3553 int Reg = MatchCoprocessorOperandName(Tok.getString(), 'c');
3554 if (Reg == -1)
3555 return MatchOperand_NoMatch;
3556
3557 Parser.Lex(); // Eat identifier token.
3558 Operands.push_back(ARMOperand::CreateCoprocReg(Reg, S));
3559 return MatchOperand_Success;
3560}
3561
/// parseCoprocOptionOperand - Try to parse a coprocessor option operand.
3563/// coproc_option : '{' imm0_255 '}'
3564OperandMatchResultTy
3565ARMAsmParser::parseCoprocOptionOperand(OperandVector &Operands) {
3566 MCAsmParser &Parser = getParser();
3567 SMLoc S = Parser.getTok().getLoc();
3568
3569 // If this isn't a '{', this isn't a coprocessor immediate operand.
3570 if (Parser.getTok().isNot(AsmToken::LCurly))
3571 return MatchOperand_NoMatch;
3572 Parser.Lex(); // Eat the '{'
3573
3574 const MCExpr *Expr;
3575 SMLoc Loc = Parser.getTok().getLoc();
3576 if (getParser().parseExpression(Expr)) {
3577 Error(Loc, "illegal expression");
3578 return MatchOperand_ParseFail;
3579 }
3580 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Expr);
3581 if (!CE || CE->getValue() < 0 || CE->getValue() > 255) {
3582 Error(Loc, "coprocessor option must be an immediate in range [0, 255]");
3583 return MatchOperand_ParseFail;
3584 }
3585 int Val = CE->getValue();
3586
3587 // Check for and consume the closing '}'
3588 if (Parser.getTok().isNot(AsmToken::RCurly))
3589 return MatchOperand_ParseFail;
3590 SMLoc E = Parser.getTok().getEndLoc();
3591 Parser.Lex(); // Eat the '}'
3592
3593 Operands.push_back(ARMOperand::CreateCoprocOption(Val, S, E));
3594 return MatchOperand_Success;
3595}
3596
3597// For register list parsing, we need to map from raw GPR register numbering
3598// to the enumeration values. The enumeration values aren't sorted by
3599// register number due to our using "sp", "lr" and "pc" as canonical names.
3600static unsigned getNextRegister(unsigned Reg) {
3601 // If this is a GPR, we need to do it manually, otherwise we can rely
3602 // on the sort ordering of the enumeration since the other reg-classes
3603 // are sane.
3604 if (!ARMMCRegisterClasses[ARM::GPRRegClassID].contains(Reg))
3605 return Reg + 1;
3606 switch(Reg) {
3607 default: llvm_unreachable("Invalid GPR number!")::llvm::llvm_unreachable_internal("Invalid GPR number!", "/build/llvm-toolchain-snapshot-6.0~svn321639/lib/Target/ARM/AsmParser/ARMAsmParser.cpp"
, 3607)
;
3608 case ARM::R0: return ARM::R1; case ARM::R1: return ARM::R2;
3609 case ARM::R2: return ARM::R3; case ARM::R3: return ARM::R4;
3610 case ARM::R4: return ARM::R5; case ARM::R5: return ARM::R6;
3611 case ARM::R6: return ARM::R7; case ARM::R7: return ARM::R8;
3612 case ARM::R8: return ARM::R9; case ARM::R9: return ARM::R10;
3613 case ARM::R10: return ARM::R11; case ARM::R11: return ARM::R12;
3614 case ARM::R12: return ARM::SP; case ARM::SP: return ARM::LR;
3615 case ARM::LR: return ARM::PC; case ARM::PC: return ARM::R0;
3616 }
3617}
3618
3619/// Parse a register list.
bool ARMAsmParser::parseRegisterList(OperandVector &Operands) {
  MCAsmParser &Parser = getParser();
  if (Parser.getTok().isNot(AsmToken::LCurly))
    return TokError("Token is not a Left Curly Brace");
  SMLoc S = Parser.getTok().getLoc();
  Parser.Lex(); // Eat '{' token.
  SMLoc RegLoc = Parser.getTok().getLoc();

  // Check the first register in the list to see what register class
  // this is a list of.
  int Reg = tryParseRegister();
  if (Reg == -1)
    return Error(RegLoc, "register expected");

  // The reglist instructions have at most 16 registers, so reserve
  // space for that many. Each entry pairs the encoding value with the
  // register number; the encoding is what the ordering checks below compare.
  int EReg = 0;
  SmallVector<std::pair<unsigned, unsigned>, 16> Registers;

  // Allow Q regs and just interpret them as the two D sub-registers.
  if (ARMMCRegisterClasses[ARM::QPRRegClassID].contains(Reg)) {
    Reg = getDRegFromQReg(Reg);
    EReg = MRI->getEncodingValue(Reg);
    Registers.push_back(std::pair<unsigned, unsigned>(EReg, Reg));
    ++Reg;
  }
  // The first register fixes the class (GPR, DPR or SPR) for the whole list.
  const MCRegisterClass *RC;
  if (ARMMCRegisterClasses[ARM::GPRRegClassID].contains(Reg))
    RC = &ARMMCRegisterClasses[ARM::GPRRegClassID];
  else if (ARMMCRegisterClasses[ARM::DPRRegClassID].contains(Reg))
    RC = &ARMMCRegisterClasses[ARM::DPRRegClassID];
  else if (ARMMCRegisterClasses[ARM::SPRRegClassID].contains(Reg))
    RC = &ARMMCRegisterClasses[ARM::SPRRegClassID];
  else
    return Error(RegLoc, "invalid register in register list");

  // Store the register.
  EReg = MRI->getEncodingValue(Reg);
  Registers.push_back(std::pair<unsigned, unsigned>(EReg, Reg));

  // This starts immediately after the first register token in the list,
  // so we can see either a comma or a minus (range separator) as a legal
  // next token.
  while (Parser.getTok().is(AsmToken::Comma) ||
         Parser.getTok().is(AsmToken::Minus)) {
    if (Parser.getTok().is(AsmToken::Minus)) {
      Parser.Lex(); // Eat the minus.
      SMLoc AfterMinusLoc = Parser.getTok().getLoc();
      int EndReg = tryParseRegister();
      if (EndReg == -1)
        return Error(AfterMinusLoc, "register expected");
      // Allow Q regs and just interpret them as the two D sub-registers.
      if (ARMMCRegisterClasses[ARM::QPRRegClassID].contains(EndReg))
        EndReg = getDRegFromQReg(EndReg) + 1;
      // If the register is the same as the start reg, there's nothing
      // more to do.
      if (Reg == EndReg)
        continue;
      // The register must be in the same register class as the first.
      if (!RC->contains(EndReg))
        return Error(AfterMinusLoc, "invalid register in register list");
      // Ranges must go from low to high.
      if (MRI->getEncodingValue(Reg) > MRI->getEncodingValue(EndReg))
        return Error(AfterMinusLoc, "bad range in register list");

      // Add all the registers in the range to the register list.
      while (Reg != EndReg) {
        Reg = getNextRegister(Reg);
        EReg = MRI->getEncodingValue(Reg);
        Registers.push_back(std::pair<unsigned, unsigned>(EReg, Reg));
      }
      continue;
    }
    Parser.Lex(); // Eat the comma.
    RegLoc = Parser.getTok().getLoc();
    int OldReg = Reg;
    const AsmToken RegTok = Parser.getTok();
    Reg = tryParseRegister();
    if (Reg == -1)
      return Error(RegLoc, "register expected");
    // Allow Q regs and just interpret them as the two D sub-registers.
    bool isQReg = false;
    if (ARMMCRegisterClasses[ARM::QPRRegClassID].contains(Reg)) {
      Reg = getDRegFromQReg(Reg);
      isQReg = true;
    }
    // The register must be in the same register class as the first.
    if (!RC->contains(Reg))
      return Error(RegLoc, "invalid register in register list");
    // List must be monotonically increasing.
    // Out-of-order GPR lists only warn; for other classes it is an error.
    if (MRI->getEncodingValue(Reg) < MRI->getEncodingValue(OldReg)) {
      if (ARMMCRegisterClasses[ARM::GPRRegClassID].contains(Reg))
        Warning(RegLoc, "register list not in ascending order");
      else
        return Error(RegLoc, "register list not in ascending order");
    }
    // A repeated register is accepted with a warning and skipped.
    if (MRI->getEncodingValue(Reg) == MRI->getEncodingValue(OldReg)) {
      Warning(RegLoc, "duplicated register (" + RegTok.getString() +
              ") in register list");
      continue;
    }
    // VFP register lists must also be contiguous.
    if (RC != &ARMMCRegisterClasses[ARM::GPRRegClassID] &&
        Reg != OldReg + 1)
      return Error(RegLoc, "non-contiguous register range");
    EReg = MRI->getEncodingValue(Reg);
    Registers.push_back(std::pair<unsigned, unsigned>(EReg, Reg));
    // A Q register contributes its second D sub-register as well.
    if (isQReg) {
      EReg = MRI->getEncodingValue(++Reg);
      Registers.push_back(std::pair<unsigned, unsigned>(EReg, Reg));
    }
  }

  if (Parser.getTok().isNot(AsmToken::RCurly))
    return Error(Parser.getTok().getLoc(), "'}' expected");
  SMLoc E = Parser.getTok().getEndLoc();
  Parser.Lex(); // Eat '}' token.

  // Push the register list operand.
  Operands.push_back(ARMOperand::CreateRegList(Registers, S, E));

  // The ARM system instruction variants for LDM/STM have a '^' token here.
  if (Parser.getTok().is(AsmToken::Caret)) {
    Operands.push_back(ARMOperand::CreateToken("^",Parser.getTok().getLoc()));
    Parser.Lex(); // Eat '^' token.
  }

  return false;
}
3749
3750// Helper function to parse the lane index for vector lists.
OperandMatchResultTy ARMAsmParser::
parseVectorLane(VectorLaneTy &LaneKind, unsigned &Index, SMLoc &EndLoc) {
  MCAsmParser &Parser = getParser();
  Index = 0; // Always return a defined index value.
  if (Parser.getTok().is(AsmToken::LBrac)) {
    Parser.Lex(); // Eat the '['.
    if (Parser.getTok().is(AsmToken::RBrac)) {
      // "Dn[]" is the 'all lanes' syntax.
      LaneKind = AllLanes;
      EndLoc = Parser.getTok().getEndLoc();
      Parser.Lex(); // Eat the ']'.
      return MatchOperand_Success;
    }

    // There's an optional '#' token here. Normally there wouldn't be, but
    // inline assembly puts one in, and it's friendly to accept that.
    // NOTE(review): only a Hash token is consumed here although the comment
    // below mentions '$'; other operand parsers in this file also accept
    // AsmToken::Dollar -- confirm whether '$' should be handled here too.
    if (Parser.getTok().is(AsmToken::Hash))
      Parser.Lex(); // Eat '#' or '$'.

    const MCExpr *LaneIndex;
    SMLoc Loc = Parser.getTok().getLoc();
    if (getParser().parseExpression(LaneIndex)) {
      Error(Loc, "illegal expression");
      return MatchOperand_ParseFail;
    }
    // The lane index must fold to a constant.
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(LaneIndex);
    if (!CE) {
      Error(Loc, "lane index must be empty or an integer");
      return MatchOperand_ParseFail;
    }
    if (Parser.getTok().isNot(AsmToken::RBrac)) {
      Error(Parser.getTok().getLoc(), "']' expected");
      return MatchOperand_ParseFail;
    }
    EndLoc = Parser.getTok().getEndLoc();
    Parser.Lex(); // Eat the ']'.
    int64_t Val = CE->getValue();

    // FIXME: Make this range check context sensitive for .8, .16, .32.
    if (Val < 0 || Val > 7) {
      Error(Parser.getTok().getLoc(), "lane index out of range");
      return MatchOperand_ParseFail;
    }
    Index = Val;
    LaneKind = IndexedLane;
    return MatchOperand_Success;
  }
  // No '[' at all: the register carries no lane specifier.
  LaneKind = NoLanes;
  return MatchOperand_Success;
}
3801
3802// parse a vector register list
OperandMatchResultTy
ARMAsmParser::parseVectorList(OperandVector &Operands) {
  MCAsmParser &Parser = getParser();
  VectorLaneTy LaneKind;
  unsigned LaneIndex;
  SMLoc S = Parser.getTok().getLoc();
  // As an extension (to match gas), support a plain D register or Q register
  // (without enclosing curly braces) as a single or double entry list,
  // respectively.
  if (Parser.getTok().is(AsmToken::Identifier)) {
    SMLoc E = Parser.getTok().getEndLoc();
    int Reg = tryParseRegister();
    if (Reg == -1)
      return MatchOperand_NoMatch;
    // Bare D register: one-element list, with an optional lane suffix.
    if (ARMMCRegisterClasses[ARM::DPRRegClassID].contains(Reg)) {
      OperandMatchResultTy Res = parseVectorLane(LaneKind, LaneIndex, E);
      if (Res != MatchOperand_Success)
        return Res;
      switch (LaneKind) {
      case NoLanes:
        Operands.push_back(ARMOperand::CreateVectorList(Reg, 1, false, S, E));
        break;
      case AllLanes:
        Operands.push_back(ARMOperand::CreateVectorListAllLanes(Reg, 1, false,
                                                                S, E));
        break;
      case IndexedLane:
        Operands.push_back(ARMOperand::CreateVectorListIndexed(Reg, 1,
                                                               LaneIndex,
                                                               false, S, E));
        break;
      }
      return MatchOperand_Success;
    }
    // Bare Q register: treated as the two-element list of its D halves.
    if (ARMMCRegisterClasses[ARM::QPRRegClassID].contains(Reg)) {
      Reg = getDRegFromQReg(Reg);
      OperandMatchResultTy Res = parseVectorLane(LaneKind, LaneIndex, E);
      if (Res != MatchOperand_Success)
        return Res;
      switch (LaneKind) {
      case NoLanes:
        // Promote to the composite D-pair register class.
        Reg = MRI->getMatchingSuperReg(Reg, ARM::dsub_0,
                                   &ARMMCRegisterClasses[ARM::DPairRegClassID]);
        Operands.push_back(ARMOperand::CreateVectorList(Reg, 2, false, S, E));
        break;
      case AllLanes:
        Reg = MRI->getMatchingSuperReg(Reg, ARM::dsub_0,
                                   &ARMMCRegisterClasses[ARM::DPairRegClassID]);
        Operands.push_back(ARMOperand::CreateVectorListAllLanes(Reg, 2, false,
                                                                S, E));
        break;
      case IndexedLane:
        Operands.push_back(ARMOperand::CreateVectorListIndexed(Reg, 2,
                                                               LaneIndex,
                                                               false, S, E));
        break;
      }
      return MatchOperand_Success;
    }
    Error(S, "vector register expected");
    return MatchOperand_ParseFail;
  }

  if (Parser.getTok().isNot(AsmToken::LCurly))
    return MatchOperand_NoMatch;

  Parser.Lex(); // Eat '{' token.
  SMLoc RegLoc = Parser.getTok().getLoc();

  int Reg = tryParseRegister();
  if (Reg == -1) {
    Error(RegLoc, "register expected");
    return MatchOperand_ParseFail;
  }
  // Count: number of D registers in the list. Spacing: 0 = unknown yet,
  // 1 = single-spaced, 2 = double-spaced. FirstReg: first D register.
  unsigned Count = 1;
  int Spacing = 0;
  unsigned FirstReg = Reg;
  // The list is of D registers, but we also allow Q regs and just interpret
  // them as the two D sub-registers.
  if (ARMMCRegisterClasses[ARM::QPRRegClassID].contains(Reg)) {
    FirstReg = Reg = getDRegFromQReg(Reg);
    Spacing = 1; // double-spacing requires explicit D registers, otherwise
                 // it's ambiguous with four-register single spaced.
    ++Reg;
    ++Count;
  }

  SMLoc E;
  if (parseVectorLane(LaneKind, LaneIndex, E) != MatchOperand_Success)
    return MatchOperand_ParseFail;

  while (Parser.getTok().is(AsmToken::Comma) ||
         Parser.getTok().is(AsmToken::Minus)) {
    if (Parser.getTok().is(AsmToken::Minus)) {
      // Range syntax, e.g. {d0-d3}.
      if (!Spacing)
        Spacing = 1; // Register range implies a single spaced list.
      else if (Spacing == 2) {
        Error(Parser.getTok().getLoc(),
              "sequential registers in double spaced list");
        return MatchOperand_ParseFail;
      }
      Parser.Lex(); // Eat the minus.
      SMLoc AfterMinusLoc = Parser.getTok().getLoc();
      int EndReg = tryParseRegister();
      if (EndReg == -1) {
        Error(AfterMinusLoc, "register expected");
        return MatchOperand_ParseFail;
      }
      // Allow Q regs and just interpret them as the two D sub-registers.
      if (ARMMCRegisterClasses[ARM::QPRRegClassID].contains(EndReg))
        EndReg = getDRegFromQReg(EndReg) + 1;
      // If the register is the same as the start reg, there's nothing
      // more to do.
      if (Reg == EndReg)
        continue;
      // The register must be in the same register class as the first.
      if (!ARMMCRegisterClasses[ARM::DPRRegClassID].contains(EndReg)) {
        Error(AfterMinusLoc, "invalid register in register list");
        return MatchOperand_ParseFail;
      }
      // Ranges must go from low to high.
      if (Reg > EndReg) {
        Error(AfterMinusLoc, "bad range in register list");
        return MatchOperand_ParseFail;
      }
      // Parse the lane specifier if present; it must match the first one.
      VectorLaneTy NextLaneKind;
      unsigned NextLaneIndex;
      if (parseVectorLane(NextLaneKind, NextLaneIndex, E) !=
          MatchOperand_Success)
        return MatchOperand_ParseFail;
      if (NextLaneKind != LaneKind || LaneIndex != NextLaneIndex) {
        Error(AfterMinusLoc, "mismatched lane index in register list");
        return MatchOperand_ParseFail;
      }

      // Add all the registers in the range to the register list.
      Count += EndReg - Reg;
      Reg = EndReg;
      continue;
    }
    Parser.Lex(); // Eat the comma.
    RegLoc = Parser.getTok().getLoc();
    int OldReg = Reg;
    Reg = tryParseRegister();
    if (Reg == -1) {
      Error(RegLoc, "register expected");
      return MatchOperand_ParseFail;
    }
    // vector register lists must be contiguous.
    // It's OK to use the enumeration values directly here rather than via
    // encodings, as the VFP register classes have the enum sorted properly.
    //
    // The list is of D registers, but we also allow Q regs and just interpret
    // them as the two D sub-registers.
    if (ARMMCRegisterClasses[ARM::QPRRegClassID].contains(Reg)) {
      if (!Spacing)
        Spacing = 1; // Register range implies a single spaced list.
      else if (Spacing == 2) {
        Error(RegLoc,
              "invalid register in double-spaced list (must be 'D' register')");
        return MatchOperand_ParseFail;
      }
      Reg = getDRegFromQReg(Reg);
      if (Reg != OldReg + 1) {
        Error(RegLoc, "non-contiguous register range");
        return MatchOperand_ParseFail;
      }
      ++Reg;
      Count += 2;
      // Parse the lane specifier if present; it must match the first one.
      VectorLaneTy NextLaneKind;
      unsigned NextLaneIndex;
      SMLoc LaneLoc = Parser.getTok().getLoc();
      if (parseVectorLane(NextLaneKind, NextLaneIndex, E) !=
          MatchOperand_Success)
        return MatchOperand_ParseFail;
      if (NextLaneKind != LaneKind || LaneIndex != NextLaneIndex) {
        Error(LaneLoc, "mismatched lane index in register list");
        return MatchOperand_ParseFail;
      }
      continue;
    }
    // Normal D register.
    // Figure out the register spacing (single or double) of the list if
    // we don't know it already.
    if (!Spacing)
      Spacing = 1 + (Reg == OldReg + 2);

    // Just check that it's contiguous and keep going.
    if (Reg != OldReg + Spacing) {
      Error(RegLoc, "non-contiguous register range");
      return MatchOperand_ParseFail;
    }
    ++Count;
    // Parse the lane specifier if present; it must match the first one.
    VectorLaneTy NextLaneKind;
    unsigned NextLaneIndex;
    SMLoc EndLoc = Parser.getTok().getLoc();
    if (parseVectorLane(NextLaneKind, NextLaneIndex, E) != MatchOperand_Success)
      return MatchOperand_ParseFail;
    if (NextLaneKind != LaneKind || LaneIndex != NextLaneIndex) {
      Error(EndLoc, "mismatched lane index in register list");
      return MatchOperand_ParseFail;
    }
  }

  if (Parser.getTok().isNot(AsmToken::RCurly)) {
    Error(Parser.getTok().getLoc(), "'}' expected");
    return MatchOperand_ParseFail;
  }
  E = Parser.getTok().getEndLoc();
  Parser.Lex(); // Eat '}' token.

  // Build the operand; the lane kind seen on the first register decides
  // which operand flavor is created.
  switch (LaneKind) {
  case NoLanes:
    // Two-register operands have been converted to the
    // composite register classes.
    if (Count == 2) {
      const MCRegisterClass *RC = (Spacing == 1) ?
        &ARMMCRegisterClasses[ARM::DPairRegClassID] :
        &ARMMCRegisterClasses[ARM::DPairSpcRegClassID];
      FirstReg = MRI->getMatchingSuperReg(FirstReg, ARM::dsub_0, RC);
    }
    Operands.push_back(ARMOperand::CreateVectorList(FirstReg, Count,
                                                    (Spacing == 2), S, E));
    break;
  case AllLanes:
    // Two-register operands have been converted to the
    // composite register classes.
    if (Count == 2) {
      const MCRegisterClass *RC = (Spacing == 1) ?
        &ARMMCRegisterClasses[ARM::DPairRegClassID] :
        &ARMMCRegisterClasses[ARM::DPairSpcRegClassID];
      FirstReg = MRI->getMatchingSuperReg(FirstReg, ARM::dsub_0, RC);
    }
    Operands.push_back(ARMOperand::CreateVectorListAllLanes(FirstReg, Count,
                                                            (Spacing == 2),
                                                            S, E));
    break;
  case IndexedLane:
    Operands.push_back(ARMOperand::CreateVectorListIndexed(FirstReg, Count,
                                                           LaneIndex,
                                                           (Spacing == 2),
                                                           S, E));
    break;
  }
  return MatchOperand_Success;
}
4052
4053/// parseMemBarrierOptOperand - Try to parse DSB/DMB data barrier options.
4054OperandMatchResultTy
4055ARMAsmParser::parseMemBarrierOptOperand(OperandVector &Operands) {
4056 MCAsmParser &Parser = getParser();
4057 SMLoc S = Parser.getTok().getLoc();
4058 const AsmToken &Tok = Parser.getTok();
4059 unsigned Opt;
4060
4061 if (Tok.is(AsmToken::Identifier)) {
4062 StringRef OptStr = Tok.getString();
4063
4064 Opt = StringSwitch<unsigned>(OptStr.slice(0, OptStr.size()).lower())
4065 .Case("sy", ARM_MB::SY)
4066 .Case("st", ARM_MB::ST)
4067 .Case("ld", ARM_MB::LD)
4068 .Case("sh", ARM_MB::ISH)
4069 .Case("ish", ARM_MB::ISH)
4070 .Case("shst", ARM_MB::ISHST)
4071 .Case("ishst", ARM_MB::ISHST)
4072 .Case("ishld", ARM_MB::ISHLD)
4073 .Case("nsh", ARM_MB::NSH)
4074 .Case("un", ARM_MB::NSH)
4075 .Case("nshst", ARM_MB::NSHST)
4076 .Case("nshld", ARM_MB::NSHLD)
4077 .Case("unst", ARM_MB::NSHST)
4078 .Case("osh", ARM_MB::OSH)
4079 .Case("oshst", ARM_MB::OSHST)
4080 .Case("oshld", ARM_MB::OSHLD)
4081 .Default(~0U);
4082
4083 // ishld, oshld, nshld and ld are only available from ARMv8.
4084 if (!hasV8Ops() && (Opt == ARM_MB::ISHLD || Opt == ARM_MB::OSHLD ||
4085 Opt == ARM_MB::NSHLD || Opt == ARM_MB::LD))
4086 Opt = ~0U;
4087
4088 if (Opt == ~0U)
4089 return MatchOperand_NoMatch;
4090
4091 Parser.Lex(); // Eat identifier token.
4092 } else if (Tok.is(AsmToken::Hash) ||
4093 Tok.is(AsmToken::Dollar) ||
4094 Tok.is(AsmToken::Integer)) {
4095 if (Parser.getTok().isNot(AsmToken::Integer))
4096 Parser.Lex(); // Eat '#' or '$'.
4097 SMLoc Loc = Parser.getTok().getLoc();
4098
4099 const MCExpr *MemBarrierID;
4100 if (getParser().parseExpression(MemBarrierID)) {
4101 Error(Loc, "illegal expression");
4102 return MatchOperand_ParseFail;
4103 }
4104
4105 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(MemBarrierID);
4106 if (!CE) {
4107 Error(Loc, "constant expression expected");
4108 return MatchOperand_ParseFail;
4109 }
4110
4111 int Val = CE->getValue();
4112 if (Val & ~0xf) {
4113 Error(Loc, "immediate value out of range");
4114 return MatchOperand_ParseFail;
4115 }
4116
4117 Opt = ARM_MB::RESERVED_0 + Val;
4118 } else
4119 return MatchOperand_ParseFail;
4120
4121 Operands.push_back(ARMOperand::CreateMemBarrierOpt((ARM_MB::MemBOpt)Opt, S));
4122 return MatchOperand_Success;
4123}
4124
4125/// parseInstSyncBarrierOptOperand - Try to parse ISB inst sync barrier options.
4126OperandMatchResultTy
4127ARMAsmParser::parseInstSyncBarrierOptOperand(OperandVector &Operands) {
4128 MCAsmParser &Parser = getParser();
4129 SMLoc S = Parser.getTok().getLoc();
4130 const AsmToken &Tok = Parser.getTok();
4131 unsigned Opt;
4132
4133 if (Tok.is(AsmToken::Identifier)) {
4134 StringRef OptStr = Tok.getString();
4135
4136 if (OptStr.equals_lower("sy"))
4137 Opt = ARM_ISB::SY;
4138 else
4139 return MatchOperand_NoMatch;
4140
4141 Parser.Lex(); // Eat identifier token.
4142 } else if (Tok.is(AsmToken::Hash) ||
4143 Tok.is(AsmToken::Dollar) ||
4144 Tok.is(AsmToken::Integer)) {
4145 if (Parser.getTok().isNot(AsmToken::Integer))
4146 Parser.Lex(); // Eat '#' or '$'.
4147 SMLoc Loc = Parser.getTok().getLoc();
4148
4149 const MCExpr *ISBarrierID;
4150 if (getParser().parseExpression(ISBarrierID)) {
4151 Error(Loc, "illegal expression");
4152 return MatchOperand_ParseFail;
4153 }
4154
4155 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(ISBarrierID);
4156 if (!CE) {
4157 Error(Loc, "constant expression expected");
4158 return MatchOperand_ParseFail;
4159 }
4160
4161 int Val = CE->getValue();
4162 if (Val & ~0xf) {
4163 Error(Loc, "immediate value out of range");
4164 return MatchOperand_ParseFail;
4165 }
4166
4167 Opt = ARM_ISB::RESERVED_0 + Val;
4168 } else
4169 return MatchOperand_ParseFail;
4170
4171 Operands.push_back(ARMOperand::CreateInstSyncBarrierOpt(
4172 (ARM_ISB::InstSyncBOpt)Opt, S));
4173 return MatchOperand_Success;
4174}
4175
4176
4177/// parseProcIFlagsOperand - Try to parse iflags from CPS instruction.
4178OperandMatchResultTy
4179ARMAsmParser::parseProcIFlagsOperand(OperandVector &Operands) {
4180 MCAsmParser &Parser = getParser();
4181 SMLoc S = Parser.getTok().getLoc();
4182 const AsmToken &Tok = Parser.getTok();
4183 if (!Tok.is(AsmToken::Identifier))
4184 return MatchOperand_NoMatch;
4185 StringRef IFlagsStr = Tok.getString();
4186
4187 // An iflags string of "none" is interpreted to mean that none of the AIF
4188 // bits are set. Not a terribly useful instruction, but a valid encoding.
4189 unsigned IFlags = 0;
4190 if (IFlagsStr != "none") {
4191 for (int i = 0, e = IFlagsStr.size(); i != e; ++i) {
4192 unsigned Flag = StringSwitch<unsigned>(IFlagsStr.substr(i, 1).lower())
4193 .Case("a", ARM_PROC::A)
4194 .Case("i", ARM_PROC::I)
4195 .Case("f", ARM_PROC::F)
4196 .Default(~0U);
4197
4198 // If some specific iflag is already set, it means that some letter is
4199 // present more than once, this is not acceptable.
4200 if (Flag == ~0U || (IFlags & Flag))
4201 return MatchOperand_NoMatch;
4202
4203 IFlags |= Flag;
4204 }
4205 }
4206
4207 Parser.Lex(); // Eat identifier token.
4208 Operands.push_back(ARMOperand::CreateProcIFlags((ARM_PROC::IFlags)IFlags, S));
4209 return MatchOperand_Success;
4210}
4211
4212/// parseMSRMaskOperand - Try to parse mask flags from MSR instruction.
4213OperandMatchResultTy
4214ARMAsmParser::parseMSRMaskOperand(OperandVector &Operands) {
4215 MCAsmParser &Parser = getParser();
4216 SMLoc S = Parser.getTok().getLoc();
4217 const AsmToken &Tok = Parser.getTok();
4218 if (!Tok.is(AsmToken::Identifier))
4219 return MatchOperand_NoMatch;
4220 StringRef Mask = Tok.getString();
4221
4222 if (isMClass()) {
4223 auto TheReg = ARMSysReg::lookupMClassSysRegByName(Mask.lower());
4224 if (!TheReg || !TheReg->hasRequiredFeatures(getSTI().getFeatureBits()))
4225 return MatchOperand_NoMatch;
4226
4227 unsigned SYSmvalue = TheReg->Encoding & 0xFFF;
4228
4229 Parser.Lex(); // Eat identifier token.
4230 Operands.push_back(ARMOperand::CreateMSRMask(SYSmvalue, S));
4231 return MatchOperand_Success;
4232 }
4233
4234 // Split spec_reg from flag, example: CPSR_sxf => "CPSR" and "sxf"
4235 size_t Start = 0, Next = Mask.find('_');
4236 StringRef Flags = "";
4237 std::string SpecReg = Mask.slice(Start, Next).lower();
4238 if (Next != StringRef::npos)
4239 Flags = Mask.slice(Next+1, Mask.size());
4240
4241 // FlagsVal contains the complete mask:
4242 // 3-0: Mask
4243 // 4: Special Reg (cpsr, apsr => 0; spsr => 1)
4244 unsigned FlagsVal = 0;
4245
4246 if (SpecReg == "apsr") {
4247 FlagsVal = StringSwitch<unsigned>(Flags)
4248 .Case("nzcvq", 0x8) // same as CPSR_f
4249 .Case("g", 0x4) // same as CPSR_s
4250 .Case("nzcvqg", 0xc) // same as CPSR_fs
4251 .Default(~0U);
4252
4253 if (FlagsVal == ~0U) {
4254 if (!Flags.empty())
4255 return MatchOperand_NoMatch;
4256 else
4257 FlagsVal = 8; // No flag
4258 }
4259 } else if (SpecReg == "cpsr" || SpecReg == "spsr") {
4260 // cpsr_all is an alias for cpsr_fc, as is plain cpsr.
4261 if (Flags == "all" || Flags == "")
4262 Flags = "fc";
4263 for (int i = 0, e = Flags.size(); i != e; ++i) {
4264 unsigned Flag = StringSwitch<unsigned>(Flags.substr(i, 1))
4265 .Case("c", 1)
4266 .Case("x", 2)
4267 .Case("s", 4)
4268 .Case("f", 8)
4269 .Default(~0U);
4270
4271 // If some specific flag is already set, it means that some letter is
4272 // present more than once, this is not acceptable.
4273 if (Flag == ~0U || (FlagsVal & Flag))
4274 return MatchOperand_NoMatch;
4275 FlagsVal |= Flag;
4276 }
4277 } else // No match for special register.
4278 return MatchOperand_NoMatch;
4279
4280 // Special register without flags is NOT equivalent to "fc" flags.
4281 // NOTE: This is a divergence from gas' behavior. Uncommenting the following
4282 // two lines would enable gas compatibility at the expense of breaking
4283 // round-tripping.
4284 //
4285 // if (!FlagsVal)
4286 // FlagsVal = 0x9;
4287
4288 // Bit 4: Special Reg (cpsr, apsr => 0; spsr => 1)
4289 if (SpecReg == "spsr")
4290 FlagsVal |= 16;
4291
4292 Parser.Lex(); // Eat identifier token.
4293 Operands.push_back(ARMOperand::CreateMSRMask(FlagsVal, S));
4294 return MatchOperand_Success;
4295}
4296
4297/// parseBankedRegOperand - Try to parse a banked register (e.g. "lr_irq") for
4298/// use in the MRS/MSR instructions added to support virtualization.
4299OperandMatchResultTy
4300ARMAsmParser::parseBankedRegOperand(OperandVector &Operands) {
4301 MCAsmParser &Parser = getParser();
4302 SMLoc S = Parser.getTok().getLoc();
4303 const AsmToken &Tok = Parser.getTok();
4304 if (!Tok.is(AsmToken::Identifier))
4305 return MatchOperand_NoMatch;
4306 StringRef RegName = Tok.getString();
4307
4308 auto TheReg = ARMBankedReg::lookupBankedRegByName(RegName.lower());
4309 if (!TheReg)
4310 return MatchOperand_NoMatch;
4311 unsigned Encoding = TheReg->Encoding;
4312
4313 Parser.Lex(); // Eat identifier token.
4314 Operands.push_back(ARMOperand::CreateBankedReg(Encoding, S));
4315 return MatchOperand_Success;
4316}
4317
4318OperandMatchResultTy
4319ARMAsmParser::parsePKHImm(OperandVector &Operands, StringRef Op, int Low,
4320 int High) {
4321 MCAsmParser &Parser = getParser();
4322 const AsmToken &Tok = Parser.getTok();
4323 if (Tok.isNot(AsmToken::Identifier)) {
4324 Error(Parser.getTok().getLoc(), Op + " operand expected.");
4325 return MatchOperand_ParseFail;
4326 }
4327 StringRef ShiftName = Tok.getString();
4328 std::string LowerOp = Op.lower();
4329 std::string UpperOp = Op.upper();
4330 if (ShiftName != LowerOp && ShiftName != UpperOp) {
4331 Error(Parser.getTok().getLoc(), Op + " operand expected.");
4332 return MatchOperand_ParseFail;
4333 }
4334 Parser.Lex(); // Eat shift type token.
4335
4336 // There must be a '#' and a shift amount.
4337 if (Parser.getTok().isNot(AsmToken::Hash) &&
4338 Parser.getTok().isNot(AsmToken::Dollar)) {
4339 Error(Parser.getTok().getLoc(), "'#' expected");
4340 return MatchOperand_ParseFail;
4341 }
4342 Parser.Lex(); // Eat hash token.
4343
4344 const MCExpr *ShiftAmount;
4345 SMLoc Loc = Parser.getTok().getLoc();
4346 SMLoc EndLoc;
4347 if (getParser().parseExpression(ShiftAmount, EndLoc)) {
4348 Error(Loc, "illegal expression");
4349 return MatchOperand_ParseFail;
4350 }
4351 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(ShiftAmount);
4352 if (!CE) {
4353 Error(Loc, "constant expression expected");
4354 return MatchOperand_ParseFail;
4355 }
4356 int Val = CE->getValue();
4357 if (Val < Low || Val > High) {
4358 Error(Loc, "immediate value out of range");
4359 return MatchOperand_ParseFail;
4360 }
4361
4362 Operands.push_back(ARMOperand::CreateImm(CE, Loc, EndLoc));
4363
4364 return MatchOperand_Success;
4365}
4366
4367OperandMatchResultTy
4368ARMAsmParser::parseSetEndImm(OperandVector &Operands) {
4369 MCAsmParser &Parser = getParser();
4370 const AsmToken &Tok = Parser.getTok();
4371 SMLoc S = Tok.getLoc();
4372 if (Tok.isNot(AsmToken::Identifier)) {
4373 Error(S, "'be' or 'le' operand expected");
4374 return MatchOperand_ParseFail;
4375 }
4376 int Val = StringSwitch<int>(Tok.getString().lower())
4377 .Case("be", 1)
4378 .Case("le", 0)
4379 .Default(-1);
4380 Parser.Lex(); // Eat the token.
4381
4382 if (Val == -1) {
4383 Error(S, "'be' or 'le' operand expected");
4384 return MatchOperand_ParseFail;
4385 }
4386 Operands.push_back(ARMOperand::CreateImm(MCConstantExpr::create(Val,
4387 getContext()),
4388 S, Tok.getEndLoc()));
4389 return MatchOperand_Success;
4390}
4391
4392/// parseShifterImm - Parse the shifter immediate operand for SSAT/USAT
4393/// instructions. Legal values are:
4394/// lsl #n 'n' in [0,31]
4395/// asr #n 'n' in [1,32]
4396/// n == 32 encoded as n == 0.
4397OperandMatchResultTy
4398ARMAsmParser::parseShifterImm(OperandVector &Operands) {
4399 MCAsmParser &Parser = getParser();
4400 const AsmToken &Tok = Parser.getTok();
4401 SMLoc S = Tok.getLoc();
4402 if (Tok.isNot(AsmToken::Identifier)) {
4403 Error(S, "shift operator 'asr' or 'lsl' expected");
4404 return MatchOperand_ParseFail;
4405 }
4406 StringRef ShiftName = Tok.getString();
4407 bool isASR;
4408 if (ShiftName == "lsl" || ShiftName == "LSL")
4409 isASR = false;
4410 else if (ShiftName == "asr" || ShiftName == "ASR")
4411 isASR = true;
4412 else {
4413 Error(S, "shift operator 'asr' or 'lsl' expected");
4414 return MatchOperand_ParseFail;
4415 }
4416 Parser.Lex(); // Eat the operator.
4417
4418 // A '#' and a shift amount.
4419 if (Parser.getTok().isNot(AsmToken::Hash) &&
4420 Parser.getTok().isNot(AsmToken::Dollar)) {
4421 Error(Parser.getTok().getLoc(), "'#' expected");
4422 return MatchOperand_ParseFail;
4423 }
4424 Parser.Lex(); // Eat hash token.
4425 SMLoc ExLoc = Parser.getTok().getLoc();
4426
4427 const MCExpr *ShiftAmount;
4428 SMLoc EndLoc;
4429 if (getParser().parseExpression(ShiftAmount, EndLoc)) {
4430 Error(ExLoc, "malformed shift expression");
4431 return MatchOperand_ParseFail;
4432 }
4433 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(ShiftAmount);
4434 if (!CE) {
4435 Error(ExLoc, "shift amount must be an immediate");
4436 return MatchOperand_ParseFail;
4437 }
4438
4439 int64_t Val = CE->getValue();
4440 if (isASR) {
4441 // Shift amount must be in [1,32]
4442 if (Val < 1 || Val > 32) {
4443 Error(ExLoc, "'asr' shift amount must be in range [1,32]");
4444 return MatchOperand_ParseFail;
4445 }
4446 // asr #32 encoded as asr #0, but is not allowed in Thumb2 mode.
4447 if (isThumb() && Val == 32) {
4448 Error(ExLoc, "'asr #32' shift amount not allowed in Thumb mode");
4449 return MatchOperand_ParseFail;
4450 }
4451 if (Val == 32) Val = 0;
4452 } else {
4453 // Shift amount must be in [1,32]
4454 if (Val < 0 || Val > 31) {
4455 Error(ExLoc, "'lsr' shift amount must be in range [0,31]");
4456 return MatchOperand_ParseFail;
4457 }
4458 }
4459
4460 Operands.push_back(ARMOperand::CreateShifterImm(isASR, Val, S, EndLoc));
4461
4462 return MatchOperand_Success;
4463}
4464
4465/// parseRotImm - Parse the shifter immediate operand for SXTB/UXTB family
4466/// of instructions. Legal values are:
4467/// ror #n 'n' in {0, 8, 16, 24}
4468OperandMatchResultTy
4469ARMAsmParser::parseRotImm(OperandVector &Operands) {
4470 MCAsmParser &Parser = getParser();
4471 const AsmToken &Tok = Parser.getTok();
4472 SMLoc S = Tok.getLoc();
4473 if (Tok.isNot(AsmToken::Identifier))
4474 return MatchOperand_NoMatch;
4475 StringRef ShiftName = Tok.getString();
4476 if (ShiftName != "ror" && ShiftName != "ROR")
4477 return MatchOperand_NoMatch;
4478 Parser.Lex(); // Eat the operator.
4479
4480 // A '#' and a rotate amount.
4481 if (Parser.getTok().isNot(AsmToken::Hash) &&
4482 Parser.getTok().isNot(AsmToken::Dollar)) {
4483 Error(Parser.getTok().getLoc(), "'#' expected");
4484 return MatchOperand_ParseFail;
4485 }
4486 Parser.Lex(); // Eat hash token.
4487 SMLoc ExLoc = Parser.getTok().getLoc();
4488
4489 const MCExpr *ShiftAmount;
4490 SMLoc EndLoc;
4491 if (getParser().parseExpression(ShiftAmount, EndLoc)) {
4492 Error(ExLoc, "malformed rotate expression");
4493 return MatchOperand_ParseFail;
4494 }
4495 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(ShiftAmount);
4496 if (!CE) {
4497 Error(ExLoc, "rotate amount must be an immediate");
4498 return MatchOperand_ParseFail;
4499 }
4500
4501 int64_t Val = CE->getValue();
4502 // Shift amount must be in {0, 8, 16, 24} (0 is undocumented extension)
4503 // normally, zero is represented in asm by omitting the rotate operand
4504 // entirely.
4505 if (Val != 8 && Val != 16 && Val != 24 && Val != 0) {
4506 Error(ExLoc, "'ror' rotate amount must be 8, 16, or 24");
4507 return MatchOperand_ParseFail;
4508 }
4509
4510 Operands.push_back(ARMOperand::CreateRotImm(Val, S, EndLoc));
4511
4512 return MatchOperand_Success;
4513}
4514
4515OperandMatchResultTy
4516ARMAsmParser::parseModImm(OperandVector &Operands) {
4517 MCAsmParser &Parser = getParser();
4518 MCAsmLexer &Lexer = getLexer();
4519 int64_t Imm1, Imm2;
4520
4521 SMLoc S = Parser.getTok().getLoc();
4522
4523 // 1) A mod_imm operand can appear in the place of a register name:
4524 // add r0, #mod_imm
4525 // add r0, r0, #mod_imm
4526 // to correctly handle the latter, we bail out as soon as we see an
4527 // identifier.
4528 //
4529 // 2) Similarly, we do not want to parse into complex operands:
4530 // mov r0, #mod_imm
4531 // mov r0, :lower16:(_foo)
4532 if (Parser.getTok().is(AsmToken::Identifier) ||
4533 Parser.getTok().is(AsmToken::Colon))
4534 return MatchOperand_NoMatch;
4535
4536 // Hash (dollar) is optional as per the ARMARM
4537 if (Parser.getTok().is(AsmToken::Hash) ||
4538 Parser.getTok().is(AsmToken::Dollar)) {
4539 // Avoid parsing into complex operands (#:)
4540 if (Lexer.peekTok().is(AsmToken::Colon))
4541 return MatchOperand_NoMatch;
4542
4543 // Eat the hash (dollar)
4544 Parser.Lex();
4545 }
4546
4547 SMLoc Sx1, Ex1;
4548 Sx1 = Parser.getTok().getLoc();
4549 const MCExpr *Imm1Exp;
4550 if (getParser().parseExpression(Imm1Exp, Ex1)) {
4551 Error(Sx1, "malformed expression");
4552 return MatchOperand_ParseFail;
4553 }
4554
4555 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Imm1Exp);
4556
4557 if (CE) {
4558 // Immediate must fit within 32-bits
4559 Imm1 = CE->getValue();
4560 int Enc = ARM_AM::getSOImmVal(Imm1);
4561 if (Enc != -1 && Parser.getTok().is(AsmToken::EndOfStatement)) {
4562 // We have a match!
4563 Operands.push_back(ARMOperand::CreateModImm((Enc & 0xFF),
4564 (Enc & 0xF00) >> 7,
4565 Sx1, Ex1));
4566 return MatchOperand_Success;
4567 }
4568
4569 // We have parsed an immediate which is not for us, fallback to a plain
4570 // immediate. This can happen for instruction aliases. For an example,
4571 // ARMInstrInfo.td defines the alias [mov <-> mvn] which can transform
4572 // a mov (mvn) with a mod_imm_neg/mod_imm_not operand into the opposite
4573 // instruction with a mod_imm operand. The alias is defined such that the
4574 // parser method is shared, that's why we have to do this here.
4575 if (Parser.getTok().is(AsmToken::EndOfStatement)) {
4576 Operands.push_back(ARMOperand::CreateImm(Imm1Exp, Sx1, Ex1));
4577 return MatchOperand_Success;
4578 }
4579 } else {
4580 // Operands like #(l1 - l2) can only be evaluated at a later stage (via an
4581 // MCFixup). Fallback to a plain immediate.
4582 Operands.push_back(ARMOperand::CreateImm(Imm1Exp, Sx1, Ex1));
4583 return MatchOperand_Success;
4584 }
4585
4586 // From this point onward, we expect the input to be a (#bits, #rot) pair
4587 if (Parser.getTok().isNot(AsmToken::Comma)) {
4588 Error(Sx1, "expected modified immediate operand: #[0, 255], #even[0-30]");
4589 return MatchOperand_ParseFail;
4590 }
4591
4592 if (Imm1 & ~0xFF) {
4593 Error(Sx1, "immediate operand must a number in the range [0, 255]");
4594 return MatchOperand_ParseFail;
4595 }
4596
4597 // Eat the comma
4598 Parser.Lex();
4599
4600 // Repeat for #rot
4601 SMLoc Sx2, Ex2;
4602 Sx2 = Parser.getTok().getLoc();
4603
4604 // Eat the optional hash (dollar)
4605 if (Parser.getTok().is(AsmToken::Hash) ||
4606 Parser.getTok().is(AsmToken::Dollar))
4607 Parser.Lex();
4608
4609 const MCExpr *Imm2Exp;
4610 if (getParser().parseExpression(Imm2Exp, Ex2)) {
4611 Error(Sx2, "malformed expression");
4612 return MatchOperand_ParseFail;
4613 }
4614
4615 CE = dyn_cast<MCConstantExpr>(Imm2Exp);
4616
4617 if (CE) {
4618 Imm2 = CE->getValue();
4619 if (!(Imm2 & ~0x1E)) {
4620 // We have a match!
4621 Operands.push_back(ARMOperand::CreateModImm(Imm1, Imm2, S, Ex2));
4622 return MatchOperand_Success;
4623 }
4624 Error(Sx2, "immediate operand must an even number in the range [0, 30]");
4625 return MatchOperand_ParseFail;
4626 } else {
4627 Error(Sx2, "constant expression expected");
4628 return MatchOperand_ParseFail;
4629 }
4630}
4631
4632OperandMatchResultTy
4633ARMAsmParser::parseBitfield(OperandVector &Operands) {
4634 MCAsmParser &Parser = getParser();
4635 SMLoc S = Parser.getTok().getLoc();
4636 // The bitfield descriptor is really two operands, the LSB and the width.
4637 if (Parser.getTok().isNot(AsmToken::Hash) &&
4638 Parser.getTok().isNot(AsmToken::Dollar)) {
4639 Error(Parser.getTok().getLoc(), "'#' expected");
4640 return MatchOperand_ParseFail;
4641 }
4642 Parser.Lex(); // Eat hash token.
4643
4644 const MCExpr *LSBExpr;
4645 SMLoc E = Parser.getTok().getLoc();
4646 if (getParser().parseExpression(LSBExpr)) {
4647 Error(E, "malformed immediate expression");
4648 return MatchOperand_ParseFail;
4649 }
4650 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(LSBExpr);
4651 if (!CE) {
4652 Error(E, "'lsb' operand must be an immediate");
4653 return MatchOperand_ParseFail;
4654 }
4655
4656 int64_t LSB = CE->getValue();
4657 // The LSB must be in the range [0,31]
4658 if (LSB < 0 || LSB > 31) {
4659 Error(E, "'lsb' operand must be in the range [0,31]");
4660 return MatchOperand_ParseFail;
4661 }
4662 E = Parser.getTok().getLoc();
4663
4664 // Expect another immediate operand.
4665 if (Parser.getTok().isNot(AsmToken::Comma)) {
4666 Error(Parser.getTok().getLoc(), "too few operands");
4667 return MatchOperand_ParseFail;
4668 }
4669 Parser.Lex(); // Eat hash token.
4670 if (Parser.getTok().isNot(AsmToken::Hash) &&
4671 Parser.getTok().isNot(AsmToken::Dollar)) {
4672 Error(Parser.getTok().getLoc(), "'#' expected");
4673 return MatchOperand_ParseFail;
4674 }
4675 Parser.Lex(); // Eat hash token.
4676
4677 const MCExpr *WidthExpr;
4678 SMLoc EndLoc;
4679 if (getParser().parseExpression(WidthExpr, EndLoc)) {
4680 Error(E, "malformed immediate expression");
4681 return MatchOperand_ParseFail;
4682 }
4683 CE = dyn_cast<MCConstantExpr>(WidthExpr);
4684 if (!CE) {
4685 Error(E, "'width' operand must be an immediate");
4686 return MatchOperand_ParseFail;
4687 }
4688
4689 int64_t Width = CE->getValue();
4690 // The LSB must be in the range [1,32-lsb]
4691 if (Width < 1 || Width > 32 - LSB) {
4692 Error(E, "'width' operand must be in the range [1,32-lsb]");
4693 return MatchOperand_ParseFail;
4694 }
4695
4696 Operands.push_back(ARMOperand::CreateBitfield(LSB, Width, S, EndLoc));
4697
4698 return MatchOperand_Success;
4699}
4700
4701OperandMatchResultTy
4702ARMAsmParser::parsePostIdxReg(OperandVector &Operands) {
4703 // Check for a post-index addressing register operand. Specifically:
4704 // postidx_reg := '+' register {, shift}
4705 // | '-' register {, shift}
4706 // | register {, shift}
4707
4708 // This method must return MatchOperand_NoMatch without consuming any tokens
4709 // in the case where there is no match, as other alternatives take other
4710 // parse methods.
4711 MCAsmParser &Parser = getParser();
4712 AsmToken Tok = Parser.getTok();
4713 SMLoc S = Tok.getLoc();
4714 bool haveEaten = false;
4715 bool isAdd = true;
4716 if (Tok.is(AsmToken::Plus)) {
4717 Parser.Lex(); // Eat the '+' token.
4718 haveEaten = true;
4719 } else if (Tok.is(AsmToken::Minus)) {
4720 Parser.Lex(); // Eat the '-' token.
4721 isAdd = false;
4722 haveEaten = true;
4723 }
4724
4725 SMLoc E = Parser.getTok().getEndLoc();
4726 int Reg = tryParseRegister();
4727 if (Reg == -1) {
4728 if (!haveEaten)
4729 return MatchOperand_NoMatch;
4730 Error(Parser.getTok().getLoc(), "register expected");
4731 return MatchOperand_ParseFail;
4732 }
4733
4734 ARM_AM::ShiftOpc ShiftTy = ARM_AM::no_shift;
4735 unsigned ShiftImm = 0;
4736 if (Parser.getTok().is(AsmToken::Comma)) {
4737 Parser.Lex(); // Eat the ','.
4738 if (parseMemRegOffsetShift(ShiftTy, ShiftImm))
4739 return MatchOperand_ParseFail;
4740
4741 // FIXME: Only approximates end...may include intervening whitespace.
4742 E = Parser.getTok().getLoc();
4743 }
4744
4745 Operands.push_back(ARMOperand::CreatePostIdxReg(Reg, isAdd, ShiftTy,
4746 ShiftImm, S, E));
4747
4748 return MatchOperand_Success;
4749}
4750
4751OperandMatchResultTy
4752ARMAsmParser::parseAM3Offset(OperandVector &Operands) {
4753 // Check for a post-index addressing register operand. Specifically:
4754 // am3offset := '+' register
4755 // | '-' register
4756 // | register
4757 // | # imm
4758 // | # + imm
4759 // | # - imm
4760
4761 // This method must return MatchOperand_NoMatch without consuming any tokens
4762 // in the case where there is no match, as other alternatives take other
4763 // parse methods.
4764 MCAsmParser &Parser = getParser();
4765 AsmToken Tok = Parser.getTok();
4766 SMLoc S = Tok.getLoc();
4767
4768 // Do immediates first, as we always parse those if we have a '#'.
4769 if (Parser.getTok().is(AsmToken::Hash) ||
4770 Parser.getTok().is(AsmToken::Dollar)) {
4771 Parser.Lex(); // Eat '#' or '$'.
4772 // Explicitly look for a '-', as we need to encode negative zero
4773 // differently.
4774 bool isNegative = Parser.getTok().is(AsmToken::Minus);
4775 const MCExpr *Offset;
4776 SMLoc E;
4777 if (getParser().parseExpression(Offset, E))
4778 return MatchOperand_ParseFail;
4779 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Offset);
4780 if (!CE) {
4781 Error(S, "constant expression expected");
4782 return MatchOperand_ParseFail;
4783 }
4784 // Negative zero is encoded as the flag value
4785 // std::numeric_limits<int32_t>::min().
4786 int32_t Val = CE->getValue();
4787 if (isNegative && Val == 0)
4788 Val = std::numeric_limits<int32_t>::min();
4789
4790 Operands.push_back(
4791 ARMOperand::CreateImm(MCConstantExpr::create(Val, getContext()), S, E));
4792
4793 return MatchOperand_Success;
4794 }
4795
4796 bool haveEaten = false;
4797 bool isAdd = true;
4798 if (Tok.is(AsmToken::Plus)) {
4799 Parser.Lex(); // Eat the '+' token.
4800 haveEaten = true;
4801 } else if (Tok.is(AsmToken::Minus)) {
4802 Parser.Lex(); // Eat the '-' token.
4803 isAdd = false;
4804 haveEaten = true;
4805 }
4806
4807 Tok = Parser.getTok();
4808 int Reg = tryParseRegister();
4809 if (Reg == -1) {
4810 if (!haveEaten)
4811 return MatchOperand_NoMatch;
4812 Error(Tok.getLoc(), "register expected");
4813 return MatchOperand_ParseFail;
4814 }
4815
4816 Operands.push_back(ARMOperand::CreatePostIdxReg(Reg, isAdd, ARM_AM::no_shift,
4817 0, S, Tok.getEndLoc()));
4818
4819 return MatchOperand_Success;
4820}
4821
4822/// Convert parsed operands to MCInst. Needed here because this instruction
4823/// only has two register operands, but multiplication is commutative so
4824/// assemblers should accept both "mul rD, rN, rD" and "mul rD, rD, rN".
4825void ARMAsmParser::cvtThumbMultiply(MCInst &Inst,
4826 const OperandVector &Operands) {
4827 ((ARMOperand &)*Operands[3]).addRegOperands(Inst, 1);
4828 ((ARMOperand &)*Operands[1]).addCCOutOperands(Inst, 1);
4829 // If we have a three-operand form, make sure to set Rn to be the operand
4830 // that isn't the same as Rd.
4831 unsigned RegOp = 4;
4832 if (Operands.size() == 6 &&
4833 ((ARMOperand &)*Operands[4]).getReg() ==
4834 ((ARMOperand &)*Operands[3]).getReg())
4835 RegOp = 5;
4836 ((ARMOperand &)*Operands[RegOp]).addRegOperands(Inst, 1);
4837 Inst.addOperand(Inst.getOperand(0));
4838 ((ARMOperand &)*Operands[2]).addCondCodeOperands(Inst, 2);
4839}
4840
4841void ARMAsmParser::cvtThumbBranches(MCInst &Inst,
4842 const OperandVector &Operands) {
4843 int CondOp = -1, ImmOp = -1;
4844 switch(Inst.getOpcode()) {
4845 case ARM::tB:
4846 case ARM::tBcc: CondOp = 1; ImmOp = 2; break;
4847
4848 case ARM::t2B:
4849 case ARM::t2Bcc: CondOp = 1; ImmOp = 3; break;
4850
4851 default: llvm_unreachable("Unexpected instruction in cvtThumbBranches")::llvm::llvm_unreachable_internal("Unexpected instruction in cvtThumbBranches"
, "/build/llvm-toolchain-snapshot-6.0~svn321639/lib/Target/ARM/AsmParser/ARMAsmParser.cpp"
, 4851)
;
4852 }
4853 // first decide whether or not the branch should be conditional
4854 // by looking at it's location relative to an IT block
4855 if(inITBlock()) {
4856 // inside an IT block we cannot have any conditional branches. any
4857 // such instructions needs to be converted to unconditional form
4858 switch(Inst.getOpcode()) {
4859 case ARM::tBcc: Inst.setOpcode(ARM::tB); break;
4860 case ARM::t2Bcc: Inst.setOpcode(ARM::t2B); break;
4861 }
4862 } else {
4863 // outside IT blocks we can only have unconditional branches with AL
4864 // condition code or conditional branches with non-AL condition code
4865 unsigned Cond = static_cast<ARMOperand &>(*Operands[CondOp]).getCondCode();
4866 switch(Inst.getOpcode()) {
4867 case ARM::tB:
4868 case ARM::tBcc:
4869 Inst.setOpcode(Cond == ARMCC::AL ? ARM::tB : ARM::tBcc);
4870 break;
4871 case ARM::t2B:
4872 case ARM::t2Bcc:
4873 Inst.setOpcode(Cond == ARMCC::AL ? ARM::t2B : ARM::t2Bcc);
4874 break;
4875 }
4876 }
4877
4878 // now decide on encoding size based on branch target range
4879 switch(Inst.getOpcode()) {
4880 // classify tB as either t2B or t1B based on range of immediate operand
4881 case ARM::tB: {
4882 ARMOperand &op = static_cast<ARMOperand &>(*Operands[ImmOp]);
4883 if (!op.isSignedOffset<11, 1>() && isThumb() && hasV8MBaseline())
4884 Inst.setOpcode(ARM::t2B);
4885 break;
4886 }
4887 // classify tBcc as either t2Bcc or t1Bcc based on range of immediate operand
4888 case ARM::tBcc: {
4889 ARMOperand &op = static_cast<ARMOperand &>(*Operands[ImmOp]);
4890 if (!op.isSignedOffset<8, 1>() && isThumb() && hasV8MBaseline())
4891 Inst.setOpcode(ARM::t2Bcc);
4892 break;
4893 }
4894 }
4895 ((ARMOperand &)*Operands[ImmOp]).addImmOperands(Inst, 1);
4896 ((ARMOperand &)*Operands[CondOp]).addCondCodeOperands(Inst, 2);
4897}
4898
4899/// Parse an ARM memory expression, return false if successful else return true
4900/// or an error. The first token must be a '[' when called.
4901bool ARMAsmParser::parseMemory(OperandVector &Operands) {
4902 MCAsmParser &Parser = getParser();
4903 SMLoc S, E;
4904 if (Parser.getTok().isNot(AsmToken::LBrac))
4905 return TokError("Token is not a Left Bracket");
4906 S = Parser.getTok().getLoc();
4907 Parser.Lex(); // Eat left bracket token.
4908
4909 const AsmToken &BaseRegTok = Parser.getTok();
4910 int BaseRegNum = tryParseRegister();
4911 if (BaseRegNum == -1)
4912 return Error(BaseRegTok.getLoc(), "register expected");
4913
4914 // The next token must either be a comma, a colon or a closing bracket.
4915 const AsmToken &Tok = Parser.getTok();
4916 if (!Tok.is(AsmToken::Colon) && !Tok.is(AsmToken::Comma) &&
4917 !Tok.is(AsmToken::RBrac))
4918 return Error(Tok.getLoc(), "malformed memory operand");
4919
4920 if (Tok.is(AsmToken::RBrac)) {
4921 E = Tok.getEndLoc();
4922 Parser.Lex(); // Eat right bracket token.
4923
4924 Operands.push_back(ARMOperand::CreateMem(BaseRegNum, nullptr, 0,
4925 ARM_AM::no_shift, 0, 0, false,
4926 S, E));
4927
4928 // If there's a pre-indexing writeback marker, '!', just add it as a token
4929 // operand. It's rather odd, but syntactically valid.
4930 if (Parser.getTok().is(AsmToken::Exclaim)) {
4931 Operands.push_back(ARMOperand::CreateToken("!",Parser.getTok().getLoc()));
4932 Parser.Lex(); // Eat the '!'.
4933 }
4934
4935 return false;
4936 }
4937
4938 assert((Tok.is(AsmToken::Colon) || Tok.is(AsmToken::Comma)) &&(static_cast <bool> ((Tok.is(AsmToken::Colon) || Tok.is
(AsmToken::Comma)) && "Lost colon or comma in memory operand?!"
) ? void (0) : __assert_fail ("(Tok.is(AsmToken::Colon) || Tok.is(AsmToken::Comma)) && \"Lost colon or comma in memory operand?!\""
, "/build/llvm-toolchain-snapshot-6.0~svn321639/lib/Target/ARM/AsmParser/ARMAsmParser.cpp"
, 4939, __extension__ __PRETTY_FUNCTION__))
4939 "Lost colon or comma in memory operand?!")(static_cast <bool> ((Tok.is(AsmToken::Colon) || Tok.is
(AsmToken::Comma)) && "Lost colon or comma in memory operand?!"
) ? void (0) : __assert_fail ("(Tok.is(AsmToken::Colon) || Tok.is(AsmToken::Comma)) && \"Lost colon or comma in memory operand?!\""
, "/build/llvm-toolchain-snapshot-6.0~svn321639/lib/Target/ARM/AsmParser/ARMAsmParser.cpp"
, 4939, __extension__ __PRETTY_FUNCTION__))
;
4940 if (Tok.is(AsmToken::Comma)) {
4941 Parser.Lex(); // Eat the comma.
4942 }
4943
4944 // If we have a ':', it's an alignment specifier.
4945 if (Parser.getTok().is(AsmToken::Colon)) {
4946 Parser.Lex(); // Eat the ':'.
4947 E = Parser.getTok().getLoc();
4948 SMLoc AlignmentLoc = Tok.getLoc();
4949
4950 const MCExpr *Expr;
4951 if (getParser().parseExpression(Expr))
4952 return true;
4953
4954 // The expression has to be a constant. Memory references with relocations
4955 // don't come through here, as they use the <label> forms of the relevant
4956 // instructions.
4957 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Expr);
4958 if (!CE)
4959 return Error (E, "constant expression expected");
4960
4961 unsigned Align = 0;
4962 switch (CE->getValue()) {
4963 default:
4964 return Error(E,
4965 "alignment specifier must be 16, 32, 64, 128, or 256 bits");
4966 case 16: Align = 2; break;
4967 case 32: Align = 4; break;
4968 case 64: Align = 8; break;
4969 case 128: Align = 16; break;
4970 case 256: Align = 32; break;
4971 }
4972
4973 // Now we should have the closing ']'
4974 if (Parser.getTok().isNot(AsmToken::RBrac))
4975 return Error(Parser.getTok().getLoc(), "']' expected");
4976 E = Parser.getTok().getEndLoc();
4977 Parser.Lex(); // Eat right bracket token.
4978
4979 // Don't worry about range checking the value here. That's handled by
4980 // the is*() predicates.
4981 Operands.push_back(ARMOperand::CreateMem(BaseRegNum, nullptr, 0,
4982 ARM_AM::no_shift, 0, Align,
4983 false, S, E, AlignmentLoc));
4984
4985 // If there's a pre-indexing writeback marker, '!', just add it as a token
4986 // operand.
4987 if (Parser.getTok().is(AsmToken::Exclaim)) {
4988 Operands.push_back(ARMOperand::CreateToken("!",Parser.getTok().getLoc()));
4989 Parser.Lex(); // Eat the '!'.
4990 }
4991
4992 return false;
4993 }
4994
4995 // If we have a '#', it's an immediate offset, else assume it's a register
4996 // offset. Be friendly and also accept a plain integer (without a leading
4997 // hash) for gas compatibility.
4998 if (Parser.getTok().is(AsmToken::Hash) ||
4999 Parser.getTok().is(AsmToken::Dollar) ||
5000 Parser.getTok().is(AsmToken::Integer)) {
5001 if (Parser.getTok().isNot(AsmToken::Integer))
5002 Parser.Lex(); // Eat '#' or '$'.
5003 E = Parser.getTok().getLoc();
5004
5005 bool isNegative = getParser().getTok().is(AsmToken::Minus);
5006 const MCExpr *Offset;
5007 if (getParser().parseExpression(Offset))
5008 return true;
5009
5010 // The expression has to be a constant. Memory references with relocations
5011 // don't come through here, as they use the <label> forms of the relevant
5012 // instructions.
5013 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Offset);
5014 if (!CE)
5015 return Error (E, "constant expression expected");
5016
5017 // If the constant was #-0, represent it as
5018 // std::numeric_limits<int32_t>::min().
5019 int32_t Val = CE->getValue();
5020 if (isNegative && Val == 0)
5021 CE = MCConstantExpr::create(std::numeric_limits<int32_t>::min(),
5022 getContext());
5023
5024 // Now we should have the closing ']'
5025 if (Parser.getTok().isNot(AsmToken::RBrac))
5026 return Error(Parser.getTok().getLoc(), "']' expected");
5027 E = Parser.getTok().getEndLoc();
5028 Parser.Lex(); // Eat right bracket token.
5029
5030 // Don't worry about range checking the value here. That's handled by
5031 // the is*() predicates.
5032 Operands.push_back(ARMOperand::CreateMem(BaseRegNum, CE, 0,
5033 ARM_AM::no_shift, 0, 0,
5034 false, S, E));
5035
5036 // If there's a pre-indexing writeback marker, '!', just add it as a token
5037 // operand.
5038 if (Parser.getTok().is(AsmToken::Exclaim)) {
5039 Operands.push_back(ARMOperand::CreateToken("!",Parser.getTok().getLoc()));
5040 Parser.Lex(); // Eat the '!'.
5041 }
5042
5043 return false;
5044 }
5045
5046 // The register offset is optionally preceded by a '+' or '-'
5047 bool isNegative = false;
5048 if (Parser.getTok().is(AsmToken::Minus)) {
5049 isNegative = true;
5050 Parser.Lex(); // Eat the '-'.
5051 } else if (Parser.getTok().is(AsmToken::Plus)) {
5052 // Nothing to do.
5053 Parser.Lex(); // Eat the '+'.
5054 }
5055
5056 E = Parser.getTok().getLoc();
5057 int OffsetRegNum = tryParseRegister();
5058 if (OffsetRegNum == -1)
5059 return Error(E, "register expected");
5060
5061 // If there's a shift operator, handle it.
5062 ARM_AM::ShiftOpc ShiftType = ARM_AM::no_shift;
5063 unsigned ShiftImm = 0;
5064 if (Parser.getTok().is(AsmToken::Comma)) {
5065 Parser.Lex(); // Eat the ','.
5066 if (parseMemRegOffsetShift(ShiftType, ShiftImm))
5067 return true;
5068 }
5069
5070 // Now we should have the closing ']'
5071 if (Parser.getTok().isNot(AsmToken::RBrac))
5072 return Error(Parser.getTok().getLoc(), "']' expected");
5073 E = Parser.getTok().getEndLoc();
5074 Parser.Lex(); // Eat right bracket token.
5075
5076 Operands.push_back(ARMOperand::CreateMem(BaseRegNum, nullptr, OffsetRegNum,
5077 ShiftType, ShiftImm, 0, isNegative,
5078 S, E));
5079
5080 // If there's a pre-indexing writeback marker, '!', just add it as a token
5081 // operand.
5082 if (Parser.getTok().is(AsmToken::Exclaim)) {
5083 Operands.push_back(ARMOperand::CreateToken("!",Parser.getTok().getLoc()));
5084 Parser.Lex(); // Eat the '!'.
5085 }
5086
5087 return false;
5088}
5089
5090/// parseMemRegOffsetShift - one of these two:
5091/// ( lsl | lsr | asr | ror ) , # shift_amount
5092/// rrx
5093/// return true if it parses a shift otherwise it returns false.
5094bool ARMAsmParser::parseMemRegOffsetShift(ARM_AM::ShiftOpc &St,
5095 unsigned &Amount) {
5096 MCAsmParser &Parser = getParser();
5097 SMLoc Loc = Parser.getTok().getLoc();
5098 const AsmToken &Tok = Parser.getTok();
5099 if (Tok.isNot(AsmToken::Identifier))
5100 return Error(Loc, "illegal shift operator");
5101 StringRef ShiftName = Tok.getString();
5102 if (ShiftName == "lsl" || ShiftName == "LSL" ||
5103 ShiftName == "asl" || ShiftName == "ASL")
5104 St = ARM_AM::lsl;
5105 else if (ShiftName == "lsr" || ShiftName == "LSR")
5106 St = ARM_AM::lsr;
5107 else if (ShiftName == "asr" || ShiftName == "ASR")
5108 St = ARM_AM::asr;
5109 else if (ShiftName == "ror" || ShiftName == "ROR")
5110 St = ARM_AM::ror;
5111 else if (ShiftName == "rrx" || ShiftName == "RRX")
5112 St = ARM_AM::rrx;
5113 else
5114 return Error(Loc, "illegal shift operator");
5115 Parser.Lex(); // Eat shift type token.
5116
5117 // rrx stands alone.
5118 Amount = 0;
5119 if (St != ARM_AM::rrx) {
5120 Loc = Parser.getTok().getLoc();
5121 // A '#' and a shift amount.
5122 const AsmToken &HashTok = Parser.getTok();
5123 if (HashTok.isNot(AsmToken::Hash) &&
5124 HashTok.isNot(AsmToken::Dollar))
5125 return Error(HashTok.getLoc(), "'#' expected");
5126 Parser.Lex(); // Eat hash token.
5127
5128 const MCExpr *Expr;
5129 if (getParser().parseExpression(Expr))
5130 return true;
5131 // Range check the immediate.
5132 // lsl, ror: 0 <= imm <= 31
5133 // lsr, asr: 0 <= imm <= 32
5134 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Expr);
5135 if (!CE)
5136 return Error(Loc, "shift amount must be an immediate");
5137 int64_t Imm = CE->getValue();
5138 if (Imm < 0 ||
5139 ((St == ARM_AM::lsl || St == ARM_AM::ror) && Imm > 31) ||
5140 ((St == ARM_AM::lsr || St == ARM_AM::asr) && Imm > 32))
5141 return Error(Loc, "immediate shift value out of range");
5142 // If <ShiftTy> #0, turn it into a no_shift.
5143 if (Imm == 0)
5144 St = ARM_AM::lsl;
5145 // For consistency, treat lsr #32 and asr #32 as having immediate value 0.
5146 if (Imm == 32)
5147 Imm = 0;
5148 Amount = Imm;
5149 }
5150
5151 return false;
5152}
5153
5154/// parseFPImm - A floating point immediate expression operand.
5155OperandMatchResultTy
5156ARMAsmParser::parseFPImm(OperandVector &Operands) {
5157 MCAsmParser &Parser = getParser();
5158 // Anything that can accept a floating point constant as an operand
5159 // needs to go through here, as the regular parseExpression is
5160 // integer only.
5161 //
5162 // This routine still creates a generic Immediate operand, containing
5163 // a bitcast of the 64-bit floating point value. The various operands
5164 // that accept floats can check whether the value is valid for them
5165 // via the standard is*() predicates.
5166
5167 SMLoc S = Parser.getTok().getLoc();
5168
5169 if (Parser.getTok().isNot(AsmToken::Hash) &&
5170 Parser.getTok().isNot(AsmToken::Dollar))
5171 return MatchOperand_NoMatch;
5172
5173 // Disambiguate the VMOV forms that can accept an FP immediate.
5174 // vmov.f32 <sreg>, #imm
5175 // vmov.f64 <dreg>, #imm
5176 // vmov.f32 <dreg>, #imm @ vector f32x2
5177 // vmov.f32 <qreg>, #imm @ vector f32x4
5178 //
5179 // There are also the NEON VMOV instructions which expect an
5180 // integer constant. Make sure we don't try to parse an FPImm
5181 // for these:
5182 // vmov.i{8|16|32|64} <dreg|qreg>, #imm
5183 ARMOperand &TyOp = static_cast<ARMOperand &>(*Operands[2]);
5184 bool isVmovf = TyOp.isToken() &&
5185 (TyOp.getToken() == ".f32" || TyOp.getToken() == ".f64" ||
5186 TyOp.getToken() == ".f16");
5187 ARMOperand &Mnemonic = static_cast<ARMOperand &>(*Operands[0]);
5188 bool isFconst = Mnemonic.isToken() && (Mnemonic.getToken() == "fconstd" ||
5189 Mnemonic.getToken() == "fconsts");
5190 if (!(isVmovf || isFconst))
5191 return MatchOperand_NoMatch;
5192
5193 Parser.Lex(); // Eat '#' or '$'.
5194
5195 // Handle negation, as that still comes through as a separate token.
5196 bool isNegative = false;
5197 if (Parser.getTok().is(AsmToken::Minus)) {
5198 isNegative = true;
5199 Parser.Lex();
5200 }
5201 const AsmToken &Tok = Parser.getTok();
5202 SMLoc Loc = Tok.getLoc();
5203 if (Tok.is(AsmToken::Real) && isVmovf) {
5204 APFloat RealVal(APFloat::IEEEsingle(), Tok.getString());
5205 uint64_t IntVal = RealVal.bitcastToAPInt().getZExtValue();
5206 // If we had a '-' in front, toggle the sign bit.
5207 IntVal ^= (uint64_t)isNegative << 31;
5208 Parser.Lex(); // Eat the token.
5209 Operands.push_back(ARMOperand::CreateImm(
5210 MCConstantExpr::create(IntVal, getContext()),
5211 S, Parser.getTok().getLoc()));
5212 return MatchOperand_Success;
5213 }
5214 // Also handle plain integers. Instructions which allow floating point
5215 // immediates also allow a raw encoded 8-bit value.
5216 if (Tok.is(AsmToken::Integer) && isFconst) {
5217 int64_t Val = Tok.getIntVal();
5218 Parser.Lex(); // Eat the token.
5219 if (Val > 255 || Val < 0) {
5220 Error(Loc, "encoded floating point value out of range");
5221 return MatchOperand_ParseFail;
5222 }
5223 float RealVal = ARM_AM::getFPImmFloat(Val);
5224 Val = APFloat(RealVal).bitcastToAPInt().getZExtValue();
5225
5226 Operands.push_back(ARMOperand::CreateImm(
5227 MCConstantExpr::create(Val, getContext()), S,
5228 Parser.getTok().getLoc()));
5229 return MatchOperand_Success;
5230 }
5231
5232 Error(Loc, "invalid floating point immediate");
5233 return MatchOperand_ParseFail;
5234}
5235
5236 /// Parse an ARM instruction operand. For now this parses the operand regardless
5237 /// of the mnemonic.
     ///
     /// Returns true on error (a diagnostic has already been emitted), false on
     /// success; on success the parsed operand(s) are appended to \p Operands.
5238 bool ARMAsmParser::parseOperand(OperandVector &Operands, StringRef Mnemonic) {
5239 MCAsmParser &Parser = getParser();
5240 SMLoc S, E;
5241
5242 // Check if the current operand has a custom associated parser, if so, try to
5243 // custom parse the operand, or fallback to the general approach.
5244 OperandMatchResultTy ResTy = MatchOperandParserImpl(Operands, Mnemonic);
5245 if (ResTy == MatchOperand_Success)
5246 return false;
5247 // If there wasn't a custom match, try the generic matcher below. Otherwise,
5248 // there was a match, but an error occurred, in which case, just return that
5249 // the operand parsing failed.
5250 if (ResTy == MatchOperand_ParseFail)
5251 return true;
5252
     // Dispatch on the leading token of the operand.
5253 switch (getLexer().getKind()) {
5254 default:
5255 Error(Parser.getTok().getLoc(), "unexpected token in operand");
5256 return true;
5257 case AsmToken::Identifier: {
5258 // If we've seen a branch mnemonic, the next operand must be a label. This
5259 // is true even if the label is a register name. So "br r1" means branch to
5260 // label "r1".
5261 bool ExpectLabel = Mnemonic == "b" || Mnemonic == "bl";
5262 if (!ExpectLabel) {
5263 if (!tryParseRegisterWithWriteBack(Operands))
5264 return false;
5265 int Res = tryParseShiftRegister(Operands);
5266 if (Res == 0) // success
5267 return false;
5268 else if (Res == -1) // irrecoverable error
5269 return true;
5270 // If this is VMRS, check for the apsr_nzcv operand.
5271 if (Mnemonic == "vmrs" &&
5272 Parser.getTok().getString().equals_lower("apsr_nzcv")) {
5273 S = Parser.getTok().getLoc();
5274 Parser.Lex();
5275 Operands.push_back(ARMOperand::CreateToken("APSR_nzcv", S));
5276 return false;
5277 }
5278 }
5279
5280 // Fall through for the Identifier case that is not a register or a
5281 // special name.
5282 LLVM_FALLTHROUGH[[clang::fallthrough]];
5283 }
5284 case AsmToken::LParen: // parenthesized expressions like (_strcmp-4)
5285 case AsmToken::Integer: // things like 1f and 2b as branch targets
5286 case AsmToken::String: // quoted label names.
5287 case AsmToken::Dot: { // . as a branch target
5288 // This was not a register so parse other operands that start with an
5289 // identifier (like labels) as expressions and create them as immediates.
5290 const MCExpr *IdVal;
5291 S = Parser.getTok().getLoc();
5292 if (getParser().parseExpression(IdVal))
5293 return true;
5294 E = SMLoc::getFromPointer(Parser.getTok().getLoc().getPointer() - 1);
5295 Operands.push_back(ARMOperand::CreateImm(IdVal, S, E));
5296 return false;
5297 }
5298 case AsmToken::LBrac:
5299 return parseMemory(Operands);
5300 case AsmToken::LCurly:
5301 return parseRegisterList(Operands);
5302 case AsmToken::Dollar:
5303 case AsmToken::Hash:
5304 // #42 -> immediate.
5305 S = Parser.getTok().getLoc();
5306 Parser.Lex();
5307
5308 if (Parser.getTok().isNot(AsmToken::Colon)) {
5309 bool isNegative = Parser.getTok().is(AsmToken::Minus);
5310 const MCExpr *ImmVal;
5311 if (getParser().parseExpression(ImmVal))
5312 return true;
5313 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(ImmVal);
5314 if (CE) {
5315 int32_t Val = CE->getValue();
     // Represent "#-0" as INT32_MIN so it stays distinguishable from "#0"
     // (same convention as the memory-offset parsing above).
5316 if (isNegative && Val == 0)
5317 ImmVal = MCConstantExpr::create(std::numeric_limits<int32_t>::min(),
5318 getContext());
5319 }
5320 E = SMLoc::getFromPointer(Parser.getTok().getLoc().getPointer() - 1);
5321 Operands.push_back(ARMOperand::CreateImm(ImmVal, S, E));
5322
5323 // There can be a trailing '!' on operands that we want as a separate
5324 // '!' Token operand. Handle that here. For example, the compatibility
5325 // alias for 'srsdb sp!, #imm' is 'srsdb #imm!'.
5326 if (Parser.getTok().is(AsmToken::Exclaim)) {
5327 Operands.push_back(ARMOperand::CreateToken(Parser.getTok().getString(),
5328 Parser.getTok().getLoc()));
5329 Parser.Lex(); // Eat exclaim token
5330 }
5331 return false;
5332 }
5333 // w/ a ':' after the '#', it's just like a plain ':'.
5334 LLVM_FALLTHROUGH[[clang::fallthrough]];
5335
5336 case AsmToken::Colon: {
5337 S = Parser.getTok().getLoc();
5338 // ":lower16:" and ":upper16:" expression prefixes
5339 // FIXME: Check it's an expression prefix,
5340 // e.g. (FOO - :lower16:BAR) isn't legal.
5341 ARMMCExpr::VariantKind RefKind;
5342 if (parsePrefix(RefKind))
5343 return true;
5344
5345 const MCExpr *SubExprVal;
5346 if (getParser().parseExpression(SubExprVal))
5347 return true;
5348
5349 const MCExpr *ExprVal = ARMMCExpr::create(RefKind, SubExprVal,
5350 getContext());
5351 E = SMLoc::getFromPointer(Parser.getTok().getLoc().getPointer() - 1);
5352 Operands.push_back(ARMOperand::CreateImm(ExprVal, S, E));
5353 return false;
5354 }
5355 case AsmToken::Equal: {
     // 'ldr <reg>, =<expr>' pseudo: record the expression so it can later be
     // materialized via a constant (literal) pool.
5356 S = Parser.getTok().getLoc();
5357 if (Mnemonic != "ldr") // only parse for ldr pseudo (e.g. ldr r0, =val)
5358 return Error(S, "unexpected token in operand");
5359 Parser.Lex(); // Eat '='
5360 const MCExpr *SubExprVal;
5361 if (getParser().parseExpression(SubExprVal))
5362 return true;
5363 E = SMLoc::getFromPointer(Parser.getTok().getLoc().getPointer() - 1);
5364
5365 // execute-only: we assume that assembly programmers know what they are
5366 // doing and allow literal pool creation here
5367 Operands.push_back(ARMOperand::CreateConstantPoolImm(SubExprVal, S, E));
5368 return false;
5369 }
5370 }
5371 }
5372
5373 // parsePrefix - Parse ARM 16-bit relocations expression prefix, i.e.
5374 // :lower16: and :upper16:.
     //
     // On success, sets RefKind to the matching ARMMCExpr variant and returns
     // false with the lexer positioned after the trailing ':'. Returns true
     // after emitting a diagnostic otherwise.
5375 bool ARMAsmParser::parsePrefix(ARMMCExpr::VariantKind &RefKind) {
5376 MCAsmParser &Parser = getParser();
5377 RefKind = ARMMCExpr::VK_ARM_None;
5378
5379 // consume an optional '#' (GNU compatibility)
5380 if (getLexer().is(AsmToken::Hash))
5381 Parser.Lex();
5382
5383 // :lower16: and :upper16: modifiers
5384 assert(getLexer().is(AsmToken::Colon) && "expected a :")(static_cast <bool> (getLexer().is(AsmToken::Colon) &&
"expected a :") ? void (0) : __assert_fail ("getLexer().is(AsmToken::Colon) && \"expected a :\""
, "/build/llvm-toolchain-snapshot-6.0~svn321639/lib/Target/ARM/AsmParser/ARMAsmParser.cpp"
, 5384, __extension__ __PRETTY_FUNCTION__))
;
5385 Parser.Lex(); // Eat ':'
5386
5387 if (getLexer().isNot(AsmToken::Identifier)) {
5388 Error(Parser.getTok().getLoc(), "expected prefix identifier in operand");
5389 return true;
5390 }
5391
     // One bit per object file format, used to record which formats support a
     // given prefix.
5392 enum {
5393 COFF = (1 << MCObjectFileInfo::IsCOFF),
5394 ELF = (1 << MCObjectFileInfo::IsELF),
5395 MACHO = (1 << MCObjectFileInfo::IsMachO),
5396 WASM = (1 << MCObjectFileInfo::IsWasm),
5397 };
5398 static const struct PrefixEntry {
5399 const char *Spelling;
5400 ARMMCExpr::VariantKind VariantKind;
5401 uint8_t SupportedFormats;
5402 } PrefixEntries[] = {
     // Note: neither prefix lists WASM, so both are rejected for wasm output
     // by the SupportedFormats check below.
5403 { "lower16", ARMMCExpr::VK_ARM_LO16, COFF | ELF | MACHO },
5404 { "upper16", ARMMCExpr::VK_ARM_HI16, COFF | ELF | MACHO },
5405 };
5406
5407 StringRef IDVal = Parser.getTok().getIdentifier();
5408
     // Linear search is fine: the table has only two entries.
5409 const auto &Prefix =
5410 std::find_if(std::begin(PrefixEntries), std::end(PrefixEntries),
5411 [&IDVal](const PrefixEntry &PE) {
5412 return PE.Spelling == IDVal;
5413 });
5414 if (Prefix == std::end(PrefixEntries)) {
5415 Error(Parser.getTok().getLoc(), "unexpected prefix in operand");
5416 return true;
5417 }
5418
     // All enumerators of the object file type are covered below, so
     // CurrentFormat is always initialized before use.
5419 uint8_t CurrentFormat;
5420 switch (getContext().getObjectFileInfo()->getObjectFileType()) {
5421 case MCObjectFileInfo::IsMachO:
5422 CurrentFormat = MACHO;
5423 break;
5424 case MCObjectFileInfo::IsELF:
5425 CurrentFormat = ELF;
5426 break;
5427 case MCObjectFileInfo::IsCOFF:
5428 CurrentFormat = COFF;
5429 break;
5430 case MCObjectFileInfo::IsWasm:
5431 CurrentFormat = WASM;
5432 break;
5433 }
5434
     // Reject the prefix if the current format's bit is not set for it.
5435 if (~Prefix->SupportedFormats & CurrentFormat) {
5436 Error(Parser.getTok().getLoc(),
5437 "cannot represent relocation in the current file format");
5438 return true;
5439 }
5440
5441 RefKind = Prefix->VariantKind;
5442 Parser.Lex();
5443
5444 if (getLexer().isNot(AsmToken::Colon)) {
5445 Error(Parser.getTok().getLoc(), "unexpected token after prefix");
5446 return true;
5447 }
5448 Parser.Lex(); // Eat the last ':'
5449
5450 return false;
5451 }
5452
5453 /// \brief Given a mnemonic, split out possible predication code and carry
5454 /// setting letters to form a canonical mnemonic and flags.
     ///
     /// Outputs: \p PredicationCode (ARMCC::AL when no condition suffix is
     /// present), \p CarrySetting (true when a trailing 's' was stripped),
     /// \p ProcessorIMod (the cps "ie"/"id" interrupt-mode suffix, 0 if none)
     /// and \p ITMask (the condition mask glued onto an "it" mnemonic).
     /// Returns the canonical mnemonic with all recognized suffixes removed.
5455 //
5456 // FIXME: Would be nice to autogen this.
5457 // FIXME: This is a bit of a maze of special cases.
5458 StringRef ARMAsmParser::splitMnemonic(StringRef Mnemonic,
5459 unsigned &PredicationCode,
5460 bool &CarrySetting,
5461 unsigned &ProcessorIMod,
5462 StringRef &ITMask) {
5463 PredicationCode = ARMCC::AL;
5464 CarrySetting = false;
5465 ProcessorIMod = 0;
5466
5467 // Ignore some mnemonics we know aren't predicated forms.
5468 //
5469 // FIXME: Would be nice to autogen this.
5470 if ((Mnemonic == "movs" && isThumb()) ||
5471 Mnemonic == "teq" || Mnemonic == "vceq" || Mnemonic == "svc" ||
5472 Mnemonic == "mls" || Mnemonic == "smmls" || Mnemonic == "vcls" ||
5473 Mnemonic == "vmls" || Mnemonic == "vnmls" || Mnemonic == "vacge" ||
5474 Mnemonic == "vcge" || Mnemonic == "vclt" || Mnemonic == "vacgt" ||
5475 Mnemonic == "vaclt" || Mnemonic == "vacle" || Mnemonic == "hlt" ||
5476 Mnemonic == "vcgt" || Mnemonic == "vcle" || Mnemonic == "smlal" ||
5477 Mnemonic == "umaal" || Mnemonic == "umlal" || Mnemonic == "vabal" ||
5478 Mnemonic == "vmlal" || Mnemonic == "vpadal" || Mnemonic == "vqdmlal" ||
5479 Mnemonic == "fmuls" || Mnemonic == "vmaxnm" || Mnemonic == "vminnm" ||
5480 Mnemonic == "vcvta" || Mnemonic == "vcvtn" || Mnemonic == "vcvtp" ||
5481 Mnemonic == "vcvtm" || Mnemonic == "vrinta" || Mnemonic == "vrintn" ||
5482 Mnemonic == "vrintp" || Mnemonic == "vrintm" || Mnemonic == "hvc" ||
5483 Mnemonic.startswith("vsel") || Mnemonic == "vins" || Mnemonic == "vmovx" ||
5484 Mnemonic == "bxns" || Mnemonic == "blxns" ||
5485 Mnemonic == "vudot" || Mnemonic == "vsdot" ||
5486 Mnemonic == "vcmla" || Mnemonic == "vcadd")
5487 return Mnemonic;
5488
5489 // First, split out any predication code. Ignore mnemonics we know aren't
5490 // predicated but do have a carry-set and so weren't caught above.
5491 if (Mnemonic != "adcs" && Mnemonic != "bics" && Mnemonic != "movs" &&
5492 Mnemonic != "muls" && Mnemonic != "smlals" && Mnemonic != "smulls" &&
5493 Mnemonic != "umlals" && Mnemonic != "umulls" && Mnemonic != "lsls" &&
5494 Mnemonic != "sbcs" && Mnemonic != "rscs") {
     // Try the last two characters as a condition code (StringRef::substr
     // clamps out-of-range start offsets, so short mnemonics are safe).
5495 unsigned CC = ARMCondCodeFromString(Mnemonic.substr(Mnemonic.size()-2));
5496 if (CC != ~0U) {
5497 Mnemonic = Mnemonic.slice(0, Mnemonic.size() - 2);
5498 PredicationCode = CC;
5499 }
5500 }
5501
5502 // Next, determine if we have a carry setting bit. We explicitly ignore all
5503 // the instructions we know end in 's'.
5504 if (Mnemonic.endswith("s") &&
5505 !(Mnemonic == "cps" || Mnemonic == "mls" ||
5506 Mnemonic == "mrs" || Mnemonic == "smmls" || Mnemonic == "vabs" ||
5507 Mnemonic == "vcls" || Mnemonic == "vmls" || Mnemonic == "vmrs" ||
5508 Mnemonic == "vnmls" || Mnemonic == "vqabs" || Mnemonic == "vrecps" ||
5509 Mnemonic == "vrsqrts" || Mnemonic == "srs" || Mnemonic == "flds" ||
5510 Mnemonic == "fmrs" || Mnemonic == "fsqrts" || Mnemonic == "fsubs" ||
5511 Mnemonic == "fsts" || Mnemonic == "fcpys" || Mnemonic == "fdivs" ||
5512 Mnemonic == "fmuls" || Mnemonic == "fcmps" || Mnemonic == "fcmpzs" ||
5513 Mnemonic == "vfms" || Mnemonic == "vfnms" || Mnemonic == "fconsts" ||
5514 Mnemonic == "bxns" || Mnemonic == "blxns" ||
5515 (Mnemonic == "movs" && isThumb()))) {
5516 Mnemonic = Mnemonic.slice(0, Mnemonic.size() - 1);
5517 CarrySetting = true;
5518 }
5519
5520 // The "cps" instruction can have an interrupt mode operand which is glued
5521 // into the mnemonic. Check if this is the case, split it and parse the imod op
5522 if (Mnemonic.startswith("cps")) {
5523 // Split out any imod code.
5524 unsigned IMod =
5525 StringSwitch<unsigned>(Mnemonic.substr(Mnemonic.size()-2, 2))
5526 .Case("ie", ARM_PROC::IE)
5527 .Case("id", ARM_PROC::ID)
5528 .Default(~0U);
5529 if (IMod != ~0U) {
5530 Mnemonic = Mnemonic.slice(0, Mnemonic.size()-2);
5531 ProcessorIMod = IMod;
5532 }
5533 }
5534
5535 // The "it" instruction has the condition mask on the end of the mnemonic.
5536 if (Mnemonic.startswith("it")) {
5537 ITMask = Mnemonic.slice(2, Mnemonic.size());
5538 Mnemonic = Mnemonic.slice(0, 2);
5539 }
5540
5541 return Mnemonic;
5542 }
5543
5544 /// \brief Given a canonical mnemonic, determine if the instruction ever allows
5545 /// inclusion of carry set or predication code operands.
     ///
     /// \p Mnemonic is the canonical (suffix-stripped) mnemonic; \p FullInst is
     /// the full instruction spelling, used for suffix-sensitive cases like
     /// "vmull...p64". Results are returned through \p CanAcceptCarrySet and
     /// \p CanAcceptPredicationCode.
5546 //
5547 // FIXME: It would be nice to autogen this.
5548 void ARMAsmParser::getMnemonicAcceptInfo(StringRef Mnemonic, StringRef FullInst,
5549 bool &CanAcceptCarrySet,
5550 bool &CanAcceptPredicationCode) {
5551 CanAcceptCarrySet =
5552 Mnemonic == "and" || Mnemonic == "lsl" || Mnemonic == "lsr" ||
5553 Mnemonic == "rrx" || Mnemonic == "ror" || Mnemonic == "sub" ||
5554 Mnemonic == "add" || Mnemonic == "adc" || Mnemonic == "mul" ||
5555 Mnemonic == "bic" || Mnemonic == "asr" || Mnemonic == "orr" ||
5556 Mnemonic == "mvn" || Mnemonic == "rsb" || Mnemonic == "rsc" ||
5557 Mnemonic == "orn" || Mnemonic == "sbc" || Mnemonic == "eor" ||
5558 Mnemonic == "neg" || Mnemonic == "vfm" || Mnemonic == "vfnm" ||
5559 (!isThumb() &&
5560 (Mnemonic == "smull" || Mnemonic == "mov" || Mnemonic == "mla" ||
5561 Mnemonic == "smlal" || Mnemonic == "umlal" || Mnemonic == "umull"));
5562
5563 if (Mnemonic == "bkpt" || Mnemonic == "cbnz" || Mnemonic == "setend" ||
5564 Mnemonic == "cps" || Mnemonic == "it" || Mnemonic == "cbz" ||
5565 Mnemonic == "trap" || Mnemonic == "hlt" || Mnemonic == "udf" ||
5566 Mnemonic.startswith("crc32") || Mnemonic.startswith("cps") ||
5567 Mnemonic.startswith("vsel") || Mnemonic == "vmaxnm" ||
5568 Mnemonic == "vminnm" || Mnemonic == "vcvta" || Mnemonic == "vcvtn" ||
5569 Mnemonic == "vcvtp" || Mnemonic == "vcvtm" || Mnemonic == "vrinta" ||
5570 Mnemonic == "vrintn" || Mnemonic == "vrintp" || Mnemonic == "vrintm" ||
5571 Mnemonic.startswith("aes") || Mnemonic == "hvc" || Mnemonic == "setpan" ||
5572 Mnemonic.startswith("sha1") || Mnemonic.startswith("sha256") ||
5573 (FullInst.startswith("vmull") && FullInst.endswith(".p64")) ||
5574 Mnemonic == "vmovx" || Mnemonic == "vins" ||
5575 Mnemonic == "vudot" || Mnemonic == "vsdot" ||
5576 Mnemonic == "vcmla" || Mnemonic == "vcadd") {
5577 // These mnemonics are never predicable
5578 CanAcceptPredicationCode = false;
5579 } else if (!isThumb()) {
5580 // Some instructions are only predicable in Thumb mode
5581 CanAcceptPredicationCode =
5582 Mnemonic != "cdp2" && Mnemonic != "clrex" && Mnemonic != "mcr2" &&
5583 Mnemonic != "mcrr2" && Mnemonic != "mrc2" && Mnemonic != "mrrc2" &&
5584 Mnemonic != "dmb" && Mnemonic != "dfb" && Mnemonic != "dsb" &&
5585 Mnemonic != "isb" && Mnemonic != "pld" && Mnemonic != "pli" &&
5586 Mnemonic != "pldw" && Mnemonic != "ldc2" && Mnemonic != "ldc2l" &&
5587 Mnemonic != "stc2" && Mnemonic != "stc2l" &&
5588 !Mnemonic.startswith("rfe") && !Mnemonic.startswith("srs");
5589 } else if (isThumbOne()) {
     // Thumb1: "movs" is never predicable; pre-v6M "nop" isn't either.
5590 if (hasV6MOps())
5591 CanAcceptPredicationCode = Mnemonic != "movs";
5592 else
5593 CanAcceptPredicationCode = Mnemonic != "nop" && Mnemonic != "movs";
5594 } else
5595 CanAcceptPredicationCode = true;
5596 }
5597
5598 // \brief Some Thumb instructions have two operand forms that are not
5599 // available as three operand, convert to two operand form if possible.
     //
     // Expects Operands laid out as [mnemonic, cc_out, cond, Rd, Rn, Rm/#imm]
     // (six entries); anything else is left untouched. On success the redundant
     // register operand at index 3 is erased in place.
5600 //
5601 // FIXME: We would really like to be able to tablegen'erate this.
5602 void ARMAsmParser::tryConvertingToTwoOperandForm(StringRef Mnemonic,
5603 bool CarrySetting,
5604 OperandVector &Operands) {
5605 if (Operands.size() != 6)
5606 return;
5607
5608 const auto &Op3 = static_cast<ARMOperand &>(*Operands[3]);
5609 auto &Op4 = static_cast<ARMOperand &>(*Operands[4]);
5610 if (!Op3.isReg() || !Op4.isReg())
5611 return;
5612
5613 auto Op3Reg = Op3.getReg();
5614 auto Op4Reg = Op4.getReg();
5615
5616 // For most Thumb2 cases we just generate the 3 operand form and reduce
5617 // it in processInstruction(), but the 3 operand form of ADD (t2ADDrr)
5618 // won't accept SP or PC so we do the transformation here taking care
5619 // with immediate range in the 'add sp, sp #imm' case.
5620 auto &Op5 = static_cast<ARMOperand &>(*Operands[5]);
5621 if (isThumbTwo()) {
5622 if (Mnemonic != "add")
5623 return;
5624 bool TryTransform = Op3Reg == ARM::PC || Op4Reg == ARM::PC ||
5625 (Op5.isReg() && Op5.getReg() == ARM::PC);
5626 if (!TryTransform) {
5627 TryTransform = (Op3Reg == ARM::SP || Op4Reg == ARM::SP ||
5628 (Op5.isReg() && Op5.getReg() == ARM::SP)) &&
5629 !(Op3Reg == ARM::SP && Op4Reg == ARM::SP &&
5630 Op5.isImm() && !Op5.isImm0_508s4());
5631 }
5632 if (!TryTransform)
5633 return;
5634 } else if (!isThumbOne())
5635 return;
5636
     // Only these mnemonics have a two-operand Thumb encoding.
5637 if (!(Mnemonic == "add" || Mnemonic == "sub" || Mnemonic == "and" ||
5638 Mnemonic == "eor" || Mnemonic == "lsl" || Mnemonic == "lsr" ||
5639 Mnemonic == "asr" || Mnemonic == "adc" || Mnemonic == "sbc" ||
5640 Mnemonic == "ror" || Mnemonic == "orr" || Mnemonic == "bic"))
5641 return;
5642
5643 // If first 2 operands of a 3 operand instruction are the same
5644 // then transform to 2 operand version of the same instruction
5645 // e.g. 'adds r0, r0, #1' transforms to 'adds r0, #1'
5646 bool Transform = Op3Reg == Op4Reg;
5647
5648 // For commutative operations, we might be able to transform if we swap
5649 // Op4 and Op5. The 'ADD Rdm, SP, Rdm' form is already handled specially
5650 // as tADDrsp.
5651 const ARMOperand *LastOp = &Op5;
5652 bool Swap = false;
5653 if (!Transform && Op5.isReg() && Op3Reg == Op5.getReg() &&
5654 ((Mnemonic == "add" && Op4Reg != ARM::SP) ||
5655 Mnemonic == "and" || Mnemonic == "eor" ||
5656 Mnemonic == "adc" || Mnemonic == "orr")) {
5657 Swap = true;
5658 LastOp = &Op4;
5659 Transform = true;
5660 }
5661
5662 // If both registers are the same then remove one of them from
5663 // the operand list, with certain exceptions.
5664 if (Transform) {
5665 // Don't transform 'adds Rd, Rd, Rm' or 'sub{s} Rd, Rd, Rm' because the
5666 // 2 operand forms don't exist.
5667 if (((Mnemonic == "add" && CarrySetting) || Mnemonic == "sub") &&
5668 LastOp->isReg())
5669 Transform = false;
5670
5671 // Don't transform 'add/sub{s} Rd, Rd, #imm' if the immediate fits into
5672 // 3-bits because the ARMARM says not to.
5673 if ((Mnemonic == "add" || Mnemonic == "sub") && LastOp->isImm0_7())
5674 Transform = false;
5675 }
5676
5677 if (Transform) {
5678 if (Swap)
5679 std::swap(Op4, Op5);
5680 Operands.erase(Operands.begin() + 3);
5681 }
5682 }
5683
     // Decide whether the defaulted (non-flag-setting) cc_out operand at index 1
     // must be dropped so the operand list matches an encoding that has no
     // cc_out. Returns true when the operand should be removed.
5684 bool ARMAsmParser::shouldOmitCCOutOperand(StringRef Mnemonic,
5685 OperandVector &Operands) {
5686 // FIXME: This is all horribly hacky. We really need a better way to deal
5687 // with optional operands like this in the matcher table.
5688
5689 // The 'mov' mnemonic is special. One variant has a cc_out operand, while
5690 // another does not. Specifically, the MOVW instruction does not. So we
5691 // special case it here and remove the defaulted (non-setting) cc_out
5692 // operand if that's the instruction we're trying to match.
5693 //
5694 // We do this as post-processing of the explicit operands rather than just
5695 // conditionally adding the cc_out in the first place because we need
5696 // to check the type of the parsed immediate operand.
5697 if (Mnemonic == "mov" && Operands.size() > 4 && !isThumb() &&
5698 !static_cast<ARMOperand &>(*Operands[4]).isModImm() &&
5699 static_cast<ARMOperand &>(*Operands[4]).isImm0_65535Expr() &&
5700 static_cast<ARMOperand &>(*Operands[1]).getReg() == 0)
5701 return true;
5702
5703 // Register-register 'add' for thumb does not have a cc_out operand
5704 // when there are only two register operands.
5705 if (isThumb() && Mnemonic == "add" && Operands.size() == 5 &&
5706 static_cast<ARMOperand &>(*Operands[3]).isReg() &&
5707 static_cast<ARMOperand &>(*Operands[4]).isReg() &&
5708 static_cast<ARMOperand &>(*Operands[1]).getReg() == 0)
5709 return true;
5710 // Register-register 'add' for thumb does not have a cc_out operand
5711 // when it's an ADD Rdm, SP, {Rdm|#imm0_255} instruction. We do
5712 // have to check the immediate range here since Thumb2 has a variant
5713 // that can handle a different range and has a cc_out operand.
5714 if (((isThumb() && Mnemonic == "add") ||
5715 (isThumbTwo() && Mnemonic == "sub")) &&
5716 Operands.size() == 6 && static_cast<ARMOperand &>(*Operands[3]).isReg() &&
5717 static_cast<ARMOperand &>(*Operands[4]).isReg() &&
5718 static_cast<ARMOperand &>(*Operands[4]).getReg() == ARM::SP &&
5719 static_cast<ARMOperand &>(*Operands[1]).getReg() == 0 &&
5720 ((Mnemonic == "add" && static_cast<ARMOperand &>(*Operands[5]).isReg()) ||
5721 static_cast<ARMOperand &>(*Operands[5]).isImm0_1020s4()))
5722 return true;
5723 // For Thumb2, add/sub immediate does not have a cc_out operand for the
5724 // imm0_4095 variant. That's the least-preferred variant when
5725 // selecting via the generic "add" mnemonic, so to know that we
5726 // should remove the cc_out operand, we have to explicitly check that
5727 // it's not one of the other variants. Ugh.
5728 if (isThumbTwo() && (Mnemonic == "add" || Mnemonic == "sub") &&
5729 Operands.size() == 6 && static_cast<ARMOperand &>(*Operands[3]).isReg() &&
5730 static_cast<ARMOperand &>(*Operands[4]).isReg() &&
5731 static_cast<ARMOperand &>(*Operands[5]).isImm()) {
5732 // Nest conditions rather than one big 'if' statement for readability.
5733 //
5734 // If both registers are low, we're in an IT block, and the immediate is
5735 // in range, we should use encoding T1 instead, which has a cc_out.
5736 if (inITBlock() &&
5737 isARMLowRegister(static_cast<ARMOperand &>(*Operands[3]).getReg()) &&
5738 isARMLowRegister(static_cast<ARMOperand &>(*Operands[4]).getReg()) &&
5739 static_cast<ARMOperand &>(*Operands[5]).isImm0_7())
5740 return false;
5741 // Check against T3. If the second register is the PC, this is an
5742 // alternate form of ADR, which uses encoding T4, so check for that too.
5743 if (static_cast<ARMOperand &>(*Operands[4]).getReg() != ARM::PC &&
5744 static_cast<ARMOperand &>(*Operands[5]).isT2SOImm())
5745 return false;
5746
5747 // Otherwise, we use encoding T4, which does not have a cc_out
5748 // operand.
5749 return true;
5750 }
5751
5752 // The thumb2 multiply instruction doesn't have a CCOut register, so
5753 // if we have a "mul" mnemonic in Thumb mode, check if we'll be able to
5754 // use the 16-bit encoding or not.
5755 if (isThumbTwo() && Mnemonic == "mul" && Operands.size() == 6 &&
5756 static_cast<ARMOperand &>(*Operands[1]).getReg() == 0 &&
5757 static_cast<ARMOperand &>(*Operands[3]).isReg() &&
5758 static_cast<ARMOperand &>(*Operands[4]).isReg() &&
5759 static_cast<ARMOperand &>(*Operands[5]).isReg() &&
5760 // If the registers aren't low regs, the destination reg isn't the
5761 // same as one of the source regs, or the cc_out operand is zero
5762 // outside of an IT block, we have to use the 32-bit encoding, so
5763 // remove the cc_out operand.
5764 (!isARMLowRegister(static_cast<ARMOperand &>(*Operands[3]).getReg()) ||
5765 !isARMLowRegister(static_cast<ARMOperand &>(*Operands[4]).getReg()) ||
5766 !isARMLowRegister(static_cast<ARMOperand &>(*Operands[5]).getReg()) ||
5767 !inITBlock() || (static_cast<ARMOperand &>(*Operands[3]).getReg() !=
5768 static_cast<ARMOperand &>(*Operands[5]).getReg() &&
5769 static_cast<ARMOperand &>(*Operands[3]).getReg() !=
5770 static_cast<ARMOperand &>(*Operands[4]).getReg())))
5771 return true;
5772
5773 // Also check the 'mul' syntax variant that doesn't specify an explicit
5774 // destination register.
5775 if (isThumbTwo() && Mnemonic == "mul" && Operands.size() == 5 &&
5776 static_cast<ARMOperand &>(*Operands[1]).getReg() == 0 &&
5777 static_cast<ARMOperand &>(*Operands[3]).isReg() &&
5778 static_cast<ARMOperand &>(*Operands[4]).isReg() &&
5779 // If the registers aren't low regs or the cc_out operand is zero
5780 // outside of an IT block, we have to use the 32-bit encoding, so
5781 // remove the cc_out operand.
5782 (!isARMLowRegister(static_cast<ARMOperand &>(*Operands[3]).getReg()) ||
5783 !isARMLowRegister(static_cast<ARMOperand &>(*Operands[4]).getReg()) ||
5784 !inITBlock()))
5785 return true;
5786
5787 // Register-register 'add/sub' for thumb does not have a cc_out operand
5788 // when it's an ADD/SUB SP, #imm. Be lenient on count since there's also
5789 // the "add/sub SP, SP, #imm" version. If the follow-up operands aren't
5790 // right, this will result in better diagnostics (which operand is off)
5791 // anyway.
5792 if (isThumb() && (Mnemonic == "add" || Mnemonic == "sub") &&
5793 (Operands.size() == 5 || Operands.size() == 6) &&
5794 static_cast<ARMOperand &>(*Operands[3]).isReg() &&
5795 static_cast<ARMOperand &>(*Operands[3]).getReg() == ARM::SP &&
5796 static_cast<ARMOperand &>(*Operands[1]).getReg() == 0 &&
5797 (static_cast<ARMOperand &>(*Operands[4]).isImm() ||
5798 (Operands.size() == 6 &&
5799 static_cast<ARMOperand &>(*Operands[5]).isImm())))
5800 return true;
5801
5802 return false;
5803 }
5804
5805bool ARMAsmParser::shouldOmitPredicateOperand(StringRef Mnemonic,
5806 OperandVector &Operands) {
5807 // VRINT{Z, X} have a predicate operand in VFP, but not in NEON
5808 unsigned RegIdx = 3;
5809 if ((Mnemonic == "vrintz" || Mnemonic == "vrintx") &&
5810 (static_cast<ARMOperand &>(*Operands[2]).getToken() == ".f32" ||
5811 static_cast<ARMOperand &>(*Operands[2]).getToken() == ".f16")) {
5812 if (static_cast<ARMOperand &>(*Operands[3]).isToken() &&
5813 (static_cast<ARMOperand &>(*Operands[3]).getToken() == ".f32" ||
5814 static_cast<ARMOperand &>(*Operands[3]).getToken() == ".f16"))
5815 RegIdx = 4;
5816
5817 if (static_cast<ARMOperand &>(*Operands[RegIdx]).isReg() &&
5818 (ARMMCRegisterClasses[ARM::DPRRegClassID].contains(
5819 static_cast<ARMOperand &>(*Operands[RegIdx]).getReg()) ||
5820 ARMMCRegisterClasses[ARM::QPRRegClassID].contains(
5821 static_cast<ARMOperand &>(*Operands[RegIdx]).getReg())))
5822 return true;
5823 }
5824 return false;
5825}
5826
5827static bool isDataTypeToken(StringRef Tok) {
5828 return Tok == ".8" || Tok == ".16" || Tok == ".32" || Tok == ".64" ||
5829 Tok == ".i8" || Tok == ".i16" || Tok == ".i32" || Tok == ".i64" ||
5830 Tok == ".u8" || Tok == ".u16" || Tok == ".u32" || Tok == ".u64" ||
5831 Tok == ".s8" || Tok == ".s16" || Tok == ".s32" || Tok == ".s64" ||
5832 Tok == ".p8" || Tok == ".p16" || Tok == ".f32" || Tok == ".f64" ||
5833 Tok == ".f" || Tok == ".d";
5834}
5835
5836// FIXME: This bit should probably be handled via an explicit match class
5837// in the .td files that matches the suffix instead of having it be
5838// a literal string token the way it is now.
5839static bool doesIgnoreDataTypeSuffix(StringRef Mnemonic, StringRef DT) {
5840 return Mnemonic.startswith("vldm") || Mnemonic.startswith("vstm");
5841}
5842
5843static void applyMnemonicAliases(StringRef &Mnemonic, uint64_t Features,
5844 unsigned VariantID);
5845
5846// The GNU assembler has aliases of ldrd and strd with the second register
5847// omitted. We don't have a way to do that in tablegen, so fix it up here.
5848//
5849// We have to be careful to not emit an invalid Rt2 here, because the rest of
5850// the assmebly parser could then generate confusing diagnostics refering to
5851// it. If we do find anything that prevents us from doing the transformation we
5852// bail out, and let the assembly parser report an error on the instruction as
5853// it is written.
5854void ARMAsmParser::fixupGNULDRDAlias(StringRef Mnemonic,
5855 OperandVector &Operands) {
5856 if (Mnemonic != "ldrd" && Mnemonic != "strd")
5857 return;
5858 if (Operands.size() < 4)
5859 return;
5860
5861 ARMOperand &Op2 = static_cast<ARMOperand &>(*Operands[2]);
5862 ARMOperand &Op3 = static_cast<ARMOperand &>(*Operands[3]);
5863
5864 if (!Op2.isReg())
5865 return;
5866 if (!Op3.isMem())
5867 return;
5868
5869 const MCRegisterClass &GPR = MRI->getRegClass(ARM::GPRRegClassID);
5870 if (!GPR.contains(Op2.getReg()))
5871 return;
5872
5873 unsigned RtEncoding = MRI->getEncodingValue(Op2.getReg());
5874 if (!isThumb() && (RtEncoding & 1)) {
5875 // In ARM mode, the registers must be from an aligned pair, this
5876 // restriction does not apply in Thumb mode.
5877 return;
5878 }
5879 if (Op2.getReg() == ARM::PC)
5880 return;
5881 unsigned PairedReg = GPR.getRegister(RtEncoding + 1);
5882 if (!PairedReg || PairedReg == ARM::PC ||
5883 (PairedReg == ARM::SP && !hasV8Ops()))
5884 return;
5885
5886 Operands.insert(
5887 Operands.begin() + 3,
5888 ARMOperand::CreateReg(PairedReg, Op2.getStartLoc(), Op2.getEndLoc()));
5889}
5890
/// Parse an arm instruction mnemonic followed by its operands.
///
/// Splits the mnemonic into base name, predication code, carry-setting
/// suffix, IT mask and '.'-separated qualifier tokens, builds the operand
/// list (including CCOut/CondCode placeholders where the mnemonic can carry
/// them), parses the comma-separated operands, then applies a series of
/// fix-ups so the operand list matches what the tablegen'erated matcher
/// expects. Returns true on error.
bool ARMAsmParser::ParseInstruction(ParseInstructionInfo &Info, StringRef Name,
                                    SMLoc NameLoc, OperandVector &Operands) {
  MCAsmParser &Parser = getParser();

  // Apply mnemonic aliases before doing anything else, as the destination
  // mnemonic may include suffixes and we want to handle them normally.
  // The generic tblgen'erated code does this later, at the start of
  // MatchInstructionImpl(), but that's too late for aliases that include
  // any sort of suffix.
  uint64_t AvailableFeatures = getAvailableFeatures();
  unsigned AssemblerDialect = getParser().getAssemblerDialect();
  applyMnemonicAliases(Name, AvailableFeatures, AssemblerDialect);

  // First check for the ARM-specific .req directive.
  if (Parser.getTok().is(AsmToken::Identifier) &&
      Parser.getTok().getIdentifier() == ".req") {
    parseDirectiveReq(Name, NameLoc);
    // We always return 'error' for this, as we're done with this
    // statement and don't need to match the 'instruction'.
    return true;
  }

  // Create the leading tokens for the mnemonic, split by '.' characters.
  size_t Start = 0, Next = Name.find('.');
  StringRef Mnemonic = Name.slice(Start, Next);

  // Split out the predication code and carry setting flag from the mnemonic.
  unsigned PredicationCode;
  unsigned ProcessorIMod;
  bool CarrySetting;
  StringRef ITMask;
  Mnemonic = splitMnemonic(Mnemonic, PredicationCode, CarrySetting,
                           ProcessorIMod, ITMask);

  // In Thumb1, only the branch (B) instruction can be predicated.
  if (isThumbOne() && PredicationCode != ARMCC::AL && Mnemonic != "b") {
    return Error(NameLoc, "conditional execution not supported in Thumb1");
  }

  Operands.push_back(ARMOperand::CreateToken(Mnemonic, NameLoc));

  // Handle the IT instruction ITMask. Convert it to a bitmask. This
  // is the mask as it will be for the IT encoding if the conditional
  // encoding has a '1' as its bit0 (i.e. 't' ==> '1'). In the case
  // where the conditional bit0 is zero, the instruction post-processing
  // will adjust the mask accordingly.
  if (Mnemonic == "it") {
    SMLoc Loc = SMLoc::getFromPointer(NameLoc.getPointer() + 2);
    if (ITMask.size() > 3) {
      return Error(Loc, "too many conditions on IT instruction");
    }
    unsigned Mask = 8;
    // Walk the mask string right-to-left, shifting in a 't'/'e' bit at a
    // time; the leading '1' (the 8) marks the end of the mask field.
    for (unsigned i = ITMask.size(); i != 0; --i) {
      char pos = ITMask[i - 1];
      if (pos != 't' && pos != 'e') {
        return Error(Loc, "illegal IT block condition mask '" + ITMask + "'");
      }
      Mask >>= 1;
      if (ITMask[i - 1] == 't')
        Mask |= 8;
    }
    Operands.push_back(ARMOperand::CreateITMask(Mask, Loc));
  }

  // FIXME: This is all a pretty gross hack. We should automatically handle
  // optional operands like this via tblgen.

  // Next, add the CCOut and ConditionCode operands, if needed.
  //
  // For mnemonics which can ever incorporate a carry setting bit or predication
  // code, our matching model involves us always generating CCOut and
  // ConditionCode operands to match the mnemonic "as written" and then we let
  // the matcher deal with finding the right instruction or generating an
  // appropriate error.
  bool CanAcceptCarrySet, CanAcceptPredicationCode;
  getMnemonicAcceptInfo(Mnemonic, Name, CanAcceptCarrySet, CanAcceptPredicationCode);

  // If we had a carry-set on an instruction that can't do that, issue an
  // error.
  if (!CanAcceptCarrySet && CarrySetting) {
    return Error(NameLoc, "instruction '" + Mnemonic +
                 "' can not set flags, but 's' suffix specified");
  }
  // If we had a predication code on an instruction that can't do that, issue an
  // error.
  if (!CanAcceptPredicationCode && PredicationCode != ARMCC::AL) {
    return Error(NameLoc, "instruction '" + Mnemonic +
                 "' is not predicable, but condition code specified");
  }

  // Add the carry setting operand, if necessary.
  if (CanAcceptCarrySet) {
    SMLoc Loc = SMLoc::getFromPointer(NameLoc.getPointer() + Mnemonic.size());
    Operands.push_back(ARMOperand::CreateCCOut(CarrySetting ? ARM::CPSR : 0,
                                               Loc));
  }

  // Add the predication code operand, if necessary.
  if (CanAcceptPredicationCode) {
    // CarrySetting (0 or 1) accounts for the 's' suffix when computing the
    // source location of the condition code.
    SMLoc Loc = SMLoc::getFromPointer(NameLoc.getPointer() + Mnemonic.size() +
                                      CarrySetting);
    Operands.push_back(ARMOperand::CreateCondCode(
                       ARMCC::CondCodes(PredicationCode), Loc));
  }

  // Add the processor imod operand, if necessary.
  if (ProcessorIMod) {
    Operands.push_back(ARMOperand::CreateImm(
          MCConstantExpr::create(ProcessorIMod, getContext()),
                                 NameLoc, NameLoc));
  } else if (Mnemonic == "cps" && isMClass()) {
    return Error(NameLoc, "instruction 'cps' requires effect for M-class");
  }

  // Add the remaining tokens in the mnemonic.
  while (Next != StringRef::npos) {
    Start = Next;
    Next = Name.find('.', Start + 1);
    StringRef ExtraToken = Name.slice(Start, Next);

    // Some NEON instructions have an optional datatype suffix that is
    // completely ignored. Check for that.
    if (isDataTypeToken(ExtraToken) &&
        doesIgnoreDataTypeSuffix(Mnemonic, ExtraToken))
      continue;

    // For ARM mode generate an error if the .n qualifier is used.
    if (ExtraToken == ".n" && !isThumb()) {
      SMLoc Loc = SMLoc::getFromPointer(NameLoc.getPointer() + Start);
      return Error(Loc, "instruction with .n (narrow) qualifier not allowed in "
                   "arm mode");
    }

    // The .n qualifier is always discarded as that is what the tables
    // and matcher expect. In ARM mode the .w qualifier has no effect,
    // so discard it to avoid errors that can be caused by the matcher.
    if (ExtraToken != ".n" && (isThumb() || ExtraToken != ".w")) {
      SMLoc Loc = SMLoc::getFromPointer(NameLoc.getPointer() + Start);
      Operands.push_back(ARMOperand::CreateToken(ExtraToken, Loc));
    }
  }

  // Read the remaining operands.
  if (getLexer().isNot(AsmToken::EndOfStatement)) {
    // Read the first operand.
    if (parseOperand(Operands, Mnemonic)) {
      return true;
    }

    while (parseOptionalToken(AsmToken::Comma)) {
      // Parse and remember the operand.
      if (parseOperand(Operands, Mnemonic)) {
        return true;
      }
    }
  }

  if (parseToken(AsmToken::EndOfStatement, "unexpected token in argument list"))
    return true;

  tryConvertingToTwoOperandForm(Mnemonic, CarrySetting, Operands);

  // Some instructions, mostly Thumb, have forms for the same mnemonic that
  // do and don't have a cc_out optional-def operand. With some spot-checks
  // of the operand list, we can figure out which variant we're trying to
  // parse and adjust accordingly before actually matching. We shouldn't ever
  // try to remove a cc_out operand that was explicitly set on the
  // mnemonic, of course (CarrySetting == true). Reason number #317 the
  // table driven matcher doesn't fit well with the ARM instruction set.
  if (!CarrySetting && shouldOmitCCOutOperand(Mnemonic, Operands))
    Operands.erase(Operands.begin() + 1);

  // Some instructions have the same mnemonic, but don't always
  // have a predicate. Distinguish them here and delete the
  // predicate if needed.
  if (PredicationCode == ARMCC::AL &&
      shouldOmitPredicateOperand(Mnemonic, Operands))
    Operands.erase(Operands.begin() + 1);

  // ARM mode 'blx' need special handling, as the register operand version
  // is predicable, but the label operand version is not. So, we can't rely
  // on the Mnemonic based checking to correctly figure out when to put
  // a k_CondCode operand in the list. If we're trying to match the label
  // version, remove the k_CondCode operand here.
  if (!isThumb() && Mnemonic == "blx" && Operands.size() == 3 &&
      static_cast<ARMOperand &>(*Operands[2]).isImm())
    Operands.erase(Operands.begin() + 1);

  // Adjust operands of ldrexd/strexd to MCK_GPRPair.
  // ldrexd/strexd require even/odd GPR pair. To enforce this constraint,
  // a single GPRPair reg operand is used in the .td file to replace the two
  // GPRs. However, when parsing from asm, the two GRPs cannot be automatically
  // expressed as a GPRPair, so we have to manually merge them.
  // FIXME: We would really like to be able to tablegen'erate this.
  if (!isThumb() && Operands.size() > 4 &&
      (Mnemonic == "ldrexd" || Mnemonic == "strexd" || Mnemonic == "ldaexd" ||
       Mnemonic == "stlexd")) {
    bool isLoad = (Mnemonic == "ldrexd" || Mnemonic == "ldaexd");
    unsigned Idx = isLoad ? 2 : 3;
    ARMOperand &Op1 = static_cast<ARMOperand &>(*Operands[Idx]);
    ARMOperand &Op2 = static_cast<ARMOperand &>(*Operands[Idx + 1]);

    const MCRegisterClass& MRC = MRI->getRegClass(ARM::GPRRegClassID);
    // Adjust only if Op1 and Op2 are GPRs.
    if (Op1.isReg() && Op2.isReg() && MRC.contains(Op1.getReg()) &&
        MRC.contains(Op2.getReg())) {
      unsigned Reg1 = Op1.getReg();
      unsigned Reg2 = Op2.getReg();
      unsigned Rt = MRI->getEncodingValue(Reg1);
      unsigned Rt2 = MRI->getEncodingValue(Reg2);

      // Rt2 must be Rt + 1 and Rt must be even.
      if (Rt + 1 != Rt2 || (Rt & 1)) {
        return Error(Op2.getStartLoc(),
                     isLoad ? "destination operands must be sequential"
                            : "source operands must be sequential");
      }
      unsigned NewReg = MRI->getMatchingSuperReg(Reg1, ARM::gsub_0,
          &(MRI->getRegClass(ARM::GPRPairRegClassID)));
      Operands[Idx] =
          ARMOperand::CreateReg(NewReg, Op1.getStartLoc(), Op2.getEndLoc());
      Operands.erase(Operands.begin() + Idx + 1);
    }
  }

  // GNU Assembler extension (compatibility).
  fixupGNULDRDAlias(Mnemonic, Operands);

  // FIXME: As said above, this is all a pretty gross hack. This instruction
  // does not fit with other "subs" and tblgen.
  // Adjust operands of B9.3.19 SUBS PC, LR, #imm (Thumb2) system instruction
  // so the Mnemonic is the original name "subs" and delete the predicate
  // operand so it will match the table entry.
  if (isThumbTwo() && Mnemonic == "sub" && Operands.size() == 6 &&
      static_cast<ARMOperand &>(*Operands[3]).isReg() &&
      static_cast<ARMOperand &>(*Operands[3]).getReg() == ARM::PC &&
      static_cast<ARMOperand &>(*Operands[4]).isReg() &&
      static_cast<ARMOperand &>(*Operands[4]).getReg() == ARM::LR &&
      static_cast<ARMOperand &>(*Operands[5]).isImm()) {
    Operands.front() = ARMOperand::CreateToken(Name, NameLoc);
    Operands.erase(Operands.begin() + 1);
  }
  return false;
}
6136
6137// Validate context-sensitive operand constraints.
6138
6139// return 'true' if register list contains non-low GPR registers,
6140// 'false' otherwise. If Reg is in the register list or is HiReg, set
6141// 'containsReg' to true.
6142static bool checkLowRegisterList(const MCInst &Inst, unsigned OpNo,
6143 unsigned Reg, unsigned HiReg,
6144 bool &containsReg) {
6145 containsReg = false;
6146 for (unsigned i = OpNo; i < Inst.getNumOperands(); ++i) {
6147 unsigned OpReg = Inst.getOperand(i).getReg();
6148 if (OpReg == Reg)
6149 containsReg = true;
6150 // Anything other than a low register isn't legal here.
6151 if (!isARMLowRegister(OpReg) && (!HiReg || OpReg != HiReg))
6152 return true;
6153 }
6154 return false;
6155}
6156
6157// Check if the specified regisgter is in the register list of the inst,
6158// starting at the indicated operand number.
6159static bool listContainsReg(const MCInst &Inst, unsigned OpNo, unsigned Reg) {
6160 for (unsigned i = OpNo, e = Inst.getNumOperands(); i < e; ++i) {
6161 unsigned OpReg = Inst.getOperand(i).getReg();
6162 if (OpReg == Reg)
6163 return true;
6164 }
6165 return false;
6166}
6167
6168// Return true if instruction has the interesting property of being
6169// allowed in IT blocks, but not being predicable.
6170static bool instIsBreakpoint(const MCInst &Inst) {
6171 return Inst.getOpcode() == ARM::tBKPT ||
6172 Inst.getOpcode() == ARM::BKPT ||
6173 Inst.getOpcode() == ARM::tHLT ||
6174 Inst.getOpcode() == ARM::HLT;
6175}
6176
6177bool ARMAsmParser::validatetLDMRegList(const MCInst &Inst,
6178 const OperandVector &Operands,
6179 unsigned ListNo, bool IsARPop) {
6180 const ARMOperand &Op = static_cast<const ARMOperand &>(*Operands[ListNo]);
6181 bool HasWritebackToken = Op.isToken() && Op.getToken() == "!";
6182
6183 bool ListContainsSP = listContainsReg(Inst, ListNo, ARM::SP);
6184 bool ListContainsLR = listContainsReg(Inst, ListNo, ARM::LR);
6185 bool ListContainsPC = listContainsReg(Inst, ListNo, ARM::PC);
6186
6187 if (!IsARPop && ListContainsSP)
6188 return Error(Operands[ListNo + HasWritebackToken]->getStartLoc(),
6189 "SP may not be in the register list");
6190 else if (ListContainsPC && ListContainsLR)
6191 return Error(Operands[ListNo + HasWritebackToken]->getStartLoc(),
6192 "PC and LR may not be in the register list simultaneously");
6193 return false;
6194}
6195
6196bool ARMAsmParser::validatetSTMRegList(const MCInst &Inst,
6197 const OperandVector &Operands,
6198 unsigned ListNo) {
6199 const ARMOperand &Op = static_cast<const ARMOperand &>(*Operands[ListNo]);
6200 bool HasWritebackToken = Op.isToken() && Op.getToken() == "!";
6201
6202 bool ListContainsSP = listContainsReg(Inst, ListNo, ARM::SP);
6203 bool ListContainsPC = listContainsReg(Inst, ListNo, ARM::PC);
6204
6205 if (ListContainsSP && ListContainsPC)
6206 return Error(Operands[ListNo + HasWritebackToken]->getStartLoc(),
6207 "SP and PC may not be in the register list");
6208 else if (ListContainsSP)
6209 return Error(Operands[ListNo + HasWritebackToken]->getStartLoc(),
6210 "SP may not be in the register list");
6211 else if (ListContainsPC)
6212 return Error(Operands[ListNo + HasWritebackToken]->getStartLoc(),
6213 "PC may not be in the register list");
6214 return false;
6215}
6216
// FIXME: We would really like to be able to tablegen'erate this.
/// Perform target-specific, context-sensitive validation of a matched
/// instruction: IT-block rules, register-pair constraints for dual
/// loads/stores, register-list restrictions, branch-target ranges, and
/// assorted per-opcode UNPREDICTABLE cases. Returns true (after emitting a
/// diagnostic) if the instruction is invalid; false if it is acceptable.
bool ARMAsmParser::validateInstruction(MCInst &Inst,
                                       const OperandVector &Operands) {
  const MCInstrDesc &MCID = MII.get(Inst.getOpcode());
  SMLoc Loc = Operands[0]->getStartLoc();

  // Check the IT block state first.
  // NOTE: BKPT and HLT instructions have the interesting property of being
  // allowed in IT blocks, but not being predicable. They just always execute.
  if (inITBlock() && !instIsBreakpoint(Inst)) {
    // The instruction must be predicable.
    if (!MCID.isPredicable())
      return Error(Loc, "instructions in IT block must be predicable");
    unsigned Cond = Inst.getOperand(MCID.findFirstPredOperandIdx()).getImm();
    if (Cond != currentITCond()) {
      // Find the condition code Operand to get its SMLoc information.
      SMLoc CondLoc;
      for (unsigned I = 1; I < Operands.size(); ++I)
        if (static_cast<ARMOperand &>(*Operands[I]).isCondCode())
          CondLoc = Operands[I]->getStartLoc();
      return Error(CondLoc, "incorrect condition in IT block; got '" +
                   StringRef(ARMCondCodeToString(ARMCC::CondCodes(Cond))) +
                   "', but expected '" +
                   ARMCondCodeToString(ARMCC::CondCodes(currentITCond())) + "'");
    }
    // Check for non-'al' condition codes outside of the IT block.
  } else if (isThumbTwo() && MCID.isPredicable() &&
             Inst.getOperand(MCID.findFirstPredOperandIdx()).getImm() !=
             ARMCC::AL && Inst.getOpcode() != ARM::tBcc &&
             Inst.getOpcode() != ARM::t2Bcc) {
    return Error(Loc, "predicated instructions must be in IT block");
  } else if (!isThumb() && !useImplicitITARM() && MCID.isPredicable() &&
             Inst.getOperand(MCID.findFirstPredOperandIdx()).getImm() !=
             ARMCC::AL) {
    return Warning(Loc, "predicated instructions should be in IT block");
  }

  // PC-setting instructions in an IT block, but not the last instruction of
  // the block, are UNPREDICTABLE.
  if (inExplicitITBlock() && !lastInITBlock() && isITBlockTerminator(Inst)) {
    return Error(Loc, "instruction must be outside of IT block or the last instruction in an IT block");
  }

  // Per-opcode checks that cannot be expressed through match classes.
  const unsigned Opcode = Inst.getOpcode();
  switch (Opcode) {
  case ARM::LDRD:
  case ARM::LDRD_PRE:
  case ARM::LDRD_POST: {
    const unsigned RtReg = Inst.getOperand(0).getReg();

    // Rt can't be R14.
    if (RtReg == ARM::LR)
      return Error(Operands[3]->getStartLoc(),
                   "Rt can't be R14");

    const unsigned Rt = MRI->getEncodingValue(RtReg);
    // Rt must be even-numbered.
    if ((Rt & 1) == 1)
      return Error(Operands[3]->getStartLoc(),
                   "Rt must be even-numbered");

    // Rt2 must be Rt + 1.
    const unsigned Rt2 = MRI->getEncodingValue(Inst.getOperand(1).getReg());
    if (Rt2 != Rt + 1)
      return Error(Operands[3]->getStartLoc(),
                   "destination operands must be sequential");

    if (Opcode == ARM::LDRD_PRE || Opcode == ARM::LDRD_POST) {
      const unsigned Rn = MRI->getEncodingValue(Inst.getOperand(3).getReg());
      // For addressing modes with writeback, the base register needs to be
      // different from the destination registers.
      if (Rn == Rt || Rn == Rt2)
        return Error(Operands[3]->getStartLoc(),
                     "base register needs to be different from destination "
                     "registers");
    }

    return false;
  }
  case ARM::t2LDRDi8:
  case ARM::t2LDRD_PRE:
  case ARM::t2LDRD_POST: {
    // Rt2 must be different from Rt.
    unsigned Rt = MRI->getEncodingValue(Inst.getOperand(0).getReg());
    unsigned Rt2 = MRI->getEncodingValue(Inst.getOperand(1).getReg());
    if (Rt2 == Rt)
      return Error(Operands[3]->getStartLoc(),
                   "destination operands can't be identical");
    return false;
  }
  case ARM::t2BXJ: {
    const unsigned RmReg = Inst.getOperand(0).getReg();
    // Rm = SP is no longer unpredictable in v8-A
    if (RmReg == ARM::SP && !hasV8Ops())
      return Error(Operands[2]->getStartLoc(),
                   "r13 (SP) is an unpredictable operand to BXJ");
    return false;
  }
  case ARM::STRD: {
    // Rt2 must be Rt + 1.
    unsigned Rt = MRI->getEncodingValue(Inst.getOperand(0).getReg());
    unsigned Rt2 = MRI->getEncodingValue(Inst.getOperand(1).getReg());
    if (Rt2 != Rt + 1)
      return Error(Operands[3]->getStartLoc(),
                   "source operands must be sequential");
    return false;
  }
  case ARM::STRD_PRE:
  case ARM::STRD_POST: {
    // Rt2 must be Rt + 1.
    // NOTE: operand 0 is the writeback result here, so Rt/Rt2 are at 1/2.
    unsigned Rt = MRI->getEncodingValue(Inst.getOperand(1).getReg());
    unsigned Rt2 = MRI->getEncodingValue(Inst.getOperand(2).getReg());
    if (Rt2 != Rt + 1)
      return Error(Operands[3]->getStartLoc(),
                   "source operands must be sequential");
    return false;
  }
  case ARM::STR_PRE_IMM:
  case ARM::STR_PRE_REG:
  case ARM::STR_POST_IMM:
  case ARM::STR_POST_REG:
  case ARM::STRH_PRE:
  case ARM::STRH_POST:
  case ARM::STRB_PRE_IMM:
  case ARM::STRB_PRE_REG:
  case ARM::STRB_POST_IMM:
  case ARM::STRB_POST_REG: {
    // Rt must be different from Rn.
    const unsigned Rt = MRI->getEncodingValue(Inst.getOperand(1).getReg());
    const unsigned Rn = MRI->getEncodingValue(Inst.getOperand(2).getReg());

    if (Rt == Rn)
      return Error(Operands[3]->getStartLoc(),
                   "source register and base register can't be identical");
    return false;
  }
  case ARM::LDR_PRE_IMM:
  case ARM::LDR_PRE_REG:
  case ARM::LDR_POST_IMM:
  case ARM::LDR_POST_REG:
  case ARM::LDRH_PRE:
  case ARM::LDRH_POST:
  case ARM::LDRSH_PRE:
  case ARM::LDRSH_POST:
  case ARM::LDRB_PRE_IMM:
  case ARM::LDRB_PRE_REG:
  case ARM::LDRB_POST_IMM:
  case ARM::LDRB_POST_REG:
  case ARM::LDRSB_PRE:
  case ARM::LDRSB_POST: {
    // Rt must be different from Rn.
    const unsigned Rt = MRI->getEncodingValue(Inst.getOperand(0).getReg());
    const unsigned Rn = MRI->getEncodingValue(Inst.getOperand(2).getReg());

    if (Rt == Rn)
      return Error(Operands[3]->getStartLoc(),
                   "destination register and base register can't be identical");
    return false;
  }
  case ARM::SBFX:
  case ARM::UBFX: {
    // Width must be in range [1, 32-lsb].
    unsigned LSB = Inst.getOperand(2).getImm();
    unsigned Widthm1 = Inst.getOperand(3).getImm();
    if (Widthm1 >= 32 - LSB)
      return Error(Operands[5]->getStartLoc(),
                   "bitfield width must be in range [1,32-lsb]");
    return false;
  }
  // Notionally handles ARM::tLDMIA_UPD too.
  case ARM::tLDMIA: {
    // If we're parsing Thumb2, the .w variant is available and handles
    // most cases that are normally illegal for a Thumb1 LDM instruction.
    // We'll make the transformation in processInstruction() if necessary.
    //
    // Thumb LDM instructions are writeback iff the base register is not
    // in the register list.
    unsigned Rn = Inst.getOperand(0).getReg();
    bool HasWritebackToken =
        (static_cast<ARMOperand &>(*Operands[3]).isToken() &&
         static_cast<ARMOperand &>(*Operands[3]).getToken() == "!");
    bool ListContainsBase;
    if (checkLowRegisterList(Inst, 3, Rn, 0, ListContainsBase) && !isThumbTwo())
      return Error(Operands[3 + HasWritebackToken]->getStartLoc(),
                   "registers must be in range r0-r7");
    // If we should have writeback, then there should be a '!' token.
    if (!ListContainsBase && !HasWritebackToken && !isThumbTwo())
      return Error(Operands[2]->getStartLoc(),
                   "writeback operator '!' expected");
    // If we should not have writeback, there must not be a '!'. This is
    // true even for the 32-bit wide encodings.
    if (ListContainsBase && HasWritebackToken)
      return Error(Operands[3]->getStartLoc(),
                   "writeback operator '!' not allowed when base register "
                   "in register list");

    if (validatetLDMRegList(Inst, Operands, 3))
      return true;
    break;
  }
  case ARM::LDMIA_UPD:
  case ARM::LDMDB_UPD:
  case ARM::LDMIB_UPD:
  case ARM::LDMDA_UPD:
    // ARM variants loading and updating the same register are only officially
    // UNPREDICTABLE on v7 upwards. Goodness knows what they did before.
    if (!hasV7Ops())
      break;
    if (listContainsReg(Inst, 3, Inst.getOperand(0).getReg()))
      return Error(Operands.back()->getStartLoc(),
                   "writeback register not allowed in register list");
    break;
  case ARM::t2LDMIA:
  case ARM::t2LDMDB:
    if (validatetLDMRegList(Inst, Operands, 3))
      return true;
    break;
  case ARM::t2STMIA:
  case ARM::t2STMDB:
    if (validatetSTMRegList(Inst, Operands, 3))
      return true;
    break;
  case ARM::t2LDMIA_UPD:
  case ARM::t2LDMDB_UPD:
  case ARM::t2STMIA_UPD:
  case ARM::t2STMDB_UPD:
    if (listContainsReg(Inst, 3, Inst.getOperand(0).getReg()))
      return Error(Operands.back()->getStartLoc(),
                   "writeback register not allowed in register list");

    if (Opcode == ARM::t2LDMIA_UPD || Opcode == ARM::t2LDMDB_UPD) {
      if (validatetLDMRegList(Inst, Operands, 3))
        return true;
    } else {
      if (validatetSTMRegList(Inst, Operands, 3))
        return true;
    }
    break;

  case ARM::sysLDMIA_UPD:
  case ARM::sysLDMDA_UPD:
  case ARM::sysLDMDB_UPD:
  case ARM::sysLDMIB_UPD:
    if (!listContainsReg(Inst, 3, ARM::PC))
      return Error(Operands[4]->getStartLoc(),
                   "writeback register only allowed on system LDM "
                   "if PC in register-list");
    break;
  case ARM::sysSTMIA_UPD:
  case ARM::sysSTMDA_UPD:
  case ARM::sysSTMDB_UPD:
  case ARM::sysSTMIB_UPD:
    return Error(Operands[2]->getStartLoc(),
                 "system STM cannot have writeback register");
  case ARM::tMUL:
    // The second source operand must be the same register as the destination
    // operand.
    //
    // In this case, we must directly check the parsed operands because the
    // cvtThumbMultiply() function is written in such a way that it guarantees
    // this first statement is always true for the new Inst. Essentially, the
    // destination is unconditionally copied into the second source operand
    // without checking to see if it matches what we actually parsed.
    if (Operands.size() == 6 && (((ARMOperand &)*Operands[3]).getReg() !=
                                 ((ARMOperand &)*Operands[5]).getReg()) &&
        (((ARMOperand &)*Operands[3]).getReg() !=
         ((ARMOperand &)*Operands[4]).getReg())) {
      return Error(Operands[3]->getStartLoc(),
                   "destination register must match source register");
    }
    break;

  // Like for ldm/stm, push and pop have hi-reg handling version in Thumb2,
  // so only issue a diagnostic for thumb1. The instructions will be
  // switched to the t2 encodings in processInstruction() if necessary.
  case ARM::tPOP: {
    bool ListContainsBase;
    if (checkLowRegisterList(Inst, 2, 0, ARM::PC, ListContainsBase) &&
        !isThumbTwo())
      return Error(Operands[2]->getStartLoc(),
                   "registers must be in range r0-r7 or pc");
    if (validatetLDMRegList(Inst, Operands, 2, !isMClass()))
      return true;
    break;
  }
  case ARM::tPUSH: {
    bool ListContainsBase;
    if (checkLowRegisterList(Inst, 2, 0, ARM::LR, ListContainsBase) &&
        !isThumbTwo())
      return Error(Operands[2]->getStartLoc(),
                   "registers must be in range r0-r7 or lr");
    if (validatetSTMRegList(Inst, Operands, 2))
      return true;
    break;
  }
  case ARM::tSTMIA_UPD: {
    bool ListContainsBase, InvalidLowList;
    InvalidLowList = checkLowRegisterList(Inst, 4, Inst.getOperand(0).getReg(),
                                          0, ListContainsBase);
    if (InvalidLowList && !isThumbTwo())
      return Error(Operands[4]->getStartLoc(),
                   "registers must be in range r0-r7");

    // This would be converted to a 32-bit stm, but that's not valid if the
    // writeback register is in the list.
    if (InvalidLowList && ListContainsBase)
      return Error(Operands[4]->getStartLoc(),
                   "writeback operator '!' not allowed when base register "
                   "in register list");

    if (validatetSTMRegList(Inst, Operands, 4))
      return true;
    break;
  }
  case ARM::tADDrSP:
    // If the non-SP source operand and the destination operand are not the
    // same, we need thumb2 (for the wide encoding), or we have an error.
    if (!isThumbTwo() &&
        Inst.getOperand(0).getReg() != Inst.getOperand(2).getReg()) {
      return Error(Operands[4]->getStartLoc(),
                   "source register must be the same as destination");
    }
    break;

  // Final range checking for Thumb unconditional branch instructions.
  case ARM::tB:
    if (!(static_cast<ARMOperand &>(*Operands[2])).isSignedOffset<11, 1>())
      return Error(Operands[2]->getStartLoc(), "branch target out of range");
    break;
  case ARM::t2B: {
    // The branch target may be at index 2 or 3 depending on whether a
    // condition-code operand was kept.
    int op = (Operands[2]->isImm()) ? 2 : 3;
    if (!static_cast<ARMOperand &>(*Operands[op]).isSignedOffset<24, 1>())
      return Error(Operands[op]->getStartLoc(), "branch target out of range");
    break;
  }
  // Final range checking for Thumb conditional branch instructions.
  case ARM::tBcc:
    if (!static_cast<ARMOperand &>(*Operands[2]).isSignedOffset<8, 1>())
      return Error(Operands[2]->getStartLoc(), "branch target out of range");
    break;
  case ARM::t2Bcc: {
    int Op = (Operands[2]->isImm()) ? 2 : 3;
    if (!static_cast<ARMOperand &>(*Operands[Op]).isSignedOffset<20, 1>())
      return Error(Operands[Op]->getStartLoc(), "branch target out of range");
    break;
  }
  case ARM::tCBZ:
  case ARM::tCBNZ: {
    if (!static_cast<ARMOperand &>(*Operands[2]).isUnsignedOffset<6, 1>())
      return Error(Operands[2]->getStartLoc(), "branch target out of range");
    break;
  }
  case ARM::MOVi16:
  case ARM::MOVTi16:
  case ARM::t2MOVi16:
  case ARM::t2MOVTi16:
    {
    // We want to avoid misleadingly allowing something like "mov r0, <symbol>"
    // especially when we turn it into a movw and the expression <symbol> does
    // not have a :lower16: or :upper16 as part of the expression. We don't
    // want the behavior of silently truncating, which can be unexpected and
    // lead to bugs that are difficult to find since this is an easy mistake
    // to make.
    int i = (Operands[3]->isImm()) ? 3 : 4;
    ARMOperand &Op = static_cast<ARMOperand &>(*Operands[i]);
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Op.getImm());
    if (CE) break;
    const MCExpr *E = dyn_cast<MCExpr>(Op.getImm());
    if (!E) break;
    const ARMMCExpr *ARM16Expr = dyn_cast<ARMMCExpr>(E);
    if (!ARM16Expr || (ARM16Expr->getKind() != ARMMCExpr::VK_ARM_HI16 &&
                       ARM16Expr->getKind() != ARMMCExpr::VK_ARM_LO16))
      return Error(
          Op.getStartLoc(),
          "immediate expression for mov requires :lower16: or :upper16");
    break;
  }
  case ARM::HINT:
  case ARM::t2HINT:
    if (hasRAS()) {
      // ESB is not predicable (pred must be AL)
      unsigned Imm8 = Inst.getOperand(0).getImm();
      unsigned Pred = Inst.getOperand(1).getImm();
      if (Imm8 == 0x10 && Pred != ARMCC::AL)
        return Error(Operands[1]->getStartLoc(), "instruction 'esb' is not "
                                                 "predicable, but condition "
                                                 "code specified");
    }
    // Without the RAS extension, this behaves as any other unallocated hint.
    break;
  }

  return false;
}
6611
6612static unsigned getRealVSTOpcode(unsigned Opc, unsigned &Spacing) {
6613 switch(Opc) {
6614 default: llvm_unreachable("unexpected opcode!")::llvm::llvm_unreachable_internal("unexpected opcode!", "/build/llvm-toolchain-snapshot-6.0~svn321639/lib/Target/ARM/AsmParser/ARMAsmParser.cpp"
, 6614)
;
6615 // VST1LN
6616 case ARM::VST1LNdWB_fixed_Asm_8: Spacing = 1; return ARM::VST1LNd8_UPD;
6617 case ARM::VST1LNdWB_fixed_Asm_16: Spacing = 1; return ARM::VST1LNd16_UPD;
6618 case ARM::VST1LNdWB_fixed_Asm_32: Spacing = 1; return ARM::VST1LNd32_UPD;
6619 case ARM::VST1LNdWB_register_Asm_8: Spacing = 1; return ARM::VST1LNd8_UPD;
6620 case ARM::VST1LNdWB_register_Asm_16: Spacing = 1; return ARM::VST1LNd16_UPD;
6621 case ARM::VST1LNdWB_register_Asm_32: Spacing = 1; return ARM::VST1LNd32_UPD;
6622 case ARM::VST1LNdAsm_8: Spacing = 1; return ARM::VST1LNd8;
6623 case ARM::VST1LNdAsm_16: Spacing = 1; return ARM::VST1LNd16;
6624 case ARM::VST1LNdAsm_32: Spacing = 1; return ARM::VST1LNd32;
6625
6626 // VST2LN
6627 case ARM::VST2LNdWB_fixed_Asm_8: Spacing = 1; return ARM::VST2LNd8_UPD;
6628 case ARM::VST2LNdWB_fixed_Asm_16: Spacing = 1; return ARM::VST2LNd16_UPD;
6629 case ARM::VST2LNdWB_fixed_Asm_32: Spacing = 1; return ARM::VST2LNd32_UPD;
6630 case ARM::VST2LNqWB_fixed_Asm_16: Spacing = 2; return ARM::VST2LNq16_UPD;
6631 case ARM::VST2LNqWB_fixed_Asm_32: Spacing = 2; return ARM::VST2LNq32_UPD;
6632
6633 case ARM::VST2LNdWB_register_Asm_8: Spacing = 1; return ARM::VST2LNd8_UPD;
6634 case ARM::VST2LNdWB_register_Asm_16: Spacing = 1; return ARM::VST2LNd16_UPD;
6635 case ARM::VST2LNdWB_register_Asm_32: Spacing = 1; return ARM::VST2LNd32_UPD;
6636 case ARM::VST2LNqWB_register_Asm_16: Spacing = 2; return ARM::VST2LNq16_UPD;
6637 case ARM::VST2LNqWB_register_Asm_32: Spacing = 2; return ARM::VST2LNq32_UPD;
6638
6639 case ARM::VST2LNdAsm_8: Spacing = 1; return ARM::VST2LNd8;
6640 case ARM::VST2LNdAsm_16: Spacing = 1; return ARM::VST2LNd16;
6641 case ARM::VST2LNdAsm_32: Spacing = 1; return ARM::VST2LNd32;
6642 case ARM::VST2LNqAsm_16: Spacing = 2; return ARM::VST2LNq16;
6643 case ARM::VST2LNqAsm_32: Spacing = 2; return ARM::VST2LNq32;
6644
6645 // VST3LN
6646 case ARM::VST3LNdWB_fixed_Asm_8: Spacing = 1; return ARM::VST3LNd8_UPD;
6647 case ARM::VST3LNdWB_fixed_Asm_16: Spacing = 1; return ARM::VST3LNd16_UPD;
6648 case ARM::VST3LNdWB_fixed_Asm_32: Spacing = 1; return ARM::VST3LNd32_UPD;
6649 case ARM::VST3LNqWB_fixed_Asm_16: Spacing = 1; return ARM::VST3LNq16_UPD;
6650 case ARM::VST3LNqWB_fixed_Asm_32: Spacing = 2; return ARM::VST3LNq32_UPD;
6651 case ARM::VST3LNdWB_register_Asm_8: Spacing = 1; return ARM::VST3LNd8_UPD;
6652 case ARM::VST3LNdWB_register_Asm_16: Spacing = 1; return ARM::VST3LNd16_UPD;
6653 case ARM::VST3LNdWB_register_Asm_32: Spacing = 1; return ARM::VST3LNd32_UPD;
6654 case ARM::VST3LNqWB_register_Asm_16: Spacing = 2; return ARM::VST3LNq16_UPD;
6655 case ARM::VST3LNqWB_register_Asm_32: Spacing = 2; return ARM::VST3LNq32_UPD;
6656 case ARM::VST3LNdAsm_8: Spacing = 1; return ARM::VST3LNd8;
6657 case ARM::VST3LNdAsm_16: Spacing = 1; return ARM::VST3LNd16;
6658 case ARM::VST3LNdAsm_32: Spacing = 1; return ARM::VST3LNd32;
6659 case ARM::VST3LNqAsm_16: Spacing = 2; return ARM::VST3LNq16;
6660 case ARM::VST3LNqAsm_32: Spacing = 2; return ARM::VST3LNq32;
6661
6662 // VST3
6663 case ARM::VST3dWB_fixed_Asm_8: Spacing = 1; return ARM::VST3d8_UPD;
6664 case ARM::VST3dWB_fixed_Asm_16: Spacing = 1; return ARM::VST3d16_UPD;
6665 case ARM::VST3dWB_fixed_Asm_32: Spacing = 1; return ARM::VST3d32_UPD;
6666 case ARM::VST3qWB_fixed_Asm_8: Spacing = 2; return ARM::VST3q8_UPD;
6667 case ARM::VST3qWB_fixed_Asm_16: Spacing = 2; return ARM::VST3q16_UPD;
6668 case ARM::VST3qWB_fixed_Asm_32: Spacing = 2; return ARM::VST3q32_UPD;
6669 case ARM::VST3dWB_register_Asm_8: Spacing = 1; return ARM::VST3d8_UPD;
6670 case ARM::VST3dWB_register_Asm_16: Spacing = 1; return ARM::VST3d16_UPD;
6671 case ARM::VST3dWB_register_Asm_32: Spacing = 1; return ARM::VST3d32_UPD;
6672 case ARM::VST3qWB_register_Asm_8: Spacing = 2; return ARM::VST3q8_UPD;
6673 case ARM::VST3qWB_register_Asm_16: Spacing = 2; return ARM::VST3q16_UPD;
6674 case ARM::VST3qWB_register_Asm_32: Spacing = 2; return ARM::VST3q32_UPD;
6675 case ARM::VST3dAsm_8: Spacing = 1; return ARM::VST3d8;
6676 case ARM::VST3dAsm_16: Spacing = 1; return ARM::VST3d16;
6677 case ARM::VST3dAsm_32: Spacing = 1; return ARM::VST3d32;
6678 case ARM::VST3qAsm_8: Spacing = 2; return ARM::VST3q8;
6679 case ARM::VST3qAsm_16: Spacing = 2; return ARM::VST3q16;
6680 case ARM::VST3qAsm_32: Spacing = 2; return ARM::VST3q32;
6681
6682 // VST4LN
6683 case ARM::VST4LNdWB_fixed_Asm_8: Spacing = 1; return ARM::VST4LNd8_UPD;
6684 case ARM::VST4LNdWB_fixed_Asm_16: Spacing = 1; return ARM::VST4LNd16_UPD;
6685 case ARM::VST4LNdWB_fixed_Asm_32: Spacing = 1; return ARM::VST4LNd32_UPD;
6686 case ARM::VST4LNqWB_fixed_Asm_16: Spacing = 1; return ARM::VST4LNq16_UPD;
6687 case ARM::VST4LNqWB_fixed_Asm_32: Spacing = 2; return ARM::VST4LNq32_UPD;
6688 case ARM::VST4LNdWB_register_Asm_8: Spacing = 1; return ARM::VST4LNd8_UPD;
6689 case ARM::VST4LNdWB_register_Asm_16: Spacing = 1; return ARM::VST4LNd16_UPD;
6690 case ARM::VST4LNdWB_register_Asm_32: Spacing = 1; return ARM::VST4LNd32_UPD;
6691 case ARM::VST4LNqWB_register_Asm_16: Spacing = 2; return ARM::VST4LNq16_UPD;
6692 case ARM::VST4LNqWB_register_Asm_32: Spacing = 2; return ARM::VST4LNq32_UPD;
6693 case ARM::VST4LNdAsm_8: Spacing = 1; return ARM::VST4LNd8;
6694 case ARM::VST4LNdAsm_16: Spacing = 1; return ARM::VST4LNd16;
6695 case ARM::VST4LNdAsm_32: Spacing = 1; return ARM::VST4LNd32;
6696 case ARM::VST4LNqAsm_16: Spacing = 2; return ARM::VST4LNq16;
6697 case ARM::VST4LNqAsm_32: Spacing = 2; return ARM::VST4LNq32;
6698
6699 // VST4
6700 case ARM::VST4dWB_fixed_Asm_8: Spacing = 1; return ARM::VST4d8_UPD;
6701 case ARM::VST4dWB_fixed_Asm_16: Spacing = 1; return ARM::VST4d16_UPD;
6702 case ARM::VST4dWB_fixed_Asm_32: Spacing = 1; return ARM::VST4d32_UPD;
6703 case ARM::VST4qWB_fixed_Asm_8: Spacing = 2; return ARM::VST4q8_UPD;
6704 case ARM::VST4qWB_fixed_Asm_16: Spacing = 2; return ARM::VST4q16_UPD;
6705 case ARM::VST4qWB_fixed_Asm_32: Spacing = 2; return ARM::VST4q32_UPD;
6706 case ARM::VST4dWB_register_Asm_8: Spacing = 1; return ARM::VST4d8_UPD;
6707 case ARM::VST4dWB_register_Asm_16: Spacing = 1; return ARM::VST4d16_UPD;
6708 case ARM::VST4dWB_register_Asm_32: Spacing = 1; return ARM::VST4d32_UPD;
6709 case ARM::VST4qWB_register_Asm_8: Spacing = 2; return ARM::VST4q8_UPD;
6710 case ARM::VST4qWB_register_Asm_16: Spacing = 2; return ARM::VST4q16_UPD;
6711 case ARM::VST4qWB_register_Asm_32: Spacing = 2; return ARM::VST4q32_UPD;
6712 case ARM::VST4dAsm_8: Spacing = 1; return ARM::VST4d8;
6713 case ARM::VST4dAsm_16: Spacing = 1; return ARM::VST4d16;
6714 case ARM::VST4dAsm_32: Spacing = 1; return ARM::VST4d32;
6715 case ARM::VST4qAsm_8: Spacing = 2; return ARM::VST4q8;
6716 case ARM::VST4qAsm_16: Spacing = 2; return ARM::VST4q16;
6717 case ARM::VST4qAsm_32: Spacing = 2; return ARM::VST4q32;
6718 }
6719}
6720
6721static unsigned getRealVLDOpcode(unsigned Opc, unsigned &Spacing) {
6722 switch(Opc) {
6723 default: llvm_unreachable("unexpected opcode!")::llvm::llvm_unreachable_internal("unexpected opcode!", "/build/llvm-toolchain-snapshot-6.0~svn321639/lib/Target/ARM/AsmParser/ARMAsmParser.cpp"
, 6723)
;
6724 // VLD1LN
6725 case ARM::VLD1LNdWB_fixed_Asm_8: Spacing = 1; return ARM::VLD1LNd8_UPD;
6726 case ARM::VLD1LNdWB_fixed_Asm_16: Spacing = 1; return ARM::VLD1LNd16_UPD;
6727 case ARM::VLD1LNdWB_fixed_Asm_32: Spacing = 1; return ARM::VLD1LNd32_UPD;
6728 case ARM::VLD1LNdWB_register_Asm_8: Spacing = 1; return ARM::VLD1LNd8_UPD;
6729 case ARM::VLD1LNdWB_register_Asm_16: Spacing = 1; return ARM::VLD1LNd16_UPD;
6730 case ARM::VLD1LNdWB_register_Asm_32: Spacing = 1; return ARM::VLD1LNd32_UPD;
6731 case ARM::VLD1LNdAsm_8: Spacing = 1; return ARM::VLD1LNd8;
6732 case ARM::VLD1LNdAsm_16: Spacing = 1; return ARM::VLD1LNd16;
6733 case ARM::VLD1LNdAsm_32: Spacing = 1; return ARM::VLD1LNd32;
6734
6735 // VLD2LN
6736 case ARM::VLD2LNdWB_fixed_Asm_8: Spacing = 1; return ARM::VLD2LNd8_UPD;
6737 case ARM::VLD2LNdWB_fixed_Asm_16: Spacing = 1; return ARM::VLD2LNd16_UPD;
6738 case ARM::VLD2LNdWB_fixed_Asm_32: Spacing = 1; return ARM::VLD2LNd32_UPD;
6739 case ARM::VLD2LNqWB_fixed_Asm_16: Spacing = 1; return ARM::VLD2LNq16_UPD;
6740 case ARM::VLD2LNqWB_fixed_Asm_32: Spacing = 2; return ARM::VLD2LNq32_UPD;
6741 case ARM::VLD2LNdWB_register_Asm_8: Spacing = 1; return ARM::VLD2LNd8_UPD;
6742 case ARM::VLD2LNdWB_register_Asm_16: Spacing = 1; return ARM::VLD2LNd16_UPD;
6743 case ARM::VLD2LNdWB_register_Asm_32: Spacing = 1; return ARM::VLD2LNd32_UPD;
6744 case ARM::VLD2LNqWB_register_Asm_16: Spacing = 2; return ARM::VLD2LNq16_UPD;
6745 case ARM::VLD2LNqWB_register_Asm_32: Spacing = 2; return ARM::VLD2LNq32_UPD;
6746 case ARM::VLD2LNdAsm_8: Spacing = 1; return ARM::VLD2LNd8;
6747 case ARM::VLD2LNdAsm_16: Spacing = 1; return ARM::VLD2LNd16;
6748 case ARM::VLD2LNdAsm_32: Spacing = 1; return ARM::VLD2LNd32;
6749 case ARM::VLD2LNqAsm_16: Spacing = 2; return ARM::VLD2LNq16;
6750 case ARM::VLD2LNqAsm_32: Spacing = 2; return ARM::VLD2LNq32;
6751
6752 // VLD3DUP
6753 case ARM::VLD3DUPdWB_fixed_Asm_8: Spacing = 1; return ARM::VLD3DUPd8_UPD;
6754 case ARM::VLD3DUPdWB_fixed_Asm_16: Spacing = 1; return ARM::VLD3DUPd16_UPD;
6755 case ARM::VLD3DUPdWB_fixed_Asm_32: Spacing = 1; return ARM::VLD3DUPd32_UPD;
6756 case ARM::VLD3DUPqWB_fixed_Asm_8: Spacing = 1; return ARM::VLD3DUPq8_UPD;
6757 case ARM::VLD3DUPqWB_fixed_Asm_16: Spacing = 2; return ARM::VLD3DUPq16_UPD;
6758 case ARM::VLD3DUPqWB_fixed_Asm_32: Spacing = 2; return ARM::VLD3DUPq32_UPD;
6759 case ARM::VLD3DUPdWB_register_Asm_8: Spacing = 1; return ARM::VLD3DUPd8_UPD;
6760 case ARM::VLD3DUPdWB_register_Asm_16: Spacing = 1; return ARM::VLD3DUPd16_UPD;
6761 case ARM::VLD3DUPdWB_register_Asm_32: Spacing = 1; return ARM::VLD3DUPd32_UPD;
6762 case ARM::VLD3DUPqWB_register_Asm_8: Spacing = 2; return ARM::VLD3DUPq8_UPD;
6763 case ARM::VLD3DUPqWB_register_Asm_16: Spacing = 2; return ARM::VLD3DUPq16_UPD;
6764 case ARM::VLD3DUPqWB_register_Asm_32: Spacing = 2; return ARM::VLD3DUPq32_UPD;
6765 case ARM::VLD3DUPdAsm_8: Spacing = 1; return ARM::VLD3DUPd8;
6766 case ARM::VLD3DUPdAsm_16: Spacing = 1; return ARM::VLD3DUPd16;
6767 case ARM::VLD3DUPdAsm_32: Spacing = 1; return ARM::VLD3DUPd32;
6768 case ARM::VLD3DUPqAsm_8: Spacing = 2; return ARM::VLD3DUPq8;
6769 case ARM::VLD3DUPqAsm_16: Spacing = 2; return ARM::VLD3DUPq16;
6770 case ARM::VLD3DUPqAsm_32: Spacing = 2; return ARM::VLD3DUPq32;
6771
6772 // VLD3LN
6773 case ARM::VLD3LNdWB_fixed_Asm_8: Spacing = 1; return ARM::VLD3LNd8_UPD;
6774 case ARM::VLD3LNdWB_fixed_Asm_16: Spacing = 1; return ARM::VLD3LNd16_UPD;
6775 case ARM::VLD3LNdWB_fixed_Asm_32: Spacing = 1; return ARM::VLD3LNd32_UPD;
6776 case ARM::VLD3LNqWB_fixed_Asm_16: Spacing = 1; return ARM::VLD3LNq16_UPD;
6777 case ARM::VLD3LNqWB_fixed_Asm_32: Spacing = 2; return ARM::VLD3LNq32_UPD;
6778 case ARM::VLD3LNdWB_register_Asm_8: Spacing = 1; return ARM::VLD3LNd8_UPD;
6779 case ARM::VLD3LNdWB_register_Asm_16: Spacing = 1; return ARM::VLD3LNd16_UPD;
6780 case ARM::VLD3LNdWB_register_Asm_32: Spacing = 1; return ARM::VLD3LNd32_UPD;
6781 case ARM::VLD3LNqWB_register_Asm_16: Spacing = 2; return ARM::VLD3LNq16_UPD;
6782 case ARM::VLD3LNqWB_register_Asm_32: Spacing = 2; return ARM::VLD3LNq32_UPD;
6783 case ARM::VLD3LNdAsm_8: Spacing = 1; return ARM::VLD3LNd8;
6784 case ARM::VLD3LNdAsm_16: Spacing = 1; return ARM::VLD3LNd16;
6785 case ARM::VLD3LNdAsm_32: Spacing = 1; return ARM::VLD3LNd32;
6786 case ARM::VLD3LNqAsm_16: Spacing = 2; return ARM::VLD3LNq16;
6787 case ARM::VLD3LNqAsm_32: Spacing = 2; return ARM::VLD3LNq32;
6788
6789 // VLD3
6790 case ARM::VLD3dWB_fixed_Asm_8: Spacing = 1; return ARM::VLD3d8_UPD;
6791 case ARM::VLD3dWB_fixed_Asm_16: Spacing = 1; return ARM::VLD3d16_UPD;
6792 case ARM::VLD3dWB_fixed_Asm_32: Spacing = 1; return ARM::VLD3d32_UPD;
6793 case ARM::VLD3qWB_fixed_Asm_8: Spacing = 2; return ARM::VLD3q8_UPD;
6794 case ARM::VLD3qWB_fixed_Asm_16: Spacing = 2; return ARM::VLD3q16_UPD;
6795 case ARM::VLD3qWB_fixed_Asm_32: Spacing = 2; return ARM::VLD3q32_UPD;
6796 case ARM::VLD3dWB_register_Asm_8: Spacing = 1; return ARM::VLD3d8_UPD;
6797 case ARM::VLD3dWB_register_Asm_16: Spacing = 1; return ARM::VLD3d16_UPD;
6798 case ARM::VLD3dWB_register_Asm_32: Spacing = 1; return ARM::VLD3d32_UPD;
6799 case ARM::VLD3qWB_register_Asm_8: Spacing = 2; return ARM::VLD3q8_UPD;
6800 case ARM::VLD3qWB_register_Asm_16: Spacing = 2; return ARM::VLD3q16_UPD;
6801 case ARM::VLD3qWB_register_Asm_32: Spacing = 2; return ARM::VLD3q32_UPD;
6802 case ARM::VLD3dAsm_8: Spacing = 1; return ARM::VLD3d8;
6803 case ARM::VLD3dAsm_16: Spacing = 1; return ARM::VLD3d16;
6804 case ARM::VLD3dAsm_32: Spacing = 1; return ARM::VLD3d32;
6805 case ARM::VLD3qAsm_8: Spacing = 2; return ARM::VLD3q8;
6806 case ARM::VLD3qAsm_16: Spacing = 2; return ARM::VLD3q16;
6807 case ARM::VLD3qAsm_32: Spacing = 2; return ARM::VLD3q32;
6808
6809 // VLD4LN
6810 case ARM::VLD4LNdWB_fixed_Asm_8: Spacing = 1; return ARM::VLD4LNd8_UPD;
6811 case ARM::VLD4LNdWB_fixed_Asm_16: Spacing = 1; return ARM::VLD4LNd16_UPD;
6812 case ARM::VLD4LNdWB_fixed_Asm_32: Spacing = 1; return ARM::VLD4LNd32_UPD;
6813 case ARM::VLD4LNqWB_fixed_Asm_16: Spacing = 2; return ARM::VLD4LNq16_UPD;
6814 case ARM::VLD4LNqWB_fixed_Asm_32: Spacing = 2; return ARM::VLD4LNq32_UPD;
6815 case ARM::VLD4LNdWB_register_Asm_8: Spacing = 1; return ARM::VLD4LNd8_UPD;
6816 case ARM::VLD4LNdWB_register_Asm_16: Spacing = 1; return ARM::VLD4LNd16_UPD;
6817 case ARM::VLD4LNdWB_register_Asm_32: Spacing = 1; return ARM::VLD4LNd32_UPD;
6818 case ARM::VLD4LNqWB_register_Asm_16: Spacing = 2; return ARM::VLD4LNq16_UPD;
6819 case ARM::VLD4LNqWB_register_Asm_32: Spacing = 2; return ARM::VLD4LNq32_UPD;
6820 case ARM::VLD4LNdAsm_8: Spacing = 1; return ARM::VLD4LNd8;
6821 case ARM::VLD4LNdAsm_16: Spacing = 1; return ARM::VLD4LNd16;
6822 case ARM::VLD4LNdAsm_32: Spacing = 1; return ARM::VLD4LNd32;
6823 case ARM::VLD4LNqAsm_16: Spacing = 2; return ARM::VLD4LNq16;
6824 case ARM::VLD4LNqAsm_32: Spacing = 2; return ARM::VLD4LNq32;
6825
6826 // VLD4DUP
6827 case ARM::VLD4DUPdWB_fixed_Asm_8: Spacing = 1; return ARM::VLD4DUPd8_UPD;
6828 case ARM::VLD4DUPdWB_fixed_Asm_16: Spacing = 1; return ARM::VLD4DUPd16_UPD;
6829 case ARM::VLD4DUPdWB_fixed_Asm_32: Spacing = 1; return ARM::VLD4DUPd32_UPD;
6830 case ARM::VLD4DUPqWB_fixed_Asm_8: Spacing = 1; return ARM::VLD4DUPq8_UPD;
6831 case ARM::VLD4DUPqWB_fixed_Asm_16: Spacing = 1; return ARM::VLD4DUPq16_UPD;
6832 case ARM::VLD4DUPqWB_fixed_Asm_32: Spacing = 2; return ARM::VLD4DUPq32_UPD;
6833 case ARM::VLD4DUPdWB_register_Asm_8: Spacing = 1; return ARM::VLD4DUPd8_UPD;
6834 case ARM::VLD4DUPdWB_register_Asm_16: Spacing = 1; return ARM::VLD4DUPd16_UPD;
6835 case ARM::VLD4DUPdWB_register_Asm_32: Spacing = 1; return ARM::VLD4DUPd32_UPD;
6836 case ARM::VLD4DUPqWB_register_Asm_8: Spacing = 2; return ARM::VLD4DUPq8_UPD;
6837 case ARM::VLD4DUPqWB_register_Asm_16: Spacing = 2; return ARM::VLD4DUPq16_UPD;
6838 case ARM::VLD4DUPqWB_register_Asm_32: Spacing = 2; return ARM::VLD4DUPq32_UPD;
6839 case ARM::VLD4DUPdAsm_8: Spacing = 1; return ARM::VLD4DUPd8;
6840 case ARM::VLD4DUPdAsm_16: Spacing = 1; return ARM::VLD4DUPd16;
6841 case ARM::VLD4DUPdAsm_32: Spacing = 1; return ARM::VLD4DUPd32;
6842 case ARM::VLD4DUPqAsm_8: Spacing = 2; return ARM::VLD4DUPq8;
6843 case ARM::VLD4DUPqAsm_16: Spacing = 2; return ARM::VLD4DUPq16;
6844 case ARM::VLD4DUPqAsm_32: Spacing = 2; return ARM::VLD4DUPq32;
6845
6846 // VLD4
6847 case ARM::VLD4dWB_fixed_Asm_8: Spacing = 1; return ARM::VLD4d8_UPD;
6848 case ARM::VLD4dWB_fixed_Asm_16: Spacing = 1; return ARM::VLD4d16_UPD;
6849 case ARM::VLD4dWB_fixed_Asm_32: Spacing = 1; return ARM::VLD4d32_UPD;
6850 case ARM::VLD4qWB_fixed_Asm_8: Spacing = 2; return ARM::VLD4q8_UPD;
6851 case ARM::VLD4qWB_fixed_Asm_16: Spacing = 2; return ARM::VLD4q16_UPD;
6852 case ARM::VLD4qWB_fixed_Asm_32: Spacing = 2; return ARM::VLD4q32_UPD;
6853 case ARM::VLD4dWB_register_Asm_8: Spacing = 1; return ARM::VLD4d8_UPD;
6854 case ARM::VLD4dWB_register_Asm_16: Spacing = 1; return ARM::VLD4d16_UPD;
6855 case ARM::VLD4dWB_register_Asm_32: Spacing = 1; return ARM::VLD4d32_UPD;
6856 case ARM::VLD4qWB_register_Asm_8: Spacing = 2; return ARM::VLD4q8_UPD;
6857 case ARM::VLD4qWB_register_Asm_16: Spacing = 2; return ARM::VLD4q16_UPD;
6858 case ARM::VLD4qWB_register_Asm_32: Spacing = 2; return ARM::VLD4q32_UPD;
6859 case ARM::VLD4dAsm_8: Spacing = 1; return ARM::VLD4d8;
6860 case ARM::VLD4dAsm_16: Spacing = 1; return ARM::VLD4d16;
6861 case ARM::VLD4dAsm_32: Spacing = 1; return ARM::VLD4d32;
6862 case ARM::VLD4qAsm_8: Spacing = 2; return ARM::VLD4q8;
6863 case ARM::VLD4qAsm_16: Spacing = 2; return ARM::VLD4q16;
6864 case ARM::VLD4qAsm_32: Spacing = 2; return ARM::VLD4q32;
6865 }
6866}
6867
6868bool ARMAsmParser::processInstruction(MCInst &Inst,
6869 const OperandVector &Operands,
6870 MCStreamer &Out) {
6871 // Check if we have the wide qualifier, because if it's present we
6872 // must avoid selecting a 16-bit thumb instruction.
6873 bool HasWideQualifier = false;
6874 for (auto &Op : Operands) {
6875 ARMOperand &ARMOp = static_cast<ARMOperand&>(*Op);
6876 if (ARMOp.isToken() && ARMOp.getToken() == ".w") {
6877 HasWideQualifier = true;
6878 break;
6879 }
6880 }
6881
6882 switch (Inst.getOpcode()) {
6883 // Alias for alternate form of 'ldr{,b}t Rt, [Rn], #imm' instruction.
6884 case ARM::LDRT_POST:
6885 case ARM::LDRBT_POST: {
6886 const unsigned Opcode =
6887 (Inst.getOpcode() == ARM::LDRT_POST) ? ARM::LDRT_POST_IMM
6888 : ARM::LDRBT_POST_IMM;
6889 MCInst TmpInst;
6890 TmpInst.setOpcode(Opcode);
6891 TmpInst.addOperand(Inst.getOperand(0));
6892 TmpInst.addOperand(Inst.getOperand(1));
6893 TmpInst.addOperand(Inst.getOperand(1));
6894 TmpInst.addOperand(MCOperand::createReg(0));
6895 TmpInst.addOperand(MCOperand::createImm(0));
6896 TmpInst.addOperand(Inst.getOperand(2));
6897 TmpInst.addOperand(Inst.getOperand(3));
6898 Inst = TmpInst;
6899 return true;
6900 }
6901 // Alias for alternate form of 'str{,b}t Rt, [Rn], #imm' instruction.
6902 case ARM::STRT_POST:
6903 case ARM::STRBT_POST: {
6904 const unsigned Opcode =
6905 (Inst.getOpcode() == ARM::STRT_POST) ? ARM::STRT_POST_IMM
6906 : ARM::STRBT_POST_IMM;
6907 MCInst TmpInst;
6908 TmpInst.setOpcode(Opcode);
6909 TmpInst.addOperand(Inst.getOperand(1));
6910 TmpInst.addOperand(Inst.getOperand(0));
6911 TmpInst.addOperand(Inst.getOperand(1));
6912 TmpInst.addOperand(MCOperand::createReg(0));
6913 TmpInst.addOperand(MCOperand::createImm(0));
6914 TmpInst.addOperand(Inst.getOperand(2));
6915 TmpInst.addOperand(Inst.getOperand(3));
6916 Inst = TmpInst;
6917 return true;
6918 }
6919 // Alias for alternate form of 'ADR Rd, #imm' instruction.
6920 case ARM::ADDri: {
6921 if (Inst.getOperand(1).getReg() != ARM::PC ||
6922 Inst.getOperand(5).getReg() != 0 ||
6923 !(Inst.getOperand(2).isExpr() || Inst.getOperand(2).isImm()))
6924 return false;
6925 MCInst TmpInst;
6926 TmpInst.setOpcode(ARM::ADR);
6927 TmpInst.addOperand(Inst.getOperand(0));
6928 if (Inst.getOperand(2).isImm()) {
6929 // Immediate (mod_imm) will be in its encoded form, we must unencode it
6930 // before passing it to the ADR instruction.
6931 unsigned Enc = Inst.getOperand(2).getImm();
6932 TmpInst.addOperand(MCOperand::createImm(
6933 ARM_AM::rotr32(Enc & 0xFF, (Enc & 0xF00) >> 7)));
6934 } else {
6935 // Turn PC-relative expression into absolute expression.
6936 // Reading PC provides the start of the current instruction + 8 and
6937 // the transform to adr is biased by that.
6938 MCSymbol *Dot = getContext().createTempSymbol();
6939 Out.EmitLabel(Dot);
6940 const MCExpr *OpExpr = Inst.getOperand(2).getExpr();
6941 const MCExpr *InstPC = MCSymbolRefExpr::create(Dot,
6942 MCSymbolRefExpr::VK_None,
6943 getContext());
6944 const MCExpr *Const8 = MCConstantExpr::create(8, getContext());
6945 const MCExpr *ReadPC = MCBinaryExpr::createAdd(InstPC, Const8,
6946 getContext());
6947 const MCExpr *FixupAddr = MCBinaryExpr::createAdd(ReadPC, OpExpr,
6948 getContext());
6949 TmpInst.addOperand(MCOperand::createExpr(FixupAddr));
6950 }
6951 TmpInst.addOperand(Inst.getOperand(3));
6952 TmpInst.addOperand(Inst.getOperand(4));
6953 Inst = TmpInst;
6954 return true;
6955 }
6956 // Aliases for alternate PC+imm syntax of LDR instructions.
6957 case ARM::t2LDRpcrel:
6958 // Select the narrow version if the immediate will fit.
6959 if (Inst.getOperand(1).getImm() > 0 &&
6960 Inst.getOperand(1).getImm() <= 0xff &&
6961 !HasWideQualifier)
6962 Inst.setOpcode(ARM::tLDRpci);
6963 else
6964 Inst.setOpcode(ARM::t2LDRpci);
6965 return true;
6966 case ARM::t2LDRBpcrel:
6967 Inst.setOpcode(ARM::t2LDRBpci);
6968 return true;
6969 case ARM::t2LDRHpcrel:
6970 Inst.setOpcode(ARM::t2LDRHpci);
6971 return true;
6972 case ARM::t2LDRSBpcrel:
6973 Inst.setOpcode(ARM::t2LDRSBpci);
6974 return true;
6975 case ARM::t2LDRSHpcrel:
6976 Inst.setOpcode(ARM::t2LDRSHpci);
6977 return true;
6978 case ARM::LDRConstPool:
6979 case ARM::tLDRConstPool:
6980 case ARM::t2LDRConstPool: {
6981 // Pseudo instruction ldr rt, =immediate is converted to a
6982 // MOV rt, immediate if immediate is known and representable
6983 // otherwise we create a constant pool entry that we load from.
6984 MCInst TmpInst;
6985 if (Inst.getOpcode() == ARM::LDRConstPool)
6986 TmpInst.setOpcode(ARM::LDRi12);
6987 else if (Inst.getOpcode() == ARM::tLDRConstPool)
6988 TmpInst.setOpcode(ARM::tLDRpci);
6989 else if (Inst.getOpcode() == ARM::t2LDRConstPool)
6990 TmpInst.setOpcode(ARM::t2LDRpci);
6991 const ARMOperand &PoolOperand =
6992 (HasWideQualifier ?
6993 static_cast<ARMOperand &>(*Operands[4]) :
6994 static_cast<ARMOperand &>(*Operands[3]));
6995 const MCExpr *SubExprVal = PoolOperand.getConstantPoolImm();
6996 // If SubExprVal is a constant we may be able to use a MOV
6997 if (isa<MCConstantExpr>(SubExprVal) &&
6998 Inst.getOperand(0).getReg() != ARM::PC &&
6999 Inst.getOperand(0).getReg() != ARM::SP) {
7000 int64_t Value =
7001 (int64_t) (cast<MCConstantExpr>(SubExprVal))->getValue();
7002 bool UseMov = true;
7003 bool MovHasS = true;
7004 if (Inst.getOpcode() == ARM::LDRConstPool) {
7005 // ARM Constant
7006 if (ARM_AM::getSOImmVal(Value) != -1) {
7007 Value = ARM_AM::getSOImmVal(Value);
7008 TmpInst.setOpcode(ARM::MOVi);
7009 }
7010 else if (ARM_AM::getSOImmVal(~Value) != -1) {
7011 Value = ARM_AM::getSOImmVal(~Value);
7012 TmpInst.setOpcode(ARM::MVNi);
7013 }
7014 else if (hasV6T2Ops() &&
7015 Value >=0 && Value < 65536) {
7016 TmpInst.setOpcode(ARM::MOVi16);
7017 MovHasS = false;
7018 }
7019 else
7020 UseMov = false;
7021 }
7022 else {
7023 // Thumb/Thumb2 Constant
7024 if (hasThumb2() &&
7025 ARM_AM::getT2SOImmVal(Value) != -1)
7026 TmpInst.setOpcode(ARM::t2MOVi);
7027 else if (hasThumb2() &&
7028 ARM_AM::getT2SOImmVal(~Value) != -1) {
7029 TmpInst.setOpcode(ARM::t2MVNi);
7030 Value = ~Value;
7031 }
7032 else if (hasV8MBaseline() &&
7033 Value >=0 && Value < 65536) {
7034 TmpInst.setOpcode(ARM::t2MOVi16);
7035 MovHasS = false;
7036 }
7037 else
7038 UseMov = false;
7039 }
7040 if (UseMov) {
7041 TmpInst.addOperand(Inst.getOperand(0)); // Rt
7042 TmpInst.addOperand(MCOperand::createImm(Value)); // Immediate
7043 TmpInst.addOperand(Inst.getOperand(2)); // CondCode
7044 TmpInst.addOperand(Inst.getOperand(3)); // CondCode
7045 if (MovHasS)
7046 TmpInst.addOperand(MCOperand::createReg(0)); // S
7047 Inst = TmpInst;
7048 return true;
7049 }
7050 }
7051 // No opportunity to use MOV/MVN create constant pool
7052 const MCExpr *CPLoc =
7053 getTargetStreamer().addConstantPoolEntry(SubExprVal,
7054 PoolOperand.getStartLoc());
7055 TmpInst.addOperand(Inst.getOperand(0)); // Rt
7056 TmpInst.addOperand(MCOperand::createExpr(CPLoc)); // offset to constpool
7057 if (TmpInst.getOpcode() == ARM::LDRi12)
7058 TmpInst.addOperand(MCOperand::createImm(0)); // unused offset
7059 TmpInst.addOperand(Inst.getOperand(2)); // CondCode
7060 TmpInst.addOperand(Inst.getOperand(3)); // CondCode
7061 Inst = TmpInst;
7062 return true;
7063 }
7064 // Handle NEON VST complex aliases.
7065 case ARM::VST1LNdWB_register_Asm_8:
7066 case ARM::VST1LNdWB_register_Asm_16:
7067 case ARM::VST1LNdWB_register_Asm_32: {
7068 MCInst TmpInst;
7069 // Shuffle the operands around so the lane index operand is in the
7070 // right place.
7071 unsigned Spacing;
7072 TmpInst.setOpcode(getRealVSTOpcode(Inst.getOpcode(), Spacing));
7073 TmpInst.addOperand(Inst.getOperand(2)); // Rn_wb
7074 TmpInst.addOperand(Inst.getOperand(2)); // Rn
7075 TmpInst.addOperand(Inst.getOperand(3)); // alignment
7076 TmpInst.addOperand(Inst.getOperand(4)); // Rm
7077 TmpInst.addOperand(Inst.getOperand(0)); // Vd
7078 TmpInst.addOperand(Inst.getOperand(1)); // lane
7079 TmpInst.addOperand(Inst.getOperand(5)); // CondCode
7080 TmpInst.addOperand(Inst.getOperand(6));
7081 Inst = TmpInst;
7082 return true;
7083 }
7084
7085 case ARM::VST2LNdWB_register_Asm_8:
7086 case ARM::VST2LNdWB_register_Asm_16:
7087 case ARM::VST2LNdWB_register_Asm_32:
7088 case ARM::VST2LNqWB_register_Asm_16:
7089 case ARM::VST2LNqWB_register_Asm_32: {
7090 MCInst TmpInst;
7091 // Shuffle the operands around so the lane index operand is in the
7092 // right place.
7093 unsigned Spacing;
7094 TmpInst.setOpcode(getRealVSTOpcode(Inst.getOpcode(), Spacing));
7095 TmpInst.addOperand(Inst.getOperand(2)); // Rn_wb
7096 TmpInst.addOperand(Inst.getOperand(2)); // Rn
7097 TmpInst.addOperand(Inst.getOperand(3)); // alignment
7098 TmpInst.addOperand(Inst.getOperand(4)); // Rm
7099 TmpInst.addOperand(Inst.getOperand(0)); // Vd
7100 TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
7101 Spacing));
7102 TmpInst.addOperand(Inst.getOperand(1)); // lane
7103 TmpInst.addOperand(Inst.getOperand(5)); // CondCode
7104 TmpInst.addOperand(Inst.getOperand(6));
7105 Inst = TmpInst;
7106 return true;
7107 }
7108
7109 case ARM::VST3LNdWB_register_Asm_8:
7110 case ARM::VST3LNdWB_register_Asm_16:
7111 case ARM::VST3LNdWB_register_Asm_32:
7112 case ARM::VST3LNqWB_register_Asm_16:
7113 case ARM::VST3LNqWB_register_Asm_32: {
7114 MCInst TmpInst;
7115 // Shuffle the operands around so the lane index operand is in the
7116 // right place.
7117 unsigned Spacing;
7118 TmpInst.setOpcode(getRealVSTOpcode(Inst.getOpcode(), Spacing));
7119 TmpInst.addOperand(Inst.getOperand(2)); // Rn_wb
7120 TmpInst.addOperand(Inst.getOperand(2)); // Rn
7121 TmpInst.addOperand(Inst.getOperand(3)); // alignment
7122 TmpInst.addOperand(Inst.getOperand(4)); // Rm
7123 TmpInst.addOperand(Inst.getOperand(0)); // Vd
7124 TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
7125 Spacing));
7126 TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
7127 Spacing * 2));
7128 TmpInst.addOperand(Inst.getOperand(1)); // lane
7129 TmpInst.addOperand(Inst.getOperand(5)); // CondCode
7130 TmpInst.addOperand(Inst.getOperand(6));
7131 Inst = TmpInst;
7132 return true;
7133 }
7134
7135 case ARM::VST4LNdWB_register_Asm_8:
7136 case ARM::VST4LNdWB_register_Asm_16:
7137 case ARM::VST4LNdWB_register_Asm_32:
7138 case ARM::VST4LNqWB_register_Asm_16:
7139 case ARM::VST4LNqWB_register_Asm_32: {
7140 MCInst TmpInst;
7141 // Shuffle the operands around so the lane index operand is in the
7142 // right place.
7143 unsigned Spacing;
7144 TmpInst.setOpcode(getRealVSTOpcode(Inst.getOpcode(), Spacing));
7145 TmpInst.addOperand(Inst.getOperand(2)); // Rn_wb
7146 TmpInst.addOperand(Inst.getOperand(2)); // Rn
7147 TmpInst.addOperand(Inst.getOperand(3)); // alignment
7148 TmpInst.addOperand(Inst.getOperand(4)); // Rm
7149 TmpInst.addOperand(Inst.getOperand(0)); // Vd
7150 TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
7151 Spacing));
7152 TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
7153 Spacing * 2));
7154 TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
7155 Spacing * 3));
7156 TmpInst.addOperand(Inst.getOperand(1)); // lane
7157 TmpInst.addOperand(Inst.getOperand(5)); // CondCode
7158 TmpInst.addOperand(Inst.getOperand(6));
7159 Inst = TmpInst;
7160 return true;
7161 }
7162
7163 case ARM::VST1LNdWB_fixed_Asm_8:
7164 case ARM::VST1LNdWB_fixed_Asm_16:
7165 case ARM::VST1LNdWB_fixed_Asm_32: {
7166 MCInst TmpInst;
7167 // Shuffle the operands around so the lane index operand is in the
7168 // right place.
7169 unsigned Spacing;
7170 TmpInst.setOpcode(getRealVSTOpcode(Inst.getOpcode(), Spacing));
7171 TmpInst.addOperand(Inst.getOperand(2)); // Rn_wb
7172 TmpInst.addOperand(Inst.getOperand(2)); // Rn
7173 TmpInst.addOperand(Inst.getOperand(3)); // alignment
7174 TmpInst.addOperand(MCOperand::createReg(0)); // Rm
7175 TmpInst.addOperand(Inst.getOperand(0)); // Vd
7176 TmpInst.addOperand(Inst.getOperand(1)); // lane
7177 TmpInst.addOperand(Inst.getOperand(4)); // CondCode
7178 TmpInst.addOperand(Inst.getOperand(5));
7179 Inst = TmpInst;
7180 return true;
7181 }
7182
7183 case ARM::VST2LNdWB_fixed_Asm_8:
7184 case ARM::VST2LNdWB_fixed_Asm_16:
7185 case ARM::VST2LNdWB_fixed_Asm_32:
7186 case ARM::VST2LNqWB_fixed_Asm_16:
7187 case ARM::VST2LNqWB_fixed_Asm_32: {
7188 MCInst TmpInst;
7189 // Shuffle the operands around so the lane index operand is in the
7190 // right place.
7191 unsigned Spacing;
7192 TmpInst.setOpcode(getRealVSTOpcode(Inst.getOpcode(), Spacing));
7193 TmpInst.addOperand(Inst.getOperand(2)); // Rn_wb
7194 TmpInst.addOperand(Inst.getOperand(2)); // Rn
7195 TmpInst.addOperand(Inst.getOperand(3)); // alignment
7196 TmpInst.addOperand(MCOperand::createReg(0)); // Rm
7197 TmpInst.addOperand(Inst.getOperand(0)); // Vd
7198 TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
7199 Spacing));
7200 TmpInst.addOperand(Inst.getOperand(1)); // lane
7201 TmpInst.addOperand(Inst.getOperand(4)); // CondCode
7202 TmpInst.addOperand(Inst.getOperand(5));
7203 Inst = TmpInst;
7204 return true;
7205 }
7206
7207 case ARM::VST3LNdWB_fixed_Asm_8:
7208 case ARM::VST3LNdWB_fixed_Asm_16:
7209 case ARM::VST3LNdWB_fixed_Asm_32:
7210 case ARM::VST3LNqWB_fixed_Asm_16:
7211 case ARM::VST3LNqWB_fixed_Asm_32: {
7212 MCInst TmpInst;
7213 // Shuffle the operands around so the lane index operand is in the
7214 // right place.
7215 unsigned Spacing;
7216 TmpInst.setOpcode(getRealVSTOpcode(Inst.getOpcode(), Spacing));
7217 TmpInst.addOperand(Inst.getOperand(2)); // Rn_wb
7218 TmpInst.addOperand(Inst.getOperand(2)); // Rn
7219 TmpInst.addOperand(Inst.getOperand(3)); // alignment
7220 TmpInst.addOperand(MCOperand::createReg(0)); // Rm
7221 TmpInst.addOperand(Inst.getOperand(0)); // Vd
7222 TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
7223 Spacing));
7224 TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
7225 Spacing * 2));
7226 TmpInst.addOperand(Inst.getOperand(1)); // lane
7227 TmpInst.addOperand(Inst.getOperand(4)); // CondCode
7228 TmpInst.addOperand(Inst.getOperand(5));
7229 Inst = TmpInst;
7230 return true;
7231 }
7232
// VST4LN (store one lane of a 4-register list) assembly pseudo,
// fixed-increment writeback form. Same expansion as the VST3LN case above
// but with a fourth list register at Vd + 3*Spacing. Rm = register 0 marks
// the fixed (no index register) writeback encoding.
7233 case ARM::VST4LNdWB_fixed_Asm_8:
7234 case ARM::VST4LNdWB_fixed_Asm_16:
7235 case ARM::VST4LNdWB_fixed_Asm_32:
7236 case ARM::VST4LNqWB_fixed_Asm_16:
7237 case ARM::VST4LNqWB_fixed_Asm_32: {
7238 MCInst TmpInst;
7239 // Shuffle the operands around so the lane index operand is in the
7240 // right place.
7241 unsigned Spacing;
7242 TmpInst.setOpcode(getRealVSTOpcode(Inst.getOpcode(), Spacing));
7243 TmpInst.addOperand(Inst.getOperand(2)); // Rn_wb
7244 TmpInst.addOperand(Inst.getOperand(2)); // Rn
7245 TmpInst.addOperand(Inst.getOperand(3)); // alignment
7246 TmpInst.addOperand(MCOperand::createReg(0)); // Rm
7247 TmpInst.addOperand(Inst.getOperand(0)); // Vd
7248 TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
7249 Spacing));
7250 TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
7251 Spacing * 2));
7252 TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
7253 Spacing * 3));
7254 TmpInst.addOperand(Inst.getOperand(1)); // lane
7255 TmpInst.addOperand(Inst.getOperand(4)); // CondCode
7256 TmpInst.addOperand(Inst.getOperand(5));
7257 Inst = TmpInst;
7258 return true;
7259 }
7260
// VST1LN (store one lane of a single register) assembly pseudo, no
// writeback. Only the operand order changes: pseudo (Vd, lane, Rn, align,
// CondCode, pred-reg) becomes real (Rn, align, Vd, lane, CondCode,
// pred-reg). Spacing is set by getRealVSTOpcode but unused — there is only
// one list register.
7261 case ARM::VST1LNdAsm_8:
7262 case ARM::VST1LNdAsm_16:
7263 case ARM::VST1LNdAsm_32: {
7264 MCInst TmpInst;
7265 // Shuffle the operands around so the lane index operand is in the
7266 // right place.
7267 unsigned Spacing;
7268 TmpInst.setOpcode(getRealVSTOpcode(Inst.getOpcode(), Spacing));
7269 TmpInst.addOperand(Inst.getOperand(2)); // Rn
7270 TmpInst.addOperand(Inst.getOperand(3)); // alignment
7271 TmpInst.addOperand(Inst.getOperand(0)); // Vd
7272 TmpInst.addOperand(Inst.getOperand(1)); // lane
7273 TmpInst.addOperand(Inst.getOperand(4)); // CondCode
7274 TmpInst.addOperand(Inst.getOperand(5));
7275 Inst = TmpInst;
7276 return true;
7277 }
7278
// VST2LN (store one lane of a 2-register list) assembly pseudo, no
// writeback. The second list register is synthesized as Vd + Spacing
// (Spacing = 1 for the d-form's consecutive registers, 2 for the q-form's
// even/odd pairs — whichever getRealVSTOpcode reports for this opcode).
7279 case ARM::VST2LNdAsm_8:
7280 case ARM::VST2LNdAsm_16:
7281 case ARM::VST2LNdAsm_32:
7282 case ARM::VST2LNqAsm_16:
7283 case ARM::VST2LNqAsm_32: {
7284 MCInst TmpInst;
7285 // Shuffle the operands around so the lane index operand is in the
7286 // right place.
7287 unsigned Spacing;
7288 TmpInst.setOpcode(getRealVSTOpcode(Inst.getOpcode(), Spacing));
7289 TmpInst.addOperand(Inst.getOperand(2)); // Rn
7290 TmpInst.addOperand(Inst.getOperand(3)); // alignment
7291 TmpInst.addOperand(Inst.getOperand(0)); // Vd
7292 TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
7293 Spacing));
7294 TmpInst.addOperand(Inst.getOperand(1)); // lane
7295 TmpInst.addOperand(Inst.getOperand(4)); // CondCode
7296 TmpInst.addOperand(Inst.getOperand(5));
7297 Inst = TmpInst;
7298 return true;
7299 }
7300
// VST3LN assembly pseudo, no writeback. Like the VST2LN case above with a
// third list register at Vd + 2*Spacing; operand order becomes
// (Rn, align, Vd, Vd+Spacing, Vd+2*Spacing, lane, CondCode, pred-reg).
7301 case ARM::VST3LNdAsm_8:
7302 case ARM::VST3LNdAsm_16:
7303 case ARM::VST3LNdAsm_32:
7304 case ARM::VST3LNqAsm_16:
7305 case ARM::VST3LNqAsm_32: {
7306 MCInst TmpInst;
7307 // Shuffle the operands around so the lane index operand is in the
7308 // right place.
7309 unsigned Spacing;
7310 TmpInst.setOpcode(getRealVSTOpcode(Inst.getOpcode(), Spacing));
7311 TmpInst.addOperand(Inst.getOperand(2)); // Rn
7312 TmpInst.addOperand(Inst.getOperand(3)); // alignment
7313 TmpInst.addOperand(Inst.getOperand(0)); // Vd
7314 TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
7315 Spacing));
7316 TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
7317 Spacing * 2));
7318 TmpInst.addOperand(Inst.getOperand(1)); // lane
7319 TmpInst.addOperand(Inst.getOperand(4)); // CondCode
7320 TmpInst.addOperand(Inst.getOperand(5));
7321 Inst = TmpInst;
7322 return true;
7323 }
7324
// VST4LN assembly pseudo, no writeback. Four list registers at Vd,
// Vd+Spacing, Vd+2*Spacing and Vd+3*Spacing, followed by the lane index
// and the predicate operands.
7325 case ARM::VST4LNdAsm_8:
7326 case ARM::VST4LNdAsm_16:
7327 case ARM::VST4LNdAsm_32:
7328 case ARM::VST4LNqAsm_16:
7329 case ARM::VST4LNqAsm_32: {
7330 MCInst TmpInst;
7331 // Shuffle the operands around so the lane index operand is in the
7332 // right place.
7333 unsigned Spacing;
7334 TmpInst.setOpcode(getRealVSTOpcode(Inst.getOpcode(), Spacing));
7335 TmpInst.addOperand(Inst.getOperand(2)); // Rn
7336 TmpInst.addOperand(Inst.getOperand(3)); // alignment
7337 TmpInst.addOperand(Inst.getOperand(0)); // Vd
7338 TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
7339 Spacing));
7340 TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
7341 Spacing * 2));
7342 TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
7343 Spacing * 3));
7344 TmpInst.addOperand(Inst.getOperand(1)); // lane
7345 TmpInst.addOperand(Inst.getOperand(4)); // CondCode
7346 TmpInst.addOperand(Inst.getOperand(5));
7347 Inst = TmpInst;
7348 return true;
7349 }
7350
7351 // Handle NEON VLD complex aliases.
// VLD1LN (load one lane) assembly pseudo, register-writeback form. The
// real instruction defines (Vd, Rn_wb) and, because a lane load must
// preserve the other lanes, also reads the old Vd as a tied source
// operand. Pseudo operands are (Vd, lane, Rn, align, Rm, CondCode,
// pred-reg); note the loads' extra Rm/tied-src shift the CondCode pair to
// operands 5 and 6 (the stores above use 4 and 5).
7352 case ARM::VLD1LNdWB_register_Asm_8:
7353 case ARM::VLD1LNdWB_register_Asm_16:
7354 case ARM::VLD1LNdWB_register_Asm_32: {
7355 MCInst TmpInst;
7356 // Shuffle the operands around so the lane index operand is in the
7357 // right place.
7358 unsigned Spacing;
7359 TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing));
7360 TmpInst.addOperand(Inst.getOperand(0)); // Vd
7361 TmpInst.addOperand(Inst.getOperand(2)); // Rn_wb
7362 TmpInst.addOperand(Inst.getOperand(2)); // Rn
7363 TmpInst.addOperand(Inst.getOperand(3)); // alignment
7364 TmpInst.addOperand(Inst.getOperand(4)); // Rm
7365 TmpInst.addOperand(Inst.getOperand(0)); // Tied operand src (== Vd)
7366 TmpInst.addOperand(Inst.getOperand(1)); // lane
7367 TmpInst.addOperand(Inst.getOperand(5)); // CondCode
7368 TmpInst.addOperand(Inst.getOperand(6));
7369 Inst = TmpInst;
7370 return true;
7371 }
7372
// VLD2LN assembly pseudo, register-writeback form. Two destination
// registers (Vd, Vd+Spacing) are defined, and the same pair is repeated
// after Rm as the tied sources that supply the untouched lanes. Spacing
// comes from getRealVLDOpcode (d-form: consecutive regs; q-form: every
// other reg).
7373 case ARM::VLD2LNdWB_register_Asm_8:
7374 case ARM::VLD2LNdWB_register_Asm_16:
7375 case ARM::VLD2LNdWB_register_Asm_32:
7376 case ARM::VLD2LNqWB_register_Asm_16:
7377 case ARM::VLD2LNqWB_register_Asm_32: {
7378 MCInst TmpInst;
7379 // Shuffle the operands around so the lane index operand is in the
7380 // right place.
7381 unsigned Spacing;
7382 TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing));
7383 TmpInst.addOperand(Inst.getOperand(0)); // Vd
7384 TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
7385 Spacing));
7386 TmpInst.addOperand(Inst.getOperand(2)); // Rn_wb
7387 TmpInst.addOperand(Inst.getOperand(2)); // Rn
7388 TmpInst.addOperand(Inst.getOperand(3)); // alignment
7389 TmpInst.addOperand(Inst.getOperand(4)); // Rm
7390 TmpInst.addOperand(Inst.getOperand(0)); // Tied operand src (== Vd)
7391 TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
7392 Spacing));
7393 TmpInst.addOperand(Inst.getOperand(1)); // lane
7394 TmpInst.addOperand(Inst.getOperand(5)); // CondCode
7395 TmpInst.addOperand(Inst.getOperand(6));
7396 Inst = TmpInst;
7397 return true;
7398 }
7399
// VLD3LN assembly pseudo, register-writeback form. Same shape as the
// VLD2LN case above with a third register at Vd + 2*Spacing: three defs
// (Vd, Vd+Spacing, Vd+2*Spacing) plus Rn_wb, then Rn/align/Rm, then the
// same three registers again as tied sources, then lane and predicate.
7400 case ARM::VLD3LNdWB_register_Asm_8:
7401 case ARM::VLD3LNdWB_register_Asm_16:
7402 case ARM::VLD3LNdWB_register_Asm_32:
7403 case ARM::VLD3LNqWB_register_Asm_16:
7404 case ARM::VLD3LNqWB_register_Asm_32: {
7405 MCInst TmpInst;
7406 // Shuffle the operands around so the lane index operand is in the
7407 // right place.
7408 unsigned Spacing;
7409 TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing));
7410 TmpInst.addOperand(Inst.getOperand(0)); // Vd
7411 TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
7412 Spacing));
7413 TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
7414 Spacing * 2));
7415 TmpInst.addOperand(Inst.getOperand(2)); // Rn_wb
7416 TmpInst.addOperand(Inst.getOperand(2)); // Rn
7417 TmpInst.addOperand(Inst.getOperand(3)); // alignment
7418 TmpInst.addOperand(Inst.getOperand(4)); // Rm
7419 TmpInst.addOperand(Inst.getOperand(0)); // Tied operand src (== Vd)
7420 TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
7421 Spacing));
7422 TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
7423 Spacing * 2));
7424 TmpInst.addOperand(Inst.getOperand(1)); // lane
7425 TmpInst.addOperand(Inst.getOperand(5)); // CondCode
7426 TmpInst.addOperand(Inst.getOperand(6));
7427 Inst = TmpInst;
7428 return true;
7429 }
7430
7431 case ARM::VLD4LNdWB_register_Asm_8:
7432 case ARM::VLD4LNdWB_register_Asm_16:
7433 case ARM::VLD4LNdWB_register_Asm_32:
7434 case ARM::VLD4LNqWB_register_Asm_16:
7435 case ARM::VLD4LNqWB_register_Asm_32: {
7436 MCInst TmpInst;
7437 // Shuffle the operands around so t