Bug Summary

File: lib/Target/AMDGPU/AsmParser/AMDGPUAsmParser.cpp
Warning: line 2522, column 3
1st function call argument is an uninitialized value
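
The flagged call at line 2522 lies beyond this excerpt. As a rough illustration only, the core.CallAndMessage checker fires on code of the following shape; the names here are hypothetical and not taken from the parser:

    #include <cstdint>

    void consume(int64_t v);        // callee receives the value by copy

    void example(bool cond) {
      int64_t Val;                  // no initializer
      if (cond)
        Val = 42;
      consume(Val);                 // warning: 1st function call argument is
                                    // an uninitialized value (cond == false)
    }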

Annotated Source Code


clang -cc1 -triple x86_64-pc-linux-gnu -analyze -disable-free -disable-llvm-verifier -discard-value-names -main-file-name AMDGPUAsmParser.cpp -analyzer-store=region -analyzer-opt-analyze-nested-blocks -analyzer-eagerly-assume -analyzer-checker=core -analyzer-checker=apiModeling -analyzer-checker=unix -analyzer-checker=deadcode -analyzer-checker=cplusplus -analyzer-checker=security.insecureAPI.UncheckedReturn -analyzer-checker=security.insecureAPI.getpw -analyzer-checker=security.insecureAPI.gets -analyzer-checker=security.insecureAPI.mktemp -analyzer-checker=security.insecureAPI.mkstemp -analyzer-checker=security.insecureAPI.vfork -analyzer-checker=nullability.NullPassedToNonnull -analyzer-checker=nullability.NullReturnedFromNonnull -analyzer-output plist -w -mrelocation-model pic -pic-level 2 -mthread-model posix -fmath-errno -masm-verbose -mconstructor-aliases -munwind-tables -fuse-init-array -target-cpu x86-64 -dwarf-column-info -debugger-tuning=gdb -momit-leaf-frame-pointer -ffunction-sections -fdata-sections -resource-dir /usr/lib/llvm-7/lib/clang/7.0.0 -D _DEBUG -D _GNU_SOURCE -D __STDC_CONSTANT_MACROS -D __STDC_FORMAT_MACROS -D __STDC_LIMIT_MACROS -I /build/llvm-toolchain-snapshot-7~svn325118/build-llvm/lib/Target/AMDGPU/AsmParser -I /build/llvm-toolchain-snapshot-7~svn325118/lib/Target/AMDGPU/AsmParser -I /build/llvm-toolchain-snapshot-7~svn325118/lib/Target/AMDGPU -I /build/llvm-toolchain-snapshot-7~svn325118/build-llvm/lib/Target/AMDGPU -I /build/llvm-toolchain-snapshot-7~svn325118/build-llvm/include -I /build/llvm-toolchain-snapshot-7~svn325118/include -U NDEBUG -internal-isystem /usr/lib/gcc/x86_64-linux-gnu/7.3.0/../../../../include/c++/7.3.0 -internal-isystem /usr/lib/gcc/x86_64-linux-gnu/7.3.0/../../../../include/x86_64-linux-gnu/c++/7.3.0 -internal-isystem /usr/lib/gcc/x86_64-linux-gnu/7.3.0/../../../../include/x86_64-linux-gnu/c++/7.3.0 -internal-isystem /usr/lib/gcc/x86_64-linux-gnu/7.3.0/../../../../include/c++/7.3.0/backward -internal-isystem /usr/include/clang/7.0.0/include/ -internal-isystem /usr/local/include -internal-isystem /usr/lib/llvm-7/lib/clang/7.0.0/include -internal-externc-isystem /usr/include/x86_64-linux-gnu -internal-externc-isystem /include -internal-externc-isystem /usr/include -O2 -Wno-unused-parameter -Wwrite-strings -Wno-missing-field-initializers -Wno-long-long -Wno-maybe-uninitialized -Wno-comment -std=c++11 -fdeprecated-macro -fdebug-compilation-dir /build/llvm-toolchain-snapshot-7~svn325118/build-llvm/lib/Target/AMDGPU/AsmParser -ferror-limit 19 -fmessage-length 0 -fvisibility-inlines-hidden -fobjc-runtime=gcc -fdiagnostics-show-option -vectorize-loops -vectorize-slp -analyzer-checker optin.performance.Padding -analyzer-output=html -analyzer-config stable-report-filename=true -o /tmp/scan-build-2018-02-14-150435-17243-1 -x c++ /build/llvm-toolchain-snapshot-7~svn325118/lib/Target/AMDGPU/AsmParser/AMDGPUAsmParser.cpp
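
This clang -cc1 line is the exact invocation the analyzer recorded. As a general note (an assumption about the setup, not something stated in this report), such reports are produced by wrapping the build in scan-build, for example:

    scan-build -o /tmp/reports -enable-checker optin.performance.Padding make

where make stands in for whatever command compiles AMDGPUAsmParser.cpp.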

/build/llvm-toolchain-snapshot-7~svn325118/lib/Target/AMDGPU/AsmParser/AMDGPUAsmParser.cpp

1//===- AMDGPUAsmParser.cpp - Parse SI asm to MCInst instructions ----------===//
2//
3// The LLVM Compiler Infrastructure
4//
5// This file is distributed under the University of Illinois Open Source
6// License. See LICENSE.TXT for details.
7//
8//===----------------------------------------------------------------------===//
9
10#include "AMDGPU.h"
11#include "AMDKernelCodeT.h"
12#include "MCTargetDesc/AMDGPUMCTargetDesc.h"
13#include "MCTargetDesc/AMDGPUTargetStreamer.h"
14#include "SIDefines.h"
15#include "Utils/AMDGPUAsmUtils.h"
16#include "Utils/AMDGPUBaseInfo.h"
17#include "Utils/AMDKernelCodeTUtils.h"
18#include "llvm/ADT/APFloat.h"
19#include "llvm/ADT/APInt.h"
20#include "llvm/ADT/ArrayRef.h"
21#include "llvm/ADT/STLExtras.h"
22#include "llvm/ADT/SmallBitVector.h"
23#include "llvm/ADT/SmallString.h"
24#include "llvm/ADT/StringRef.h"
25#include "llvm/ADT/StringSwitch.h"
26#include "llvm/ADT/Twine.h"
27#include "llvm/BinaryFormat/ELF.h"
28#include "llvm/CodeGen/MachineValueType.h"
29#include "llvm/MC/MCAsmInfo.h"
30#include "llvm/MC/MCContext.h"
31#include "llvm/MC/MCExpr.h"
32#include "llvm/MC/MCInst.h"
33#include "llvm/MC/MCInstrDesc.h"
34#include "llvm/MC/MCInstrInfo.h"
35#include "llvm/MC/MCParser/MCAsmLexer.h"
36#include "llvm/MC/MCParser/MCAsmParser.h"
37#include "llvm/MC/MCParser/MCAsmParserExtension.h"
38#include "llvm/MC/MCParser/MCParsedAsmOperand.h"
39#include "llvm/MC/MCParser/MCTargetAsmParser.h"
40#include "llvm/MC/MCRegisterInfo.h"
41#include "llvm/MC/MCStreamer.h"
42#include "llvm/MC/MCSubtargetInfo.h"
43#include "llvm/MC/MCSymbol.h"
44#include "llvm/Support/AMDGPUMetadata.h"
45#include "llvm/Support/Casting.h"
46#include "llvm/Support/Compiler.h"
47#include "llvm/Support/ErrorHandling.h"
48#include "llvm/Support/MathExtras.h"
49#include "llvm/Support/SMLoc.h"
50#include "llvm/Support/TargetRegistry.h"
51#include "llvm/Support/raw_ostream.h"
52#include <algorithm>
53#include <cassert>
54#include <cstdint>
55#include <cstring>
56#include <iterator>
57#include <map>
58#include <memory>
59#include <string>
60
61using namespace llvm;
62using namespace llvm::AMDGPU;
63
64namespace {
65
66class AMDGPUAsmParser;
67
68enum RegisterKind { IS_UNKNOWN, IS_VGPR, IS_SGPR, IS_TTMP, IS_SPECIAL };
69
70//===----------------------------------------------------------------------===//
71// Operand
72//===----------------------------------------------------------------------===//
73
74class AMDGPUOperand : public MCParsedAsmOperand {
75 enum KindTy {
76 Token,
77 Immediate,
78 Register,
79 Expression
80 } Kind;
81
82 SMLoc StartLoc, EndLoc;
83 const AMDGPUAsmParser *AsmParser;
84
85public:
86 AMDGPUOperand(KindTy Kind_, const AMDGPUAsmParser *AsmParser_)
87 : MCParsedAsmOperand(), Kind(Kind_), AsmParser(AsmParser_) {}
88
89 using Ptr = std::unique_ptr<AMDGPUOperand>;
90
91 struct Modifiers {
92 bool Abs = false;
93 bool Neg = false;
94 bool Sext = false;
95
96 bool hasFPModifiers() const { return Abs || Neg; }
97 bool hasIntModifiers() const { return Sext; }
98 bool hasModifiers() const { return hasFPModifiers() || hasIntModifiers(); }
99
100 int64_t getFPModifiersOperand() const {
101 int64_t Operand = 0;
102 Operand |= Abs ? SISrcMods::ABS : 0;
103 Operand |= Neg ? SISrcMods::NEG : 0;
104 return Operand;
105 }
106
107 int64_t getIntModifiersOperand() const {
108 int64_t Operand = 0;
109 Operand |= Sext ? SISrcMods::SEXT : 0;
110 return Operand;
111 }
112
113 int64_t getModifiersOperand() const {
114 assert(!(hasFPModifiers() && hasIntModifiers())
115 && "fp and int modifiers should not be used simultaneously");
116 if (hasFPModifiers()) {
117 return getFPModifiersOperand();
118 } else if (hasIntModifiers()) {
119 return getIntModifiersOperand();
120 } else {
121 return 0;
122 }
123 }
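// [Editorial sketch, not in the original source] How the bits combine: parsing
// '-|v0|' sets Abs = Neg = true, so getModifiersOperand() returns
// SISrcMods::ABS | SISrcMods::NEG; parsing 'sext(v0)' sets Sext and yields
// SISrcMods::SEXT. Mixing the fp and int groups trips the assert above.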
124
125 friend raw_ostream &operator <<(raw_ostream &OS, AMDGPUOperand::Modifiers Mods);
126 };
127
128 enum ImmTy {
129 ImmTyNone,
130 ImmTyGDS,
131 ImmTyOffen,
132 ImmTyIdxen,
133 ImmTyAddr64,
134 ImmTyOffset,
135 ImmTyInstOffset,
136 ImmTyOffset0,
137 ImmTyOffset1,
138 ImmTyGLC,
139 ImmTySLC,
140 ImmTyTFE,
141 ImmTyD16,
142 ImmTyClampSI,
143 ImmTyOModSI,
144 ImmTyDppCtrl,
145 ImmTyDppRowMask,
146 ImmTyDppBankMask,
147 ImmTyDppBoundCtrl,
148 ImmTySdwaDstSel,
149 ImmTySdwaSrc0Sel,
150 ImmTySdwaSrc1Sel,
151 ImmTySdwaDstUnused,
152 ImmTyDMask,
153 ImmTyUNorm,
154 ImmTyDA,
155 ImmTyR128,
156 ImmTyLWE,
157 ImmTyExpTgt,
158 ImmTyExpCompr,
159 ImmTyExpVM,
160 ImmTyDFMT,
161 ImmTyNFMT,
162 ImmTyHwreg,
163 ImmTyOff,
164 ImmTySendMsg,
165 ImmTyInterpSlot,
166 ImmTyInterpAttr,
167 ImmTyAttrChan,
168 ImmTyOpSel,
169 ImmTyOpSelHi,
170 ImmTyNegLo,
171 ImmTyNegHi,
172 ImmTySwizzle,
173 ImmTyHigh
174 };
175
176 struct TokOp {
177 const char *Data;
178 unsigned Length;
179 };
180
181 struct ImmOp {
182 int64_t Val;
183 ImmTy Type;
184 bool IsFPImm;
185 Modifiers Mods;
186 };
187
188 struct RegOp {
189 unsigned RegNo;
190 bool IsForcedVOP3;
191 Modifiers Mods;
192 };
193
194 union {
195 TokOp Tok;
196 ImmOp Imm;
197 RegOp Reg;
198 const MCExpr *Expr;
199 };
200
201 bool isToken() const override {
202 if (Kind == Token)
203 return true;
204
205 if (Kind != Expression || !Expr)
206 return false;
207
208 // When parsing operands, we can't always tell if something was meant to be
209 // a token, like 'gds', or an expression that references a global variable.
210 // In this case, we assume the string is an expression, and if we need to
211// interpret it as a token, then we treat the symbol name as the token.
212 return isa<MCSymbolRefExpr>(Expr);
213 }
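// [Editorial sketch] Concretely: in 'ds_read_b32 v0, v1 gds' the trailing
// 'gds' can come back from the expression parser as an MCSymbolRefExpr;
// isToken() accepts it so that getToken() can hand the symbol name 'gds' to
// the matcher as the keyword token it expects.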
214
215 bool isImm() const override {
216 return Kind == Immediate;
217 }
218
219 bool isInlinableImm(MVT type) const;
220 bool isLiteralImm(MVT type) const;
221
222 bool isRegKind() const {
223 return Kind == Register;
224 }
225
226 bool isReg() const override {
227 return isRegKind() && !hasModifiers();
228 }
229
230 bool isRegOrImmWithInputMods(MVT type) const {
231 return isRegKind() || isInlinableImm(type);
232 }
233
234 bool isRegOrImmWithInt16InputMods() const {
235 return isRegOrImmWithInputMods(MVT::i16);
236 }
237
238 bool isRegOrImmWithInt32InputMods() const {
239 return isRegOrImmWithInputMods(MVT::i32);
240 }
241
242 bool isRegOrImmWithInt64InputMods() const {
243 return isRegOrImmWithInputMods(MVT::i64);
244 }
245
246 bool isRegOrImmWithFP16InputMods() const {
247 return isRegOrImmWithInputMods(MVT::f16);
248 }
249
250 bool isRegOrImmWithFP32InputMods() const {
251 return isRegOrImmWithInputMods(MVT::f32);
252 }
253
254 bool isRegOrImmWithFP64InputMods() const {
255 return isRegOrImmWithInputMods(MVT::f64);
256 }
257
258 bool isVReg() const {
259 return isRegClass(AMDGPU::VGPR_32RegClassID) ||
260 isRegClass(AMDGPU::VReg_64RegClassID) ||
261 isRegClass(AMDGPU::VReg_96RegClassID) ||
262 isRegClass(AMDGPU::VReg_128RegClassID) ||
263 isRegClass(AMDGPU::VReg_256RegClassID) ||
264 isRegClass(AMDGPU::VReg_512RegClassID);
265 }
266
267 bool isVReg32OrOff() const {
268 return isOff() || isRegClass(AMDGPU::VGPR_32RegClassID);
269 }
270
271 bool isSDWAOperand(MVT type) const;
272 bool isSDWAFP16Operand() const;
273 bool isSDWAFP32Operand() const;
274 bool isSDWAInt16Operand() const;
275 bool isSDWAInt32Operand() const;
276
277 bool isImmTy(ImmTy ImmT) const {
278 return isImm() && Imm.Type == ImmT;
279 }
280
281 bool isImmModifier() const {
282 return isImm() && Imm.Type != ImmTyNone;
283 }
284
285 bool isClampSI() const { return isImmTy(ImmTyClampSI); }
286 bool isOModSI() const { return isImmTy(ImmTyOModSI); }
287 bool isDMask() const { return isImmTy(ImmTyDMask); }
288 bool isUNorm() const { return isImmTy(ImmTyUNorm); }
289 bool isDA() const { return isImmTy(ImmTyDA); }
290 bool isR128() const { return isImmTy(ImmTyR128); }
291 bool isLWE() const { return isImmTy(ImmTyLWE); }
292 bool isOff() const { return isImmTy(ImmTyOff); }
293 bool isExpTgt() const { return isImmTy(ImmTyExpTgt); }
294 bool isExpVM() const { return isImmTy(ImmTyExpVM); }
295 bool isExpCompr() const { return isImmTy(ImmTyExpCompr); }
296 bool isOffen() const { return isImmTy(ImmTyOffen); }
297 bool isIdxen() const { return isImmTy(ImmTyIdxen); }
298 bool isAddr64() const { return isImmTy(ImmTyAddr64); }
299 bool isOffset() const { return isImmTy(ImmTyOffset) && isUInt<16>(getImm()); }
300 bool isOffset0() const { return isImmTy(ImmTyOffset0) && isUInt<16>(getImm()); }
301 bool isOffset1() const { return isImmTy(ImmTyOffset1) && isUInt<8>(getImm()); }
302
303 bool isOffsetU12() const { return (isImmTy(ImmTyOffset) || isImmTy(ImmTyInstOffset)) && isUInt<12>(getImm()); }
304 bool isOffsetS13() const { return (isImmTy(ImmTyOffset) || isImmTy(ImmTyInstOffset)) && isInt<13>(getImm()); }
305 bool isGDS() const { return isImmTy(ImmTyGDS); }
306 bool isGLC() const { return isImmTy(ImmTyGLC); }
307 bool isSLC() const { return isImmTy(ImmTySLC); }
308 bool isTFE() const { return isImmTy(ImmTyTFE); }
309 bool isD16() const { return isImmTy(ImmTyD16); }
310 bool isDFMT() const { return isImmTy(ImmTyDFMT) && isUInt<8>(getImm()); }
311 bool isNFMT() const { return isImmTy(ImmTyNFMT) && isUInt<8>(getImm()); }
312 bool isBankMask() const { return isImmTy(ImmTyDppBankMask); }
313 bool isRowMask() const { return isImmTy(ImmTyDppRowMask); }
314 bool isBoundCtrl() const { return isImmTy(ImmTyDppBoundCtrl); }
315 bool isSDWADstSel() const { return isImmTy(ImmTySdwaDstSel); }
316 bool isSDWASrc0Sel() const { return isImmTy(ImmTySdwaSrc0Sel); }
317 bool isSDWASrc1Sel() const { return isImmTy(ImmTySdwaSrc1Sel); }
318 bool isSDWADstUnused() const { return isImmTy(ImmTySdwaDstUnused); }
319 bool isInterpSlot() const { return isImmTy(ImmTyInterpSlot); }
320 bool isInterpAttr() const { return isImmTy(ImmTyInterpAttr); }
321 bool isAttrChan() const { return isImmTy(ImmTyAttrChan); }
322 bool isOpSel() const { return isImmTy(ImmTyOpSel); }
323 bool isOpSelHi() const { return isImmTy(ImmTyOpSelHi); }
324 bool isNegLo() const { return isImmTy(ImmTyNegLo); }
325 bool isNegHi() const { return isImmTy(ImmTyNegHi); }
326 bool isHigh() const { return isImmTy(ImmTyHigh); }
327
328 bool isMod() const {
329 return isClampSI() || isOModSI();
330 }
331
332 bool isRegOrImm() const {
333 return isReg() || isImm();
334 }
335
336 bool isRegClass(unsigned RCID) const;
337
338 bool isRegOrInlineNoMods(unsigned RCID, MVT type) const {
339 return (isRegClass(RCID) || isInlinableImm(type)) && !hasModifiers();
340 }
341
342 bool isSCSrcB16() const {
343 return isRegOrInlineNoMods(AMDGPU::SReg_32RegClassID, MVT::i16);
344 }
345
346 bool isSCSrcV2B16() const {
347 return isSCSrcB16();
348 }
349
350 bool isSCSrcB32() const {
351 return isRegOrInlineNoMods(AMDGPU::SReg_32RegClassID, MVT::i32);
352 }
353
354 bool isSCSrcB64() const {
355 return isRegOrInlineNoMods(AMDGPU::SReg_64RegClassID, MVT::i64);
356 }
357
358 bool isSCSrcF16() const {
359 return isRegOrInlineNoMods(AMDGPU::SReg_32RegClassID, MVT::f16);
360 }
361
362 bool isSCSrcV2F16() const {
363 return isSCSrcF16();
364 }
365
366 bool isSCSrcF32() const {
367 return isRegOrInlineNoMods(AMDGPU::SReg_32RegClassID, MVT::f32);
368 }
369
370 bool isSCSrcF64() const {
371 return isRegOrInlineNoMods(AMDGPU::SReg_64RegClassID, MVT::f64);
372 }
373
374 bool isSSrcB32() const {
375 return isSCSrcB32() || isLiteralImm(MVT::i32) || isExpr();
376 }
377
378 bool isSSrcB16() const {
379 return isSCSrcB16() || isLiteralImm(MVT::i16);
380 }
381
382 bool isSSrcV2B16() const {
383 llvm_unreachable("cannot happen");
384 return isSSrcB16();
385 }
386
387 bool isSSrcB64() const {
388 // TODO: Find out how SALU supports extension of 32-bit literals to 64 bits.
389 // See isVSrc64().
390 return isSCSrcB64() || isLiteralImm(MVT::i64);
391 }
392
393 bool isSSrcF32() const {
394 return isSCSrcB32() || isLiteralImm(MVT::f32) || isExpr();
395 }
396
397 bool isSSrcF64() const {
398 return isSCSrcB64() || isLiteralImm(MVT::f64);
399 }
400
401 bool isSSrcF16() const {
402 return isSCSrcB16() || isLiteralImm(MVT::f16);
403 }
404
405 bool isSSrcV2F16() const {
406 llvm_unreachable("cannot happen");
407 return isSSrcF16();
408 }
409
410 bool isVCSrcB32() const {
411 return isRegOrInlineNoMods(AMDGPU::VS_32RegClassID, MVT::i32);
412 }
413
414 bool isVCSrcB64() const {
415 return isRegOrInlineNoMods(AMDGPU::VS_64RegClassID, MVT::i64);
416 }
417
418 bool isVCSrcB16() const {
419 return isRegOrInlineNoMods(AMDGPU::VS_32RegClassID, MVT::i16);
420 }
421
422 bool isVCSrcV2B16() const {
423 return isVCSrcB16();
424 }
425
426 bool isVCSrcF32() const {
427 return isRegOrInlineNoMods(AMDGPU::VS_32RegClassID, MVT::f32);
428 }
429
430 bool isVCSrcF64() const {
431 return isRegOrInlineNoMods(AMDGPU::VS_64RegClassID, MVT::f64);
432 }
433
434 bool isVCSrcF16() const {
435 return isRegOrInlineNoMods(AMDGPU::VS_32RegClassID, MVT::f16);
436 }
437
438 bool isVCSrcV2F16() const {
439 return isVCSrcF16();
440 }
441
442 bool isVSrcB32() const {
443 return isVCSrcF32() || isLiteralImm(MVT::i32);
444 }
445
446 bool isVSrcB64() const {
447 return isVCSrcF64() || isLiteralImm(MVT::i64);
448 }
449
450 bool isVSrcB16() const {
451 return isVCSrcF16() || isLiteralImm(MVT::i16);
452 }
453
454 bool isVSrcV2B16() const {
455 llvm_unreachable("cannot happen");
456 return isVSrcB16();
457 }
458
459 bool isVSrcF32() const {
460 return isVCSrcF32() || isLiteralImm(MVT::f32);
461 }
462
463 bool isVSrcF64() const {
464 return isVCSrcF64() || isLiteralImm(MVT::f64);
465 }
466
467 bool isVSrcF16() const {
468 return isVCSrcF16() || isLiteralImm(MVT::f16);
469 }
470
471 bool isVSrcV2F16() const {
472 llvm_unreachable("cannot happen");
473 return isVSrcF16();
474 }
475
476 bool isKImmFP32() const {
477 return isLiteralImm(MVT::f32);
478 }
479
480 bool isKImmFP16() const {
481 return isLiteralImm(MVT::f16);
482 }
483
484 bool isMem() const override {
485 return false;
486 }
487
488 bool isExpr() const {
489 return Kind == Expression;
490 }
491
492 bool isSoppBrTarget() const {
493 return isExpr() || isImm();
494 }
495
496 bool isSWaitCnt() const;
497 bool isHwreg() const;
498 bool isSendMsg() const;
499 bool isSwizzle() const;
500 bool isSMRDOffset8() const;
501 bool isSMRDOffset20() const;
502 bool isSMRDLiteralOffset() const;
503 bool isDPPCtrl() const;
504 bool isGPRIdxMode() const;
505 bool isS16Imm() const;
506 bool isU16Imm() const;
507
508 StringRef getExpressionAsToken() const {
509 assert(isExpr());
510 const MCSymbolRefExpr *S = cast<MCSymbolRefExpr>(Expr);
511 return S->getSymbol().getName();
512 }
513
514 StringRef getToken() const {
515 assert(isToken());
516
517 if (Kind == Expression)
518 return getExpressionAsToken();
519
520 return StringRef(Tok.Data, Tok.Length);
521 }
522
523 int64_t getImm() const {
524 assert(isImm());
525 return Imm.Val;
526 }
527
528 ImmTy getImmTy() const {
529 assert(isImm());
530 return Imm.Type;
531 }
532
533 unsigned getReg() const override {
534 return Reg.RegNo;
535 }
536
537 SMLoc getStartLoc() const override {
538 return StartLoc;
539 }
540
541 SMLoc getEndLoc() const override {
542 return EndLoc;
543 }
544
545 SMRange getLocRange() const {
546 return SMRange(StartLoc, EndLoc);
547 }
548
549 Modifiers getModifiers() const {
550 assert(isRegKind() || isImmTy(ImmTyNone));
551 return isRegKind() ? Reg.Mods : Imm.Mods;
552 }
553
554 void setModifiers(Modifiers Mods) {
555 assert(isRegKind() || isImmTy(ImmTyNone));
556 if (isRegKind())
557 Reg.Mods = Mods;
558 else
559 Imm.Mods = Mods;
560 }
561
562 bool hasModifiers() const {
563 return getModifiers().hasModifiers();
564 }
565
566 bool hasFPModifiers() const {
567 return getModifiers().hasFPModifiers();
568 }
569
570 bool hasIntModifiers() const {
571 return getModifiers().hasIntModifiers();
572 }
573
574 uint64_t applyInputFPModifiers(uint64_t Val, unsigned Size) const;
575
576 void addImmOperands(MCInst &Inst, unsigned N, bool ApplyModifiers = true) const;
577
578 void addLiteralImmOperand(MCInst &Inst, int64_t Val, bool ApplyModifiers) const;
579
580 template <unsigned Bitwidth>
581 void addKImmFPOperands(MCInst &Inst, unsigned N) const;
582
583 void addKImmFP16Operands(MCInst &Inst, unsigned N) const {
584 addKImmFPOperands<16>(Inst, N);
585 }
586
587 void addKImmFP32Operands(MCInst &Inst, unsigned N) const {
588 addKImmFPOperands<32>(Inst, N);
589 }
590
591 void addRegOperands(MCInst &Inst, unsigned N) const;
592
593 void addRegOrImmOperands(MCInst &Inst, unsigned N) const {
594 if (isRegKind())
595 addRegOperands(Inst, N);
596 else if (isExpr())
597 Inst.addOperand(MCOperand::createExpr(Expr));
598 else
599 addImmOperands(Inst, N);
600 }
601
602 void addRegOrImmWithInputModsOperands(MCInst &Inst, unsigned N) const {
603 Modifiers Mods = getModifiers();
604 Inst.addOperand(MCOperand::createImm(Mods.getModifiersOperand()));
605 if (isRegKind()) {
606 addRegOperands(Inst, N);
607 } else {
608 addImmOperands(Inst, N, false);
609 }
610 }
611
612 void addRegOrImmWithFPInputModsOperands(MCInst &Inst, unsigned N) const {
613 assert(!hasIntModifiers());
614 addRegOrImmWithInputModsOperands(Inst, N);
615 }
616
617 void addRegOrImmWithIntInputModsOperands(MCInst &Inst, unsigned N) const {
618 assert(!hasFPModifiers());
619 addRegOrImmWithInputModsOperands(Inst, N);
620 }
621
622 void addRegWithInputModsOperands(MCInst &Inst, unsigned N) const {
623 Modifiers Mods = getModifiers();
624 Inst.addOperand(MCOperand::createImm(Mods.getModifiersOperand()));
625 assert(isRegKind());
626 addRegOperands(Inst, N);
627 }
628
629 void addRegWithFPInputModsOperands(MCInst &Inst, unsigned N) const {
630 assert(!hasIntModifiers());
631 addRegWithInputModsOperands(Inst, N);
632 }
633
634 void addRegWithIntInputModsOperands(MCInst &Inst, unsigned N) const {
635 assert(!hasFPModifiers());
636 addRegWithInputModsOperands(Inst, N);
637 }
638
639 void addSoppBrTargetOperands(MCInst &Inst, unsigned N) const {
640 if (isImm())
641 addImmOperands(Inst, N);
642 else {
643 assert(isExpr());
644 Inst.addOperand(MCOperand::createExpr(Expr));
645 }
646 }
647
648 static void printImmTy(raw_ostream& OS, ImmTy Type) {
649 switch (Type) {
650 case ImmTyNone: OS << "None"; break;
651 case ImmTyGDS: OS << "GDS"; break;
652 case ImmTyOffen: OS << "Offen"; break;
653 case ImmTyIdxen: OS << "Idxen"; break;
654 case ImmTyAddr64: OS << "Addr64"; break;
655 case ImmTyOffset: OS << "Offset"; break;
656 case ImmTyInstOffset: OS << "InstOffset"; break;
657 case ImmTyOffset0: OS << "Offset0"; break;
658 case ImmTyOffset1: OS << "Offset1"; break;
659 case ImmTyGLC: OS << "GLC"; break;
660 case ImmTySLC: OS << "SLC"; break;
661 case ImmTyTFE: OS << "TFE"; break;
662 case ImmTyD16: OS << "D16"; break;
663 case ImmTyDFMT: OS << "DFMT"; break;
664 case ImmTyNFMT: OS << "NFMT"; break;
665 case ImmTyClampSI: OS << "ClampSI"; break;
666 case ImmTyOModSI: OS << "OModSI"; break;
667 case ImmTyDppCtrl: OS << "DppCtrl"; break;
668 case ImmTyDppRowMask: OS << "DppRowMask"; break;
669 case ImmTyDppBankMask: OS << "DppBankMask"; break;
670 case ImmTyDppBoundCtrl: OS << "DppBoundCtrl"; break;
671 case ImmTySdwaDstSel: OS << "SdwaDstSel"; break;
672 case ImmTySdwaSrc0Sel: OS << "SdwaSrc0Sel"; break;
673 case ImmTySdwaSrc1Sel: OS << "SdwaSrc1Sel"; break;
674 case ImmTySdwaDstUnused: OS << "SdwaDstUnused"; break;
675 case ImmTyDMask: OS << "DMask"; break;
676 case ImmTyUNorm: OS << "UNorm"; break;
677 case ImmTyDA: OS << "DA"; break;
678 case ImmTyR128: OS << "R128"; break;
679 case ImmTyLWE: OS << "LWE"; break;
680 case ImmTyOff: OS << "Off"; break;
681 case ImmTyExpTgt: OS << "ExpTgt"; break;
682 case ImmTyExpCompr: OS << "ExpCompr"; break;
683 case ImmTyExpVM: OS << "ExpVM"; break;
684 case ImmTyHwreg: OS << "Hwreg"; break;
685 case ImmTySendMsg: OS << "SendMsg"; break;
686 case ImmTyInterpSlot: OS << "InterpSlot"; break;
687 case ImmTyInterpAttr: OS << "InterpAttr"; break;
688 case ImmTyAttrChan: OS << "AttrChan"; break;
689 case ImmTyOpSel: OS << "OpSel"; break;
690 case ImmTyOpSelHi: OS << "OpSelHi"; break;
691 case ImmTyNegLo: OS << "NegLo"; break;
692 case ImmTyNegHi: OS << "NegHi"; break;
693 case ImmTySwizzle: OS << "Swizzle"; break;
694 case ImmTyHigh: OS << "High"; break;
695 }
696 }
697
698 void print(raw_ostream &OS) const override {
699 switch (Kind) {
700 case Register:
701 OS << "<register " << getReg() << " mods: " << Reg.Mods << '>';
702 break;
703 case Immediate:
704 OS << '<' << getImm();
705 if (getImmTy() != ImmTyNone) {
706 OS << " type: "; printImmTy(OS, getImmTy());
707 }
708 OS << " mods: " << Imm.Mods << '>';
709 break;
710 case Token:
711 OS << '\'' << getToken() << '\'';
712 break;
713 case Expression:
714 OS << "<expr " << *Expr << '>';
715 break;
716 }
717 }
718
719 static AMDGPUOperand::Ptr CreateImm(const AMDGPUAsmParser *AsmParser,
720 int64_t Val, SMLoc Loc,
721 ImmTy Type = ImmTyNone,
722 bool IsFPImm = false) {
723 auto Op = llvm::make_unique<AMDGPUOperand>(Immediate, AsmParser);
724 Op->Imm.Val = Val;
725 Op->Imm.IsFPImm = IsFPImm;
726 Op->Imm.Type = Type;
727 Op->Imm.Mods = Modifiers();
728 Op->StartLoc = Loc;
729 Op->EndLoc = Loc;
730 return Op;
731 }
732
733 static AMDGPUOperand::Ptr CreateToken(const AMDGPUAsmParser *AsmParser,
734 StringRef Str, SMLoc Loc,
735 bool HasExplicitEncodingSize = true) {
736 auto Res = llvm::make_unique<AMDGPUOperand>(Token, AsmParser);
737 Res->Tok.Data = Str.data();
738 Res->Tok.Length = Str.size();
739 Res->StartLoc = Loc;
740 Res->EndLoc = Loc;
741 return Res;
742 }
743
744 static AMDGPUOperand::Ptr CreateReg(const AMDGPUAsmParser *AsmParser,
745 unsigned RegNo, SMLoc S,
746 SMLoc E,
747 bool ForceVOP3) {
748 auto Op = llvm::make_unique<AMDGPUOperand>(Register, AsmParser);
749 Op->Reg.RegNo = RegNo;
750 Op->Reg.Mods = Modifiers();
751 Op->Reg.IsForcedVOP3 = ForceVOP3;
752 Op->StartLoc = S;
753 Op->EndLoc = E;
754 return Op;
755 }
756
757 static AMDGPUOperand::Ptr CreateExpr(const AMDGPUAsmParser *AsmParser,
758 const class MCExpr *Expr, SMLoc S) {
759 auto Op = llvm::make_unique<AMDGPUOperand>(Expression, AsmParser);
760 Op->Expr = Expr;
761 Op->StartLoc = S;
762 Op->EndLoc = S;
763 return Op;
764 }
765};
766
767raw_ostream &operator <<(raw_ostream &OS, AMDGPUOperand::Modifiers Mods) {
768 OS << "abs:" << Mods.Abs << " neg: " << Mods.Neg << " sext:" << Mods.Sext;
769 return OS;
770}
771
772//===----------------------------------------------------------------------===//
773// AsmParser
774//===----------------------------------------------------------------------===//
775
776// Holds info related to the current kernel, e.g. count of SGPRs used.
777// Kernel scope begins at .amdgpu_hsa_kernel directive, ends at next
778// .amdgpu_hsa_kernel or at EOF.
779class KernelScopeInfo {
780 int SgprIndexUnusedMin = -1;
781 int VgprIndexUnusedMin = -1;
782 MCContext *Ctx = nullptr;
783
784 void usesSgprAt(int i) {
785 if (i >= SgprIndexUnusedMin) {
786 SgprIndexUnusedMin = ++i;
787 if (Ctx) {
788 MCSymbol * const Sym = Ctx->getOrCreateSymbol(Twine(".kernel.sgpr_count"));
789 Sym->setVariableValue(MCConstantExpr::create(SgprIndexUnusedMin, *Ctx));
790 }
791 }
792 }
793
794 void usesVgprAt(int i) {
795 if (i >= VgprIndexUnusedMin) {
796 VgprIndexUnusedMin = ++i;
797 if (Ctx) {
798 MCSymbol * const Sym = Ctx->getOrCreateSymbol(Twine(".kernel.vgpr_count"));
799 Sym->setVariableValue(MCConstantExpr::create(VgprIndexUnusedMin, *Ctx));
800 }
801 }
802 }
803
804public:
805 KernelScopeInfo() = default;
806
807 void initialize(MCContext &Context) {
808 Ctx = &Context;
809 usesSgprAt(SgprIndexUnusedMin = -1);
810 usesVgprAt(VgprIndexUnusedMin = -1);
811 }
812
813 void usesRegister(RegisterKind RegKind, unsigned DwordRegIndex, unsigned RegWidth) {
814 switch (RegKind) {
815 case IS_SGPR: usesSgprAt(DwordRegIndex + RegWidth - 1); break;
816 case IS_VGPR: usesVgprAt(DwordRegIndex + RegWidth - 1); break;
817 default: break;
818 }
819 }
820};
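// [Editorial note] KernelScopeInfo publishes the running register counts as
// assembler symbols, so (a hypothetical usage sketch) kernel source can read
// them back after its last instruction, e.g.:
//   .set my_vgpr_count, .kernel.vgpr_count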
821
822class AMDGPUAsmParser : public MCTargetAsmParser {
823 MCAsmParser &Parser;
824
825 // Number of extra operands parsed after the first optional operand.
826 // This may be necessary to skip hardcoded mandatory operands.
827 static const unsigned MAX_OPR_LOOKAHEAD = 8;
828
829 unsigned ForcedEncodingSize = 0;
830 bool ForcedDPP = false;
831 bool ForcedSDWA = false;
832 KernelScopeInfo KernelScope;
833
834 /// @name Auto-generated Match Functions
835 /// {
836
837#define GET_ASSEMBLER_HEADER
838#include "AMDGPUGenAsmMatcher.inc"
839
840 /// }
841
842private:
843 bool ParseAsAbsoluteExpression(uint32_t &Ret);
844 bool ParseDirectiveMajorMinor(uint32_t &Major, uint32_t &Minor);
845 bool ParseDirectiveHSACodeObjectVersion();
846 bool ParseDirectiveHSACodeObjectISA();
847 bool ParseAMDKernelCodeTValue(StringRef ID, amd_kernel_code_t &Header);
848 bool ParseDirectiveAMDKernelCodeT();
849 bool subtargetHasRegister(const MCRegisterInfo &MRI, unsigned RegNo) const;
850 bool ParseDirectiveAMDGPUHsaKernel();
851
852 bool ParseDirectiveISAVersion();
853 bool ParseDirectiveHSAMetadata();
854 bool ParseDirectivePALMetadata();
855
856 bool AddNextRegisterToList(unsigned& Reg, unsigned& RegWidth,
857 RegisterKind RegKind, unsigned Reg1,
858 unsigned RegNum);
859 bool ParseAMDGPURegister(RegisterKind& RegKind, unsigned& Reg,
860 unsigned& RegNum, unsigned& RegWidth,
861 unsigned *DwordRegIndex);
862 void cvtMubufImpl(MCInst &Inst, const OperandVector &Operands,
863 bool IsAtomic, bool IsAtomicReturn);
864 void cvtDSImpl(MCInst &Inst, const OperandVector &Operands,
865 bool IsGdsHardcoded);
866
867public:
868 enum AMDGPUMatchResultTy {
869 Match_PreferE32 = FIRST_TARGET_MATCH_RESULT_TY
870 };
871
872 using OptionalImmIndexMap = std::map<AMDGPUOperand::ImmTy, unsigned>;
873
874 AMDGPUAsmParser(const MCSubtargetInfo &STI, MCAsmParser &_Parser,
875 const MCInstrInfo &MII,
876 const MCTargetOptions &Options)
877 : MCTargetAsmParser(Options, STI, MII), Parser(_Parser) {
878 MCAsmParserExtension::Initialize(Parser);
879
880 if (getFeatureBits().none()) {
881 // Set default features.
882 copySTI().ToggleFeature("SOUTHERN_ISLANDS");
883 }
884
885 setAvailableFeatures(ComputeAvailableFeatures(getFeatureBits()));
886
887 {
888 // TODO: make those pre-defined variables read-only.
889 // Currently there is no suitable machinery in the core llvm-mc for this.
890 // MCSymbol::isRedefinable is intended for another purpose, and
891 // AsmParser::parseDirectiveSet() cannot be specialized for a specific target.
892 AMDGPU::IsaInfo::IsaVersion ISA =
893 AMDGPU::IsaInfo::getIsaVersion(getFeatureBits());
894 MCContext &Ctx = getContext();
895 MCSymbol *Sym =
896 Ctx.getOrCreateSymbol(Twine(".option.machine_version_major"));
897 Sym->setVariableValue(MCConstantExpr::create(ISA.Major, Ctx));
898 Sym = Ctx.getOrCreateSymbol(Twine(".option.machine_version_minor"));
899 Sym->setVariableValue(MCConstantExpr::create(ISA.Minor, Ctx));
900 Sym = Ctx.getOrCreateSymbol(Twine(".option.machine_version_stepping"));
901 Sym->setVariableValue(MCConstantExpr::create(ISA.Stepping, Ctx));
902 }
903 KernelScope.initialize(getContext());
904 }
905
906 bool hasXNACK() const {
907 return AMDGPU::hasXNACK(getSTI());
908 }
909
910 bool hasMIMG_R128() const {
911 return AMDGPU::hasMIMG_R128(getSTI());
912 }
913
914 bool hasPackedD16() const {
915 return AMDGPU::hasPackedD16(getSTI());
916 }
917
918 bool isSI() const {
919 return AMDGPU::isSI(getSTI());
920 }
921
922 bool isCI() const {
923 return AMDGPU::isCI(getSTI());
924 }
925
926 bool isVI() const {
927 return AMDGPU::isVI(getSTI());
928 }
929
930 bool isGFX9() const {
931 return AMDGPU::isGFX9(getSTI());
932 }
933
934 bool hasInv2PiInlineImm() const {
935 return getFeatureBits()[AMDGPU::FeatureInv2PiInlineImm];
936 }
937
938 bool hasFlatOffsets() const {
939 return getFeatureBits()[AMDGPU::FeatureFlatInstOffsets];
940 }
941
942 bool hasSGPR102_SGPR103() const {
943 return !isVI();
944 }
945
946 bool hasIntClamp() const {
947 return getFeatureBits()[AMDGPU::FeatureIntClamp];
948 }
949
950 AMDGPUTargetStreamer &getTargetStreamer() {
951 MCTargetStreamer &TS = *getParser().getStreamer().getTargetStreamer();
952 return static_cast<AMDGPUTargetStreamer &>(TS);
953 }
954
955 const MCRegisterInfo *getMRI() const {
956 // We need this const_cast because for some reason getContext() is not const
957 // in MCAsmParser.
958 return const_cast<AMDGPUAsmParser*>(this)->getContext().getRegisterInfo();
959 }
960
961 const MCInstrInfo *getMII() const {
962 return &MII;
963 }
964
965 const FeatureBitset &getFeatureBits() const {
966 return getSTI().getFeatureBits();
967 }
968
969 void setForcedEncodingSize(unsigned Size) { ForcedEncodingSize = Size; }
970 void setForcedDPP(bool ForceDPP_) { ForcedDPP = ForceDPP_; }
971 void setForcedSDWA(bool ForceSDWA_) { ForcedSDWA = ForceSDWA_; }
972
973 unsigned getForcedEncodingSize() const { return ForcedEncodingSize; }
974 bool isForcedVOP3() const { return ForcedEncodingSize == 64; }
975 bool isForcedDPP() const { return ForcedDPP; }
976 bool isForcedSDWA() const { return ForcedSDWA; }
977 ArrayRef<unsigned> getMatchedVariants() const;
978
979 std::unique_ptr<AMDGPUOperand> parseRegister();
980 bool ParseRegister(unsigned &RegNo, SMLoc &StartLoc, SMLoc &EndLoc) override;
981 unsigned checkTargetMatchPredicate(MCInst &Inst) override;
982 unsigned validateTargetOperandClass(MCParsedAsmOperand &Op,
983 unsigned Kind) override;
984 bool MatchAndEmitInstruction(SMLoc IDLoc, unsigned &Opcode,
985 OperandVector &Operands, MCStreamer &Out,
986 uint64_t &ErrorInfo,
987 bool MatchingInlineAsm) override;
988 bool ParseDirective(AsmToken DirectiveID) override;
989 OperandMatchResultTy parseOperand(OperandVector &Operands, StringRef Mnemonic);
990 StringRef parseMnemonicSuffix(StringRef Name);
991 bool ParseInstruction(ParseInstructionInfo &Info, StringRef Name,
992 SMLoc NameLoc, OperandVector &Operands) override;
993 //bool ProcessInstruction(MCInst &Inst);
994
995 OperandMatchResultTy parseIntWithPrefix(const char *Prefix, int64_t &Int);
996
997 OperandMatchResultTy
998 parseIntWithPrefix(const char *Prefix, OperandVector &Operands,
999 AMDGPUOperand::ImmTy ImmTy = AMDGPUOperand::ImmTyNone,
1000 bool (*ConvertResult)(int64_t &) = nullptr);
1001
1002 OperandMatchResultTy parseOperandArrayWithPrefix(
1003 const char *Prefix,
1004 OperandVector &Operands,
1005 AMDGPUOperand::ImmTy ImmTy = AMDGPUOperand::ImmTyNone,
1006 bool (*ConvertResult)(int64_t&) = nullptr);
1007
1008 OperandMatchResultTy
1009 parseNamedBit(const char *Name, OperandVector &Operands,
1010 AMDGPUOperand::ImmTy ImmTy = AMDGPUOperand::ImmTyNone);
1011 OperandMatchResultTy parseStringWithPrefix(StringRef Prefix,
1012 StringRef &Value);
1013
1014 bool parseAbsoluteExpr(int64_t &Val, bool AbsMod = false);
1015 OperandMatchResultTy parseImm(OperandVector &Operands, bool AbsMod = false);
1016 OperandMatchResultTy parseReg(OperandVector &Operands);
1017 OperandMatchResultTy parseRegOrImm(OperandVector &Operands, bool AbsMod = false);
1018 OperandMatchResultTy parseRegOrImmWithFPInputMods(OperandVector &Operands, bool AllowImm = true);
1019 OperandMatchResultTy parseRegOrImmWithIntInputMods(OperandVector &Operands, bool AllowImm = true);
1020 OperandMatchResultTy parseRegWithFPInputMods(OperandVector &Operands);
1021 OperandMatchResultTy parseRegWithIntInputMods(OperandVector &Operands);
1022 OperandMatchResultTy parseVReg32OrOff(OperandVector &Operands);
1023
1024 void cvtDSOffset01(MCInst &Inst, const OperandVector &Operands);
1025 void cvtDS(MCInst &Inst, const OperandVector &Operands) { cvtDSImpl(Inst, Operands, false); }
1026 void cvtDSGds(MCInst &Inst, const OperandVector &Operands) { cvtDSImpl(Inst, Operands, true); }
1027 void cvtExp(MCInst &Inst, const OperandVector &Operands);
1028
1029 bool parseCnt(int64_t &IntVal);
1030 OperandMatchResultTy parseSWaitCntOps(OperandVector &Operands);
1031 OperandMatchResultTy parseHwreg(OperandVector &Operands);
1032
1033private:
1034 struct OperandInfoTy {
1035 int64_t Id;
1036 bool IsSymbolic = false;
1037
1038 OperandInfoTy(int64_t Id_) : Id(Id_) {}
1039 };
1040
1041 bool parseSendMsgConstruct(OperandInfoTy &Msg, OperandInfoTy &Operation, int64_t &StreamId);
1042 bool parseHwregConstruct(OperandInfoTy &HwReg, int64_t &Offset, int64_t &Width);
1043
1044 void errorExpTgt();
1045 OperandMatchResultTy parseExpTgtImpl(StringRef Str, uint8_t &Val);
1046
1047 bool validateInstruction(const MCInst &Inst, const SMLoc &IDLoc);
1048 bool validateConstantBusLimitations(const MCInst &Inst);
1049 bool validateEarlyClobberLimitations(const MCInst &Inst);
1050 bool validateIntClampSupported(const MCInst &Inst);
1051 bool validateMIMGAtomicDMask(const MCInst &Inst);
1052 bool validateMIMGDataSize(const MCInst &Inst);
1053 bool validateMIMGR128(const MCInst &Inst);
1054 bool validateMIMGD16(const MCInst &Inst);
1055 bool usesConstantBus(const MCInst &Inst, unsigned OpIdx);
1056 bool isInlineConstant(const MCInst &Inst, unsigned OpIdx) const;
1057 unsigned findImplicitSGPRReadInVOP(const MCInst &Inst) const;
1058
1059 bool trySkipId(const StringRef Id);
1060 bool trySkipToken(const AsmToken::TokenKind Kind);
1061 bool skipToken(const AsmToken::TokenKind Kind, const StringRef ErrMsg);
1062 bool parseString(StringRef &Val, const StringRef ErrMsg = "expected a string");
1063 bool parseExpr(int64_t &Imm);
1064
1065public:
1066 OperandMatchResultTy parseOptionalOperand(OperandVector &Operands);
1067 OperandMatchResultTy parseOptionalOpr(OperandVector &Operands);
1068
1069 OperandMatchResultTy parseExpTgt(OperandVector &Operands);
1070 OperandMatchResultTy parseSendMsgOp(OperandVector &Operands);
1071 OperandMatchResultTy parseInterpSlot(OperandVector &Operands);
1072 OperandMatchResultTy parseInterpAttr(OperandVector &Operands);
1073 OperandMatchResultTy parseSOppBrTarget(OperandVector &Operands);
1074
1075 bool parseSwizzleOperands(const unsigned OpNum, int64_t* Op,
1076 const unsigned MinVal,
1077 const unsigned MaxVal,
1078 const StringRef ErrMsg);
1079 OperandMatchResultTy parseSwizzleOp(OperandVector &Operands);
1080 bool parseSwizzleOffset(int64_t &Imm);
1081 bool parseSwizzleMacro(int64_t &Imm);
1082 bool parseSwizzleQuadPerm(int64_t &Imm);
1083 bool parseSwizzleBitmaskPerm(int64_t &Imm);
1084 bool parseSwizzleBroadcast(int64_t &Imm);
1085 bool parseSwizzleSwap(int64_t &Imm);
1086 bool parseSwizzleReverse(int64_t &Imm);
1087
1088 void cvtMubuf(MCInst &Inst, const OperandVector &Operands) { cvtMubufImpl(Inst, Operands, false, false); }
1089 void cvtMubufAtomic(MCInst &Inst, const OperandVector &Operands) { cvtMubufImpl(Inst, Operands, true, false); }
1090 void cvtMubufAtomicReturn(MCInst &Inst, const OperandVector &Operands) { cvtMubufImpl(Inst, Operands, true, true); }
1091 void cvtMtbuf(MCInst &Inst, const OperandVector &Operands);
1092
1093 AMDGPUOperand::Ptr defaultGLC() const;
1094 AMDGPUOperand::Ptr defaultSLC() const;
1095 AMDGPUOperand::Ptr defaultTFE() const;
1096
1097 AMDGPUOperand::Ptr defaultD16() const;
1098 AMDGPUOperand::Ptr defaultDMask() const;
1099 AMDGPUOperand::Ptr defaultUNorm() const;
1100 AMDGPUOperand::Ptr defaultDA() const;
1101 AMDGPUOperand::Ptr defaultR128() const;
1102 AMDGPUOperand::Ptr defaultLWE() const;
1103 AMDGPUOperand::Ptr defaultSMRDOffset8() const;
1104 AMDGPUOperand::Ptr defaultSMRDOffset20() const;
1105 AMDGPUOperand::Ptr defaultSMRDLiteralOffset() const;
1106 AMDGPUOperand::Ptr defaultOffsetU12() const;
1107 AMDGPUOperand::Ptr defaultOffsetS13() const;
1108
1109 OperandMatchResultTy parseOModOperand(OperandVector &Operands);
1110
1111 void cvtVOP3(MCInst &Inst, const OperandVector &Operands,
1112 OptionalImmIndexMap &OptionalIdx);
1113 void cvtVOP3OpSel(MCInst &Inst, const OperandVector &Operands);
1114 void cvtVOP3(MCInst &Inst, const OperandVector &Operands);
1115 void cvtVOP3P(MCInst &Inst, const OperandVector &Operands);
1116
1117 void cvtVOP3Interp(MCInst &Inst, const OperandVector &Operands);
1118
1119 void cvtMIMG(MCInst &Inst, const OperandVector &Operands,
1120 bool IsAtomic = false);
1121 void cvtMIMGAtomic(MCInst &Inst, const OperandVector &Operands);
1122
1123 OperandMatchResultTy parseDPPCtrl(OperandVector &Operands);
1124 AMDGPUOperand::Ptr defaultRowMask() const;
1125 AMDGPUOperand::Ptr defaultBankMask() const;
1126 AMDGPUOperand::Ptr defaultBoundCtrl() const;
1127 void cvtDPP(MCInst &Inst, const OperandVector &Operands);
1128
1129 OperandMatchResultTy parseSDWASel(OperandVector &Operands, StringRef Prefix,
1130 AMDGPUOperand::ImmTy Type);
1131 OperandMatchResultTy parseSDWADstUnused(OperandVector &Operands);
1132 void cvtSdwaVOP1(MCInst &Inst, const OperandVector &Operands);
1133 void cvtSdwaVOP2(MCInst &Inst, const OperandVector &Operands);
1134 void cvtSdwaVOP2b(MCInst &Inst, const OperandVector &Operands);
1135 void cvtSdwaVOPC(MCInst &Inst, const OperandVector &Operands);
1136 void cvtSDWA(MCInst &Inst, const OperandVector &Operands,
1137 uint64_t BasicInstType, bool skipVcc = false);
1138};
1139
1140struct OptionalOperand {
1141 const char *Name;
1142 AMDGPUOperand::ImmTy Type;
1143 bool IsBit;
1144 bool (*ConvertResult)(int64_t&);
1145};
1146
1147} // end anonymous namespace
1148
1149// May be called with integer type with equivalent bitwidth.
1150static const fltSemantics *getFltSemantics(unsigned Size) {
1151 switch (Size) {
1152 case 4:
1153 return &APFloat::IEEEsingle();
1154 case 8:
1155 return &APFloat::IEEEdouble();
1156 case 2:
1157 return &APFloat::IEEEhalf();
1158 default:
1159 llvm_unreachable("unsupported fp type");
1160 }
1161}
1162
1163static const fltSemantics *getFltSemantics(MVT VT) {
1164 return getFltSemantics(VT.getSizeInBits() / 8);
1165}
1166
1167static const fltSemantics *getOpFltSemantics(uint8_t OperandType) {
1168 switch (OperandType) {
1169 case AMDGPU::OPERAND_REG_IMM_INT32:
1170 case AMDGPU::OPERAND_REG_IMM_FP32:
1171 case AMDGPU::OPERAND_REG_INLINE_C_INT32:
1172 case AMDGPU::OPERAND_REG_INLINE_C_FP32:
1173 return &APFloat::IEEEsingle();
1174 case AMDGPU::OPERAND_REG_IMM_INT64:
1175 case AMDGPU::OPERAND_REG_IMM_FP64:
1176 case AMDGPU::OPERAND_REG_INLINE_C_INT64:
1177 case AMDGPU::OPERAND_REG_INLINE_C_FP64:
1178 return &APFloat::IEEEdouble();
1179 case AMDGPU::OPERAND_REG_IMM_INT16:
1180 case AMDGPU::OPERAND_REG_IMM_FP16:
1181 case AMDGPU::OPERAND_REG_INLINE_C_INT16:
1182 case AMDGPU::OPERAND_REG_INLINE_C_FP16:
1183 case AMDGPU::OPERAND_REG_INLINE_C_V2INT16:
1184 case AMDGPU::OPERAND_REG_INLINE_C_V2FP16:
1185 return &APFloat::IEEEhalf();
1186 default:
1187 llvm_unreachable("unsupported fp type");
1188 }
1189}
1190
1191//===----------------------------------------------------------------------===//
1192// Operand
1193//===----------------------------------------------------------------------===//
1194
1195static bool canLosslesslyConvertToFPType(APFloat &FPLiteral, MVT VT) {
1196 bool Lost;
1197
1198 // Convert the literal to the semantics of the requested type
1199 APFloat::opStatus Status = FPLiteral.convert(*getFltSemantics(VT),
1200 APFloat::rmNearestTiesToEven,
1201 &Lost);
1202 // We allow precision loss but not overflow or underflow
1203 if (Status != APFloat::opOK &&
1204 Lost &&
1205 ((Status & APFloat::opOverflow) != 0 ||
1206 (Status & APFloat::opUnderflow) != 0)) {
1207 return false;
1208 }
1209
1210 return true;
1211}
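// [Editorial note] Example of the rule above: converting '1.1' to f16 is
// merely inexact (allowed), while '1e10' overflows f16 (opOverflow with Lost
// set), so canLosslesslyConvertToFPType() returns false for it.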
1212
1213bool AMDGPUOperand::isInlinableImm(MVT type) const {
1214 if (!isImmTy(ImmTyNone)) {
1215 // Only plain immediates are inlinable (e.g. "clamp" attribute is not)
1216 return false;
1217 }
1218 // TODO: We should avoid using host float here. It would be better to
1219 // check the float bit values which is what a few other places do.
1220 // We've had bot failures before due to weird NaN support on mips hosts.
1221
1222 APInt Literal(64, Imm.Val);
1223
1224 if (Imm.IsFPImm) { // We got fp literal token
1225 if (type == MVT::f64 || type == MVT::i64) { // Expected 64-bit operand
1226 return AMDGPU::isInlinableLiteral64(Imm.Val,
1227 AsmParser->hasInv2PiInlineImm());
1228 }
1229
1230 APFloat FPLiteral(APFloat::IEEEdouble(), APInt(64, Imm.Val));
1231 if (!canLosslesslyConvertToFPType(FPLiteral, type))
1232 return false;
1233
1234 if (type.getScalarSizeInBits() == 16) {
1235 return AMDGPU::isInlinableLiteral16(
1236 static_cast<int16_t>(FPLiteral.bitcastToAPInt().getZExtValue()),
1237 AsmParser->hasInv2PiInlineImm());
1238 }
1239
1240 // Check if single precision literal is inlinable
1241 return AMDGPU::isInlinableLiteral32(
1242 static_cast<int32_t>(FPLiteral.bitcastToAPInt().getZExtValue()),
1243 AsmParser->hasInv2PiInlineImm());
1244 }
1245
1246 // We got int literal token.
1247 if (type == MVT::f64 || type == MVT::i64) { // Expected 64-bit operand
1248 return AMDGPU::isInlinableLiteral64(Imm.Val,
1249 AsmParser->hasInv2PiInlineImm());
1250 }
1251
1252 if (type.getScalarSizeInBits() == 16) {
1253 return AMDGPU::isInlinableLiteral16(
1254 static_cast<int16_t>(Literal.getLoBits(16).getSExtValue()),
1255 AsmParser->hasInv2PiInlineImm());
1256 }
1257
1258 return AMDGPU::isInlinableLiteral32(
1259 static_cast<int32_t>(Literal.getLoBits(32).getZExtValue()),
1260 AsmParser->hasInv2PiInlineImm());
1261}
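// [Editorial note] E.g. 0.5 and -4.0 are inlinable f32 immediates, as is
// 1/(2*pi) when the subtarget has FeatureInv2PiInlineImm; 0.1 is not, so it
// must be encoded as a 32-bit literal and is vetted by isLiteralImm() below.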
1262
1263bool AMDGPUOperand::isLiteralImm(MVT type) const {
1264 // Check that this immediate can be added as a literal
1265 if (!isImmTy(ImmTyNone)) {
1266 return false;
1267 }
1268
1269 if (!Imm.IsFPImm) {
1270 // We got int literal token.
1271
1272 if (type == MVT::f64 && hasFPModifiers()) {
1273 // Cannot apply fp modifiers to int literals preserving the same semantics
1274 // for VOP1/2/C and VOP3 because of integer truncation. To avoid ambiguity,
1275 // disable these cases.
1276 return false;
1277 }
1278
1279 unsigned Size = type.getSizeInBits();
1280 if (Size == 64)
1281 Size = 32;
1282
1283 // FIXME: 64-bit operands can zero extend, sign extend, or pad zeroes for FP
1284 // types.
1285 return isUIntN(Size, Imm.Val) || isIntN(Size, Imm.Val);
1286 }
1287
1288 // We got fp literal token
1289 if (type == MVT::f64) { // Expected 64-bit fp operand
1290 // We would set the low 32 bits of the literal to zeroes, but we accept such literals
1291 return true;
1292 }
1293
1294 if (type == MVT::i64) { // Expected 64-bit int operand
1295 // We don't allow fp literals in 64-bit integer instructions. It is
1296 // unclear how we should encode them.
1297 return false;
1298 }
1299
1300 APFloat FPLiteral(APFloat::IEEEdouble(), APInt(64, Imm.Val));
1301 return canLosslesslyConvertToFPType(FPLiteral, type);
1302}
1303
1304bool AMDGPUOperand::isRegClass(unsigned RCID) const {
1305 return isRegKind() && AsmParser->getMRI()->getRegClass(RCID).contains(getReg());
1306}
1307
1308bool AMDGPUOperand::isSDWAOperand(MVT type) const {
1309 if (AsmParser->isVI())
1310 return isVReg();
1311 else if (AsmParser->isGFX9())
1312 return isRegKind() || isInlinableImm(type);
1313 else
1314 return false;
1315}
1316
1317bool AMDGPUOperand::isSDWAFP16Operand() const {
1318 return isSDWAOperand(MVT::f16);
1319}
1320
1321bool AMDGPUOperand::isSDWAFP32Operand() const {
1322 return isSDWAOperand(MVT::f32);
1323}
1324
1325bool AMDGPUOperand::isSDWAInt16Operand() const {
1326 return isSDWAOperand(MVT::i16);
1327}
1328
1329bool AMDGPUOperand::isSDWAInt32Operand() const {
1330 return isSDWAOperand(MVT::i32);
1331}
1332
1333uint64_t AMDGPUOperand::applyInputFPModifiers(uint64_t Val, unsigned Size) const
1334{
1335 assert(isImmTy(ImmTyNone) && Imm.Mods.hasFPModifiers());
1336 assert(Size == 2 || Size == 4 || Size == 8);
1337
1338 const uint64_t FpSignMask = (1ULL << (Size * 8 - 1));
1339
1340 if (Imm.Mods.Abs) {
1341 Val &= ~FpSignMask;
1342 }
1343 if (Imm.Mods.Neg) {
1344 Val ^= FpSignMask;
1345 }
1346
1347 return Val;
1348}
1349
1350void AMDGPUOperand::addImmOperands(MCInst &Inst, unsigned N, bool ApplyModifiers) const {
1351 if (AMDGPU::isSISrcOperand(AsmParser->getMII()->get(Inst.getOpcode()),
1352 Inst.getNumOperands())) {
1353 addLiteralImmOperand(Inst, Imm.Val,
1354 ApplyModifiers &
1355 isImmTy(ImmTyNone) && Imm.Mods.hasFPModifiers());
1356 } else {
1357 assert(!isImmTy(ImmTyNone) || !hasModifiers());
1358 Inst.addOperand(MCOperand::createImm(Imm.Val));
1359 }
1360}
1361
1362void AMDGPUOperand::addLiteralImmOperand(MCInst &Inst, int64_t Val, bool ApplyModifiers) const {
1363 const auto& InstDesc = AsmParser->getMII()->get(Inst.getOpcode());
1364 auto OpNum = Inst.getNumOperands();
1365 // Check that this operand accepts literals
1366 assert(AMDGPU::isSISrcOperand(InstDesc, OpNum));
1367
1368 if (ApplyModifiers) {
1369 assert(AMDGPU::isSISrcFPOperand(InstDesc, OpNum));
1370 const unsigned Size = Imm.IsFPImm ? sizeof(double) : getOperandSize(InstDesc, OpNum);
1371 Val = applyInputFPModifiers(Val, Size);
1372 }
1373
1374 APInt Literal(64, Val);
1375 uint8_t OpTy = InstDesc.OpInfo[OpNum].OperandType;
1376
1377 if (Imm.IsFPImm) { // We got fp literal token
1378 switch (OpTy) {
1379 case AMDGPU::OPERAND_REG_IMM_INT64:
1380 case AMDGPU::OPERAND_REG_IMM_FP64:
1381 case AMDGPU::OPERAND_REG_INLINE_C_INT64:
1382 case AMDGPU::OPERAND_REG_INLINE_C_FP64:
1383 if (AMDGPU::isInlinableLiteral64(Literal.getZExtValue(),
1384 AsmParser->hasInv2PiInlineImm())) {
1385 Inst.addOperand(MCOperand::createImm(Literal.getZExtValue()));
1386 return;
1387 }
1388
1389 // Non-inlineable
1390 if (AMDGPU::isSISrcFPOperand(InstDesc, OpNum)) { // Expected 64-bit fp operand
1391 // For fp operands we check if low 32 bits are zeros
1392 if (Literal.getLoBits(32) != 0) {
1393 const_cast<AMDGPUAsmParser *>(AsmParser)->Warning(Inst.getLoc(),
1394 "Can't encode literal as exact 64-bit floating-point operand. "
1395 "Low 32-bits will be set to zero");
1396 }
1397
1398 Inst.addOperand(MCOperand::createImm(Literal.lshr(32).getZExtValue()));
1399 return;
1400 }
1401
1402 // We don't allow fp literals in 64-bit integer instructions. It is
1403 // unclear how we should encode them. This case should be checked earlier
1404 // in predicate methods (isLiteralImm())
1405 llvm_unreachable("fp literal in 64-bit integer instruction.");
1406
1407 case AMDGPU::OPERAND_REG_IMM_INT32:
1408 case AMDGPU::OPERAND_REG_IMM_FP32:
1409 case AMDGPU::OPERAND_REG_INLINE_C_INT32:
1410 case AMDGPU::OPERAND_REG_INLINE_C_FP32:
1411 case AMDGPU::OPERAND_REG_IMM_INT16:
1412 case AMDGPU::OPERAND_REG_IMM_FP16:
1413 case AMDGPU::OPERAND_REG_INLINE_C_INT16:
1414 case AMDGPU::OPERAND_REG_INLINE_C_FP16:
1415 case AMDGPU::OPERAND_REG_INLINE_C_V2INT16:
1416 case AMDGPU::OPERAND_REG_INLINE_C_V2FP16: {
1417 bool lost;
1418 APFloat FPLiteral(APFloat::IEEEdouble(), Literal);
1419 // Convert the literal to the fp semantics of the operand type
1420 FPLiteral.convert(*getOpFltSemantics(OpTy),
1421 APFloat::rmNearestTiesToEven, &lost);
1422 // We allow precision loss but not overflow or underflow. This should be
1423 // checked earlier in isLiteralImm()
1424
1425 uint64_t ImmVal = FPLiteral.bitcastToAPInt().getZExtValue();
1426 if (OpTy == AMDGPU::OPERAND_REG_INLINE_C_V2INT16 ||
1427 OpTy == AMDGPU::OPERAND_REG_INLINE_C_V2FP16) {
1428 ImmVal |= (ImmVal << 16);
1429 }
1430
1431 Inst.addOperand(MCOperand::createImm(ImmVal));
1432 return;
1433 }
1434 default:
1435 llvm_unreachable("invalid operand size");
1436 }
1437
1438 return;
1439 }
1440
1441 // We got int literal token.
1442 // Only sign extend inline immediates.
1443 // FIXME: No errors on truncation
1444 switch (OpTy) {
1445 case AMDGPU::OPERAND_REG_IMM_INT32:
1446 case AMDGPU::OPERAND_REG_IMM_FP32:
1447 case AMDGPU::OPERAND_REG_INLINE_C_INT32:
1448 case AMDGPU::OPERAND_REG_INLINE_C_FP32:
1449 if (isInt<32>(Val) &&
1450 AMDGPU::isInlinableLiteral32(static_cast<int32_t>(Val),
1451 AsmParser->hasInv2PiInlineImm())) {
1452 Inst.addOperand(MCOperand::createImm(Val));
1453 return;
1454 }
1455
1456 Inst.addOperand(MCOperand::createImm(Val & 0xffffffff));
1457 return;
1458
1459 case AMDGPU::OPERAND_REG_IMM_INT64:
1460 case AMDGPU::OPERAND_REG_IMM_FP64:
1461 case AMDGPU::OPERAND_REG_INLINE_C_INT64:
1462 case AMDGPU::OPERAND_REG_INLINE_C_FP64:
1463 if (AMDGPU::isInlinableLiteral64(Val, AsmParser->hasInv2PiInlineImm())) {
1464 Inst.addOperand(MCOperand::createImm(Val));
1465 return;
1466 }
1467
1468 Inst.addOperand(MCOperand::createImm(Lo_32(Val)));
1469 return;
1470
1471 case AMDGPU::OPERAND_REG_IMM_INT16:
1472 case AMDGPU::OPERAND_REG_IMM_FP16:
1473 case AMDGPU::OPERAND_REG_INLINE_C_INT16:
1474 case AMDGPU::OPERAND_REG_INLINE_C_FP16:
1475 if (isInt<16>(Val) &&
1476 AMDGPU::isInlinableLiteral16(static_cast<int16_t>(Val),
1477 AsmParser->hasInv2PiInlineImm())) {
1478 Inst.addOperand(MCOperand::createImm(Val));
1479 return;
1480 }
1481
1482 Inst.addOperand(MCOperand::createImm(Val & 0xffff));
1483 return;
1484
1485 case AMDGPU::OPERAND_REG_INLINE_C_V2INT16:
1486 case AMDGPU::OPERAND_REG_INLINE_C_V2FP16: {
1487 auto LiteralVal = static_cast<uint16_t>(Literal.getLoBits(16).getZExtValue());
1488 assert(AMDGPU::isInlinableLiteral16(LiteralVal,
1489 AsmParser->hasInv2PiInlineImm()));
1490
1491 uint32_t ImmVal = static_cast<uint32_t>(LiteralVal) << 16 |
1492 static_cast<uint32_t>(LiteralVal);
1493 Inst.addOperand(MCOperand::createImm(ImmVal));
1494 return;
1495 }
1496 default:
1497 llvm_unreachable("invalid operand size")::llvm::llvm_unreachable_internal("invalid operand size", "/build/llvm-toolchain-snapshot-7~svn325118/lib/Target/AMDGPU/AsmParser/AMDGPUAsmParser.cpp"
, 1497)
;
1498 }
1499}
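
For illustration, a minimal standalone sketch of the packed-operand replication performed in the V2INT16/V2FP16 cases above; packV216 is a hypothetical helper, not part of this file:

    #include <cassert>
    #include <cstdint>

    // Replicate a 16-bit literal into both halves of a packed 32-bit
    // operand, mirroring the shift-and-or in addLiteralImmOperand.
    static uint32_t packV216(uint16_t Lo16) {
      return (static_cast<uint32_t>(Lo16) << 16) | static_cast<uint32_t>(Lo16);
    }

    int main() {
      assert(packV216(0x3C00) == 0x3C003C00u); // 0x3C00 is 1.0 in IEEE half
      return 0;
    }
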
1500
1501template <unsigned Bitwidth>
1502void AMDGPUOperand::addKImmFPOperands(MCInst &Inst, unsigned N) const {
1503 APInt Literal(64, Imm.Val);
1504
1505 if (!Imm.IsFPImm) {
1506 // We got int literal token.
1507 Inst.addOperand(MCOperand::createImm(Literal.getLoBits(Bitwidth).getZExtValue()));
1508 return;
1509 }
1510
1511 bool Lost;
1512 APFloat FPLiteral(APFloat::IEEEdouble(), Literal);
1513 FPLiteral.convert(*getFltSemantics(Bitwidth / 8),
1514 APFloat::rmNearestTiesToEven, &Lost);
1515 Inst.addOperand(MCOperand::createImm(FPLiteral.bitcastToAPInt().getZExtValue()));
1516}
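
For context, the same APFloat narrowing that addKImmFPOperands performs, extracted into a hypothetical helper (assumes LLVM's ADT headers; toKImm16 is not part of this file):

    #include "llvm/ADT/APFloat.h"
    #include "llvm/ADT/APInt.h"
    #include <cstdint>

    // Narrow a 64-bit fp literal to half precision, tolerating precision
    // loss exactly as the parser does (round to nearest, ties to even).
    static uint64_t toKImm16(uint64_t DoubleBits) {
      bool Lost;
      llvm::APFloat FPLiteral(llvm::APFloat::IEEEdouble(),
                              llvm::APInt(64, DoubleBits));
      FPLiteral.convert(llvm::APFloat::IEEEhalf(),
                        llvm::APFloat::rmNearestTiesToEven, &Lost);
      return FPLiteral.bitcastToAPInt().getZExtValue();
    }
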
1517
1518void AMDGPUOperand::addRegOperands(MCInst &Inst, unsigned N) const {
1519 Inst.addOperand(MCOperand::createReg(AMDGPU::getMCReg(getReg(), AsmParser->getSTI())));
1520}
1521
1522//===----------------------------------------------------------------------===//
1523// AsmParser
1524//===----------------------------------------------------------------------===//
1525
1526static int getRegClass(RegisterKind Is, unsigned RegWidth) {
1527 if (Is == IS_VGPR) {
1528 switch (RegWidth) {
1529 default: return -1;
1530 case 1: return AMDGPU::VGPR_32RegClassID;
1531 case 2: return AMDGPU::VReg_64RegClassID;
1532 case 3: return AMDGPU::VReg_96RegClassID;
1533 case 4: return AMDGPU::VReg_128RegClassID;
1534 case 8: return AMDGPU::VReg_256RegClassID;
1535 case 16: return AMDGPU::VReg_512RegClassID;
1536 }
1537 } else if (Is == IS_TTMP) {
1538 switch (RegWidth) {
1539 default: return -1;
1540 case 1: return AMDGPU::TTMP_32RegClassID;
1541 case 2: return AMDGPU::TTMP_64RegClassID;
1542 case 4: return AMDGPU::TTMP_128RegClassID;
1543 case 8: return AMDGPU::TTMP_256RegClassID;
1544 case 16: return AMDGPU::TTMP_512RegClassID;
1545 }
1546 } else if (Is == IS_SGPR) {
1547 switch (RegWidth) {
1548 default: return -1;
1549 case 1: return AMDGPU::SGPR_32RegClassID;
1550 case 2: return AMDGPU::SGPR_64RegClassID;
1551 case 4: return AMDGPU::SGPR_128RegClassID;
1552 case 8: return AMDGPU::SGPR_256RegClassID;
1553 case 16: return AMDGPU::SGPR_512RegClassID;
1554 }
1555 }
1556 return -1;
1557}
1558
1559static unsigned getSpecialRegForName(StringRef RegName) {
1560 return StringSwitch<unsigned>(RegName)
1561 .Case("exec", AMDGPU::EXEC)
1562 .Case("vcc", AMDGPU::VCC)
1563 .Case("flat_scratch", AMDGPU::FLAT_SCR)
1564 .Case("xnack_mask", AMDGPU::XNACK_MASK)
1565 .Case("m0", AMDGPU::M0)
1566 .Case("scc", AMDGPU::SCC)
1567 .Case("tba", AMDGPU::TBA)
1568 .Case("tma", AMDGPU::TMA)
1569 .Case("flat_scratch_lo", AMDGPU::FLAT_SCR_LO)
1570 .Case("flat_scratch_hi", AMDGPU::FLAT_SCR_HI)
1571 .Case("xnack_mask_lo", AMDGPU::XNACK_MASK_LO)
1572 .Case("xnack_mask_hi", AMDGPU::XNACK_MASK_HI)
1573 .Case("vcc_lo", AMDGPU::VCC_LO)
1574 .Case("vcc_hi", AMDGPU::VCC_HI)
1575 .Case("exec_lo", AMDGPU::EXEC_LO)
1576 .Case("exec_hi", AMDGPU::EXEC_HI)
1577 .Case("tma_lo", AMDGPU::TMA_LO)
1578 .Case("tma_hi", AMDGPU::TMA_HI)
1579 .Case("tba_lo", AMDGPU::TBA_LO)
1580 .Case("tba_hi", AMDGPU::TBA_HI)
1581 .Default(0);
1582}
1583
1584bool AMDGPUAsmParser::ParseRegister(unsigned &RegNo, SMLoc &StartLoc,
1585 SMLoc &EndLoc) {
1586 auto R = parseRegister();
1587 if (!R) return true;
1588 assert(R->isReg());
1589 RegNo = R->getReg();
1590 StartLoc = R->getStartLoc();
1591 EndLoc = R->getEndLoc();
1592 return false;
1593}
1594
1595bool AMDGPUAsmParser::AddNextRegisterToList(unsigned &Reg, unsigned &RegWidth,
1596 RegisterKind RegKind, unsigned Reg1,
1597 unsigned RegNum) {
1598 switch (RegKind) {
1599 case IS_SPECIAL:
1600 if (Reg == AMDGPU::EXEC_LO && Reg1 == AMDGPU::EXEC_HI) {
1601 Reg = AMDGPU::EXEC;
1602 RegWidth = 2;
1603 return true;
1604 }
1605 if (Reg == AMDGPU::FLAT_SCR_LO && Reg1 == AMDGPU::FLAT_SCR_HI) {
1606 Reg = AMDGPU::FLAT_SCR;
1607 RegWidth = 2;
1608 return true;
1609 }
1610 if (Reg == AMDGPU::XNACK_MASK_LO && Reg1 == AMDGPU::XNACK_MASK_HI) {
1611 Reg = AMDGPU::XNACK_MASK;
1612 RegWidth = 2;
1613 return true;
1614 }
1615 if (Reg == AMDGPU::VCC_LO && Reg1 == AMDGPU::VCC_HI) {
1616 Reg = AMDGPU::VCC;
1617 RegWidth = 2;
1618 return true;
1619 }
1620 if (Reg == AMDGPU::TBA_LO && Reg1 == AMDGPU::TBA_HI) {
1621 Reg = AMDGPU::TBA;
1622 RegWidth = 2;
1623 return true;
1624 }
1625 if (Reg == AMDGPU::TMA_LO && Reg1 == AMDGPU::TMA_HI) {
1626 Reg = AMDGPU::TMA;
1627 RegWidth = 2;
1628 return true;
1629 }
1630 return false;
1631 case IS_VGPR:
1632 case IS_SGPR:
1633 case IS_TTMP:
1634 if (Reg1 != Reg + RegWidth) {
1635 return false;
1636 }
1637 RegWidth++;
1638 return true;
1639 default:
1640 llvm_unreachable("unexpected register kind")::llvm::llvm_unreachable_internal("unexpected register kind",
"/build/llvm-toolchain-snapshot-7~svn325118/lib/Target/AMDGPU/AsmParser/AMDGPUAsmParser.cpp"
, 1640)
;
1641 }
1642}
1643
1644bool AMDGPUAsmParser::ParseAMDGPURegister(RegisterKind &RegKind, unsigned &Reg,
1645 unsigned &RegNum, unsigned &RegWidth,
1646 unsigned *DwordRegIndex) {
1647 if (DwordRegIndex) { *DwordRegIndex = 0; }
1648 const MCRegisterInfo *TRI = getContext().getRegisterInfo();
1649 if (getLexer().is(AsmToken::Identifier)) {
1650 StringRef RegName = Parser.getTok().getString();
1651 if ((Reg = getSpecialRegForName(RegName))) {
1652 Parser.Lex();
1653 RegKind = IS_SPECIAL;
1654 } else {
1655 unsigned RegNumIndex = 0;
1656 if (RegName[0] == 'v') {
1657 RegNumIndex = 1;
1658 RegKind = IS_VGPR;
1659 } else if (RegName[0] == 's') {
1660 RegNumIndex = 1;
1661 RegKind = IS_SGPR;
1662 } else if (RegName.startswith("ttmp")) {
1663 RegNumIndex = strlen("ttmp");
1664 RegKind = IS_TTMP;
1665 } else {
1666 return false;
1667 }
1668 if (RegName.size() > RegNumIndex) {
1669 // Single 32-bit register: vXX.
1670 if (RegName.substr(RegNumIndex).getAsInteger(10, RegNum))
1671 return false;
1672 Parser.Lex();
1673 RegWidth = 1;
1674 } else {
1675 // Range of registers: v[XX:YY]. ":YY" is optional.
1676 Parser.Lex();
1677 int64_t RegLo, RegHi;
1678 if (getLexer().isNot(AsmToken::LBrac))
1679 return false;
1680 Parser.Lex();
1681
1682 if (getParser().parseAbsoluteExpression(RegLo))
1683 return false;
1684
1685 const bool isRBrace = getLexer().is(AsmToken::RBrac);
1686 if (!isRBrace && getLexer().isNot(AsmToken::Colon))
1687 return false;
1688 Parser.Lex();
1689
1690 if (isRBrace) {
1691 RegHi = RegLo;
1692 } else {
1693 if (getParser().parseAbsoluteExpression(RegHi))
1694 return false;
1695
1696 if (getLexer().isNot(AsmToken::RBrac))
1697 return false;
1698 Parser.Lex();
1699 }
1700 RegNum = (unsigned) RegLo;
1701 RegWidth = (RegHi - RegLo) + 1;
1702 }
1703 }
1704 } else if (getLexer().is(AsmToken::LBrac)) {
1705 // List of consecutive registers: [s0,s1,s2,s3]
1706 Parser.Lex();
1707 if (!ParseAMDGPURegister(RegKind, Reg, RegNum, RegWidth, nullptr))
1708 return false;
1709 if (RegWidth != 1)
1710 return false;
1711 RegisterKind RegKind1;
1712 unsigned Reg1, RegNum1, RegWidth1;
1713 do {
1714 if (getLexer().is(AsmToken::Comma)) {
1715 Parser.Lex();
1716 } else if (getLexer().is(AsmToken::RBrac)) {
1717 Parser.Lex();
1718 break;
1719 } else if (ParseAMDGPURegister(RegKind1, Reg1, RegNum1, RegWidth1, nullptr)) {
1720 if (RegWidth1 != 1) {
1721 return false;
1722 }
1723 if (RegKind1 != RegKind) {
1724 return false;
1725 }
1726 if (!AddNextRegisterToList(Reg, RegWidth, RegKind1, Reg1, RegNum1)) {
1727 return false;
1728 }
1729 } else {
1730 return false;
1731 }
1732 } while (true);
1733 } else {
1734 return false;
1735 }
1736 switch (RegKind) {
1737 case IS_SPECIAL:
1738 RegNum = 0;
1739 RegWidth = 1;
1740 break;
1741 case IS_VGPR:
1742 case IS_SGPR:
1743 case IS_TTMP:
1744 {
1745 unsigned Size = 1;
1746 if (RegKind == IS_SGPR || RegKind == IS_TTMP) {
1747 // SGPR and TTMP registers must be aligned. Max required alignment is 4 dwords.
1748 Size = std::min(RegWidth, 4u);
1749 }
1750 if (RegNum % Size != 0)
1751 return false;
1752 if (DwordRegIndex) { *DwordRegIndex = RegNum; }
1753 RegNum = RegNum / Size;
1754 int RCID = getRegClass(RegKind, RegWidth);
1755 if (RCID == -1)
1756 return false;
1757 const MCRegisterClass RC = TRI->getRegClass(RCID);
1758 if (RegNum >= RC.getNumRegs())
1759 return false;
1760 Reg = RC.getRegister(RegNum);
1761 break;
1762 }
1763
1764 default:
1765 llvm_unreachable("unexpected register kind")::llvm::llvm_unreachable_internal("unexpected register kind",
"/build/llvm-toolchain-snapshot-7~svn325118/lib/Target/AMDGPU/AsmParser/AMDGPUAsmParser.cpp"
, 1765)
;
1766 }
1767
1768 if (!subtargetHasRegister(*TRI, Reg))
1769 return false;
1770 return true;
1771}
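
A compact restatement of the SGPR/TTMP alignment rule enforced above, with widths counted in 32-bit dwords; isAlignedSGPRRange is a hypothetical helper. Under it, s[2:3] is accepted and s[3:4] is rejected.

    #include <algorithm>

    // SGPR and TTMP ranges must start on a boundary of their width,
    // capped at the maximum required alignment of 4 dwords.
    static bool isAlignedSGPRRange(unsigned RegNum, unsigned RegWidth) {
      unsigned Size = std::min(RegWidth, 4u);
      return RegNum % Size == 0;
    }
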
1772
1773std::unique_ptr<AMDGPUOperand> AMDGPUAsmParser::parseRegister() {
1774 const auto &Tok = Parser.getTok();
1775 SMLoc StartLoc = Tok.getLoc();
1776 SMLoc EndLoc = Tok.getEndLoc();
1777 RegisterKind RegKind;
1778 unsigned Reg, RegNum, RegWidth, DwordRegIndex;
1779
1780 if (!ParseAMDGPURegister(RegKind, Reg, RegNum, RegWidth, &DwordRegIndex)) {
1781 return nullptr;
1782 }
1783 KernelScope.usesRegister(RegKind, DwordRegIndex, RegWidth);
1784 return AMDGPUOperand::CreateReg(this, Reg, StartLoc, EndLoc, false);
1785}
1786
1787bool
1788AMDGPUAsmParser::parseAbsoluteExpr(int64_t &Val, bool AbsMod) {
1789 if (AbsMod && getLexer().peekTok().is(AsmToken::Pipe) &&
1790 (getLexer().getKind() == AsmToken::Integer ||
1791 getLexer().getKind() == AsmToken::Real)) {
1792 // This is a workaround for handling operands like these:
1793 // |1.0|
1794 // |-1|
1795 // This syntax is not compatible with syntax of standard
1796 // MC expressions (due to the trailing '|').
1797
1798 SMLoc EndLoc;
1799 const MCExpr *Expr;
1800
1801 if (getParser().parsePrimaryExpr(Expr, EndLoc)) {
1802 return true;
1803 }
1804
1805 return !Expr->evaluateAsAbsolute(Val);
1806 }
1807
1808 return getParser().parseAbsoluteExpression(Val);
1809}
1810
1811OperandMatchResultTy
1812AMDGPUAsmParser::parseImm(OperandVector &Operands, bool AbsMod) {
1813 // TODO: add syntactic sugar for 1/(2*PI)
1814 bool Minus = false;
1815 if (getLexer().getKind() == AsmToken::Minus) {
1816 const AsmToken NextToken = getLexer().peekTok();
1817 if (!NextToken.is(AsmToken::Integer) &&
1818 !NextToken.is(AsmToken::Real)) {
1819 return MatchOperand_NoMatch;
1820 }
1821 Minus = true;
1822 Parser.Lex();
1823 }
1824
1825 SMLoc S = Parser.getTok().getLoc();
1826 switch(getLexer().getKind()) {
1827 case AsmToken::Integer: {
1828 int64_t IntVal;
1829 if (parseAbsoluteExpr(IntVal, AbsMod))
1830 return MatchOperand_ParseFail;
1831 if (Minus)
1832 IntVal *= -1;
1833 Operands.push_back(AMDGPUOperand::CreateImm(this, IntVal, S));
1834 return MatchOperand_Success;
1835 }
1836 case AsmToken::Real: {
1837 int64_t IntVal;
1838 if (parseAbsoluteExpr(IntVal, AbsMod))
1839 return MatchOperand_ParseFail;
1840
1841 APFloat F(BitsToDouble(IntVal));
1842 if (Minus)
1843 F.changeSign();
1844 Operands.push_back(
1845 AMDGPUOperand::CreateImm(this, F.bitcastToAPInt().getZExtValue(), S,
1846 AMDGPUOperand::ImmTyNone, true));
1847 return MatchOperand_Success;
1848 }
1849 default:
1850 return MatchOperand_NoMatch;
1851 }
1852}
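
The fp case above negates via changeSign() rather than arithmetic negation because the literal travels as raw IEEE-754 bits; a sketch of the equivalent bit operation (negateDoubleBits is illustrative):

    #include <cstdint>

    // Flip the IEEE-754 sign bit of a double carried as raw bits, which is
    // what APFloat::changeSign amounts to, for finite and non-finite
    // values alike.
    static uint64_t negateDoubleBits(uint64_t Bits) {
      return Bits ^ (1ull << 63);
    }
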
1853
1854OperandMatchResultTy
1855AMDGPUAsmParser::parseReg(OperandVector &Operands) {
1856 if (auto R = parseRegister()) {
1857 assert(R->isReg());
1858 R->Reg.IsForcedVOP3 = isForcedVOP3();
1859 Operands.push_back(std::move(R));
1860 return MatchOperand_Success;
1861 }
1862 return MatchOperand_NoMatch;
1863}
1864
1865OperandMatchResultTy
1866AMDGPUAsmParser::parseRegOrImm(OperandVector &Operands, bool AbsMod) {
1867 auto res = parseImm(Operands, AbsMod);
1868 if (res != MatchOperand_NoMatch) {
1869 return res;
1870 }
1871
1872 return parseReg(Operands);
1873}
1874
1875OperandMatchResultTy
1876AMDGPUAsmParser::parseRegOrImmWithFPInputMods(OperandVector &Operands,
1877 bool AllowImm) {
1878 bool Negate = false, Negate2 = false, Abs = false, Abs2 = false;
1879
1880 if (getLexer().getKind()== AsmToken::Minus) {
1881 const AsmToken NextToken = getLexer().peekTok();
1882
1883 // Disable ambiguous constructs like '--1' etc. Should use neg(-1) instead.
1884 if (NextToken.is(AsmToken::Minus)) {
1885 Error(Parser.getTok().getLoc(), "invalid syntax, expected 'neg' modifier");
1886 return MatchOperand_ParseFail;
1887 }
1888
1889 // '-' followed by an integer literal N should be interpreted as integer
1890 // negation rather than a floating-point NEG modifier applied to N.
1891 // Besides being counter-intuitive, such use of the floating-point NEG modifier
1892 // results in different meaning of integer literals used with VOP1/2/C
1893 // and VOP3, for example:
1894 // v_exp_f32_e32 v5, -1 // VOP1: src0 = 0xFFFFFFFF
1895 // v_exp_f32_e64 v5, -1 // VOP3: src0 = 0x80000001
1896 // Negative fp literals should be handled likewise for uniformity.
1897 if (!NextToken.is(AsmToken::Integer) && !NextToken.is(AsmToken::Real)) {
1898 Parser.Lex();
1899 Negate = true;
1900 }
1901 }
1902
1903 if (getLexer().getKind() == AsmToken::Identifier &&
1904 Parser.getTok().getString() == "neg") {
1905 if (Negate) {
1906 Error(Parser.getTok().getLoc(), "expected register or immediate");
1907 return MatchOperand_ParseFail;
1908 }
1909 Parser.Lex();
1910 Negate2 = true;
1911 if (getLexer().isNot(AsmToken::LParen)) {
1912 Error(Parser.getTok().getLoc(), "expected left paren after neg");
1913 return MatchOperand_ParseFail;
1914 }
1915 Parser.Lex();
1916 }
1917
1918 if (getLexer().getKind() == AsmToken::Identifier &&
1919 Parser.getTok().getString() == "abs") {
1920 Parser.Lex();
1921 Abs2 = true;
1922 if (getLexer().isNot(AsmToken::LParen)) {
1923 Error(Parser.getTok().getLoc(), "expected left paren after abs");
1924 return MatchOperand_ParseFail;
1925 }
1926 Parser.Lex();
1927 }
1928
1929 if (getLexer().getKind() == AsmToken::Pipe) {
1930 if (Abs2) {
1931 Error(Parser.getTok().getLoc(), "expected register or immediate");
1932 return MatchOperand_ParseFail;
1933 }
1934 Parser.Lex();
1935 Abs = true;
1936 }
1937
1938 OperandMatchResultTy Res;
1939 if (AllowImm) {
1940 Res = parseRegOrImm(Operands, Abs);
1941 } else {
1942 Res = parseReg(Operands);
1943 }
1944 if (Res != MatchOperand_Success) {
1945 return Res;
1946 }
1947
1948 AMDGPUOperand::Modifiers Mods;
1949 if (Abs) {
1950 if (getLexer().getKind() != AsmToken::Pipe) {
1951 Error(Parser.getTok().getLoc(), "expected vertical bar");
1952 return MatchOperand_ParseFail;
1953 }
1954 Parser.Lex();
1955 Mods.Abs = true;
1956 }
1957 if (Abs2) {
1958 if (getLexer().isNot(AsmToken::RParen)) {
1959 Error(Parser.getTok().getLoc(), "expected closing parentheses");
1960 return MatchOperand_ParseFail;
1961 }
1962 Parser.Lex();
1963 Mods.Abs = true;
1964 }
1965
1966 if (Negate) {
1967 Mods.Neg = true;
1968 } else if (Negate2) {
1969 if (getLexer().isNot(AsmToken::RParen)) {
1970 Error(Parser.getTok().getLoc(), "expected closing parentheses");
1971 return MatchOperand_ParseFail;
1972 }
1973 Parser.Lex();
1974 Mods.Neg = true;
1975 }
1976
1977 if (Mods.hasFPModifiers()) {
1978 AMDGPUOperand &Op = static_cast<AMDGPUOperand &>(*Operands.back());
1979 Op.setModifiers(Mods);
1980 }
1981 return MatchOperand_Success;
1982}
1983
1984OperandMatchResultTy
1985AMDGPUAsmParser::parseRegOrImmWithIntInputMods(OperandVector &Operands,
1986 bool AllowImm) {
1987 bool Sext = false;
1988
1989 if (getLexer().getKind() == AsmToken::Identifier &&
1990 Parser.getTok().getString() == "sext") {
1991 Parser.Lex();
1992 Sext = true;
1993 if (getLexer().isNot(AsmToken::LParen)) {
1994 Error(Parser.getTok().getLoc(), "expected left paren after sext");
1995 return MatchOperand_ParseFail;
1996 }
1997 Parser.Lex();
1998 }
1999
2000 OperandMatchResultTy Res;
2001 if (AllowImm) {
2002 Res = parseRegOrImm(Operands);
2003 } else {
2004 Res = parseReg(Operands);
2005 }
2006 if (Res != MatchOperand_Success) {
2007 return Res;
2008 }
2009
2010 AMDGPUOperand::Modifiers Mods;
2011 if (Sext) {
2012 if (getLexer().isNot(AsmToken::RParen)) {
2013 Error(Parser.getTok().getLoc(), "expected closing parentheses");
2014 return MatchOperand_ParseFail;
2015 }
2016 Parser.Lex();
2017 Mods.Sext = true;
2018 }
2019
2020 if (Mods.hasIntModifiers()) {
2021 AMDGPUOperand &Op = static_cast<AMDGPUOperand &>(*Operands.back());
2022 Op.setModifiers(Mods);
2023 }
2024
2025 return MatchOperand_Success;
2026}
2027
2028OperandMatchResultTy
2029AMDGPUAsmParser::parseRegWithFPInputMods(OperandVector &Operands) {
2030 return parseRegOrImmWithFPInputMods(Operands, false);
2031}
2032
2033OperandMatchResultTy
2034AMDGPUAsmParser::parseRegWithIntInputMods(OperandVector &Operands) {
2035 return parseRegOrImmWithIntInputMods(Operands, false);
2036}
2037
2038OperandMatchResultTy AMDGPUAsmParser::parseVReg32OrOff(OperandVector &Operands) {
2039 std::unique_ptr<AMDGPUOperand> Reg = parseRegister();
2040 if (Reg) {
2041 Operands.push_back(std::move(Reg));
2042 return MatchOperand_Success;
2043 }
2044
2045 const AsmToken &Tok = Parser.getTok();
2046 if (Tok.getString() == "off") {
2047 Operands.push_back(AMDGPUOperand::CreateImm(this, 0, Tok.getLoc(),
2048 AMDGPUOperand::ImmTyOff, false));
2049 Parser.Lex();
2050 return MatchOperand_Success;
2051 }
2052
2053 return MatchOperand_NoMatch;
2054}
2055
2056unsigned AMDGPUAsmParser::checkTargetMatchPredicate(MCInst &Inst) {
2057 uint64_t TSFlags = MII.get(Inst.getOpcode()).TSFlags;
2058
2059 if ((getForcedEncodingSize() == 32 && (TSFlags & SIInstrFlags::VOP3)) ||
2060 (getForcedEncodingSize() == 64 && !(TSFlags & SIInstrFlags::VOP3)) ||
2061 (isForcedDPP() && !(TSFlags & SIInstrFlags::DPP)) ||
2062 (isForcedSDWA() && !(TSFlags & SIInstrFlags::SDWA)) )
2063 return Match_InvalidOperand;
2064
2065 if ((TSFlags & SIInstrFlags::VOP3) &&
2066 (TSFlags & SIInstrFlags::VOPAsmPrefer32Bit) &&
2067 getForcedEncodingSize() != 64)
2068 return Match_PreferE32;
2069
2070 if (Inst.getOpcode() == AMDGPU::V_MAC_F32_sdwa_vi ||
2071 Inst.getOpcode() == AMDGPU::V_MAC_F16_sdwa_vi) {
2072 // v_mac_f32/16 allow only dst_sel == DWORD;
2073 auto OpNum =
2074 AMDGPU::getNamedOperandIdx(Inst.getOpcode(), AMDGPU::OpName::dst_sel);
2075 const auto &Op = Inst.getOperand(OpNum);
2076 if (!Op.isImm() || Op.getImm() != AMDGPU::SDWA::SdwaSel::DWORD) {
2077 return Match_InvalidOperand;
2078 }
2079 }
2080
2081 if ((TSFlags & SIInstrFlags::FLAT) && !hasFlatOffsets()) {
2082 // FIXME: Produces an error without the correct column reported.
2083 auto OpNum =
2084 AMDGPU::getNamedOperandIdx(Inst.getOpcode(), AMDGPU::OpName::offset);
2085 const auto &Op = Inst.getOperand(OpNum);
2086 if (Op.getImm() != 0)
2087 return Match_InvalidOperand;
2088 }
2089
2090 return Match_Success;
2091}
2092
2093// What asm variants we should check
2094ArrayRef<unsigned> AMDGPUAsmParser::getMatchedVariants() const {
2095 if (getForcedEncodingSize() == 32) {
2096 static const unsigned Variants[] = {AMDGPUAsmVariants::DEFAULT};
2097 return makeArrayRef(Variants);
2098 }
2099
2100 if (isForcedVOP3()) {
2101 static const unsigned Variants[] = {AMDGPUAsmVariants::VOP3};
2102 return makeArrayRef(Variants);
2103 }
2104
2105 if (isForcedSDWA()) {
2106 static const unsigned Variants[] = {AMDGPUAsmVariants::SDWA,
2107 AMDGPUAsmVariants::SDWA9};
2108 return makeArrayRef(Variants);
2109 }
2110
2111 if (isForcedDPP()) {
2112 static const unsigned Variants[] = {AMDGPUAsmVariants::DPP};
2113 return makeArrayRef(Variants);
2114 }
2115
2116 static const unsigned Variants[] = {
2117 AMDGPUAsmVariants::DEFAULT, AMDGPUAsmVariants::VOP3,
2118 AMDGPUAsmVariants::SDWA, AMDGPUAsmVariants::SDWA9, AMDGPUAsmVariants::DPP
2119 };
2120
2121 return makeArrayRef(Variants);
2122}
2123
2124unsigned AMDGPUAsmParser::findImplicitSGPRReadInVOP(const MCInst &Inst) const {
2125 const MCInstrDesc &Desc = MII.get(Inst.getOpcode());
2126 const unsigned Num = Desc.getNumImplicitUses();
2127 for (unsigned i = 0; i < Num; ++i) {
2128 unsigned Reg = Desc.ImplicitUses[i];
2129 switch (Reg) {
2130 case AMDGPU::FLAT_SCR:
2131 case AMDGPU::VCC:
2132 case AMDGPU::M0:
2133 return Reg;
2134 default:
2135 break;
2136 }
2137 }
2138 return AMDGPU::NoRegister;
2139}
2140
2141// NB: This code is correct only when used to check constant
2142 // bus limitations because GFX7 supports no f16 inline constants.
2143// Note that there are no cases when a GFX7 opcode violates
2144// constant bus limitations due to the use of an f16 constant.
2145bool AMDGPUAsmParser::isInlineConstant(const MCInst &Inst,
2146 unsigned OpIdx) const {
2147 const MCInstrDesc &Desc = MII.get(Inst.getOpcode());
2148
2149 if (!AMDGPU::isSISrcOperand(Desc, OpIdx)) {
2150 return false;
2151 }
2152
2153 const MCOperand &MO = Inst.getOperand(OpIdx);
2154
2155 int64_t Val = MO.getImm();
2156 auto OpSize = AMDGPU::getOperandSize(Desc, OpIdx);
2157
2158 switch (OpSize) { // expected operand size
2159 case 8:
2160 return AMDGPU::isInlinableLiteral64(Val, hasInv2PiInlineImm());
2161 case 4:
2162 return AMDGPU::isInlinableLiteral32(Val, hasInv2PiInlineImm());
2163 case 2: {
2164 const unsigned OperandType = Desc.OpInfo[OpIdx].OperandType;
2165 if (OperandType == AMDGPU::OPERAND_REG_INLINE_C_V2INT16 ||
2166 OperandType == AMDGPU::OPERAND_REG_INLINE_C_V2FP16) {
2167 return AMDGPU::isInlinableLiteralV216(Val, hasInv2PiInlineImm());
2168 } else {
2169 return AMDGPU::isInlinableLiteral16(Val, hasInv2PiInlineImm());
2170 }
2171 }
2172 default:
2173 llvm_unreachable("invalid operand size")::llvm::llvm_unreachable_internal("invalid operand size", "/build/llvm-toolchain-snapshot-7~svn325118/lib/Target/AMDGPU/AsmParser/AMDGPUAsmParser.cpp"
, 2173)
;
2174 }
2175}
2176
2177bool AMDGPUAsmParser::usesConstantBus(const MCInst &Inst, unsigned OpIdx) {
2178 const MCOperand &MO = Inst.getOperand(OpIdx);
2179 if (MO.isImm()) {
2180 return !isInlineConstant(Inst, OpIdx);
2181 }
2182 return !MO.isReg() ||
2183 isSGPR(mc2PseudoReg(MO.getReg()), getContext().getRegisterInfo());
2184}
2185
2186bool AMDGPUAsmParser::validateConstantBusLimitations(const MCInst &Inst) {
2187 const unsigned Opcode = Inst.getOpcode();
2188 const MCInstrDesc &Desc = MII.get(Opcode);
2189 unsigned ConstantBusUseCount = 0;
2190
2191 if (Desc.TSFlags &
2192 (SIInstrFlags::VOPC |
2193 SIInstrFlags::VOP1 | SIInstrFlags::VOP2 |
2194 SIInstrFlags::VOP3 | SIInstrFlags::VOP3P |
2195 SIInstrFlags::SDWA)) {
2196 // Check special imm operands (used by madmk, etc)
2197 if (AMDGPU::getNamedOperandIdx(Opcode, AMDGPU::OpName::imm) != -1) {
2198 ++ConstantBusUseCount;
2199 }
2200
2201 unsigned SGPRUsed = findImplicitSGPRReadInVOP(Inst);
2202 if (SGPRUsed != AMDGPU::NoRegister) {
2203 ++ConstantBusUseCount;
2204 }
2205
2206 const int Src0Idx = AMDGPU::getNamedOperandIdx(Opcode, AMDGPU::OpName::src0);
2207 const int Src1Idx = AMDGPU::getNamedOperandIdx(Opcode, AMDGPU::OpName::src1);
2208 const int Src2Idx = AMDGPU::getNamedOperandIdx(Opcode, AMDGPU::OpName::src2);
2209
2210 const int OpIndices[] = { Src0Idx, Src1Idx, Src2Idx };
2211
2212 for (int OpIdx : OpIndices) {
2213 if (OpIdx == -1) break;
2214
2215 const MCOperand &MO = Inst.getOperand(OpIdx);
2216 if (usesConstantBus(Inst, OpIdx)) {
2217 if (MO.isReg()) {
2218 const unsigned Reg = mc2PseudoReg(MO.getReg());
2219 // Pairs of registers with a partial intersection like these
2220 // s0, s[0:1]
2221 // flat_scratch_lo, flat_scratch
2222 // flat_scratch_lo, flat_scratch_hi
2223 // are theoretically valid but they are disabled anyway.
2224 // Note that this code mimics SIInstrInfo::verifyInstruction
2225 if (Reg != SGPRUsed) {
2226 ++ConstantBusUseCount;
2227 }
2228 SGPRUsed = Reg;
2229 } else { // Expression or a literal
2230 ++ConstantBusUseCount;
2231 }
2232 }
2233 }
2234 }
2235
2236 return ConstantBusUseCount <= 1;
2237}
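
A simplified model of the rule validateConstantBusLimitations checks: a VALU instruction may read the constant bus (SGPRs and literals) at most once, and a repeated SGPR counts only once. SrcKind and Src are illustrative stand-ins for the MC operand types. Under this model v_add_f32 v0, s0, s1 fails while v_add_f32 v0, s0, s0 passes.

    // Count constant-bus reads across the source operands.
    enum class SrcKind { VGPR, SGPR, Literal };
    struct Src { SrcKind Kind; unsigned Reg; };

    static bool fitsConstantBus(const Src *Srcs, int N) {
      unsigned Uses = 0, SGPRUsed = ~0u;
      for (int I = 0; I < N; ++I) {
        if (Srcs[I].Kind == SrcKind::Literal) {
          ++Uses;                       // every literal costs one read
        } else if (Srcs[I].Kind == SrcKind::SGPR) {
          if (Srcs[I].Reg != SGPRUsed)  // a repeated SGPR is free
            ++Uses;
          SGPRUsed = Srcs[I].Reg;
        }
      }
      return Uses <= 1;
    }
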
2238
2239bool AMDGPUAsmParser::validateEarlyClobberLimitations(const MCInst &Inst) {
2240 const unsigned Opcode = Inst.getOpcode();
2241 const MCInstrDesc &Desc = MII.get(Opcode);
2242
2243 const int DstIdx = AMDGPU::getNamedOperandIdx(Opcode, AMDGPU::OpName::vdst);
2244 if (DstIdx == -1 ||
2245 Desc.getOperandConstraint(DstIdx, MCOI::EARLY_CLOBBER) == -1) {
2246 return true;
2247 }
2248
2249 const MCRegisterInfo *TRI = getContext().getRegisterInfo();
2250
2251 const int Src0Idx = AMDGPU::getNamedOperandIdx(Opcode, AMDGPU::OpName::src0);
2252 const int Src1Idx = AMDGPU::getNamedOperandIdx(Opcode, AMDGPU::OpName::src1);
2253 const int Src2Idx = AMDGPU::getNamedOperandIdx(Opcode, AMDGPU::OpName::src2);
2254
2255 assert(DstIdx != -1);
2256 const MCOperand &Dst = Inst.getOperand(DstIdx);
2257 assert(Dst.isReg());
2258 const unsigned DstReg = mc2PseudoReg(Dst.getReg());
2259
2260 const int SrcIndices[] = { Src0Idx, Src1Idx, Src2Idx };
2261
2262 for (int SrcIdx : SrcIndices) {
2263 if (SrcIdx == -1) break;
2264 const MCOperand &Src = Inst.getOperand(SrcIdx);
2265 if (Src.isReg()) {
2266 const unsigned SrcReg = mc2PseudoReg(Src.getReg());
2267 if (isRegIntersect(DstReg, SrcReg, TRI)) {
2268 return false;
2269 }
2270 }
2271 }
2272
2273 return true;
2274}
2275
2276bool AMDGPUAsmParser::validateIntClampSupported(const MCInst &Inst) {
2277
2278 const unsigned Opc = Inst.getOpcode();
2279 const MCInstrDesc &Desc = MII.get(Opc);
2280
2281 if ((Desc.TSFlags & SIInstrFlags::IntClamp) != 0 && !hasIntClamp()) {
2282 int ClampIdx = AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::clamp);
2283 assert(ClampIdx != -1);
2284 return Inst.getOperand(ClampIdx).getImm() == 0;
2285 }
2286
2287 return true;
2288}
2289
2290bool AMDGPUAsmParser::validateMIMGDataSize(const MCInst &Inst) {
2291
2292 const unsigned Opc = Inst.getOpcode();
2293 const MCInstrDesc &Desc = MII.get(Opc);
2294
2295 if ((Desc.TSFlags & SIInstrFlags::MIMG) == 0)
2296 return true;
2297
2298 // Gather4 instructions seem to have special rules not described in the spec.
2299 if (Desc.TSFlags & SIInstrFlags::Gather4)
2300 return true;
2301
2302 int VDataIdx = AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::vdata);
2303 int DMaskIdx = AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::dmask);
2304 int TFEIdx = AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::tfe);
2305
2306 assert(VDataIdx != -1);
2307 assert(DMaskIdx != -1);
2308 assert(TFEIdx != -1);
2309
2310 unsigned VDataSize = AMDGPU::getRegOperandSize(getMRI(), Desc, VDataIdx);
2311 unsigned TFESize = Inst.getOperand(TFEIdx).getImm()? 1 : 0;
2312 unsigned DMask = Inst.getOperand(DMaskIdx).getImm() & 0xf;
2313 if (DMask == 0)
2314 DMask = 1;
2315
2316 unsigned DataSize = countPopulation(DMask);
2317 if ((Desc.TSFlags & SIInstrFlags::D16) != 0 && hasPackedD16()) {
2318 DataSize = (DataSize + 1) / 2;
2319 }
2320
2321 return (VDataSize / 4) == DataSize + TFESize;
2322}
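
A worked form of the size check above; vdataSizeMatches is illustrative. For dmask = 0b0111 and tfe = 1, three data dwords plus one tfe dword require a 4-dword vdata register.

    #include <bitset>

    // vdata must provide one dword per enabled dmask channel (dmask == 0
    // acts as 1), halved for packed D16, plus one dword when tfe is set.
    static bool vdataSizeMatches(unsigned VDataDwords, unsigned DMask,
                                 bool TFE, bool PackedD16) {
      unsigned Mask = DMask & 0xf;
      if (Mask == 0)
        Mask = 1;
      unsigned Data = std::bitset<4>(Mask).count();
      if (PackedD16)
        Data = (Data + 1) / 2;  // two 16-bit components share one dword
      return VDataDwords == Data + (TFE ? 1u : 0u);
    }
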
2323
2324bool AMDGPUAsmParser::validateMIMGAtomicDMask(const MCInst &Inst) {
2325
2326 const unsigned Opc = Inst.getOpcode();
2327 const MCInstrDesc &Desc = MII.get(Opc);
2328
2329 if ((Desc.TSFlags & SIInstrFlags::MIMG) == 0)
2330 return true;
2331 if (!Desc.mayLoad() || !Desc.mayStore())
2332 return true; // Not atomic
2333
2334 int DMaskIdx = AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::dmask);
2335 unsigned DMask = Inst.getOperand(DMaskIdx).getImm() & 0xf;
2336
2337 // This is an incomplete check because image_atomic_cmpswap
2338 // may only use 0x3 and 0xf while other atomic operations
2339 // may use 0x1 and 0x3. However, these limitations are
2340 // verified when we check that dmask matches dst size.
2341 return DMask == 0x1 || DMask == 0x3 || DMask == 0xf;
2342}
2343
2344bool AMDGPUAsmParser::validateMIMGR128(const MCInst &Inst) {
2345
2346 const unsigned Opc = Inst.getOpcode();
2347 const MCInstrDesc &Desc = MII.get(Opc);
2348
2349 if ((Desc.TSFlags & SIInstrFlags::MIMG) == 0)
2350 return true;
2351
2352 int Idx = AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::r128);
2353 assert(Idx != -1);
2354
2355 bool R128 = (Inst.getOperand(Idx).getImm() != 0);
2356
2357 return !R128 || hasMIMG_R128();
2358}
2359
2360bool AMDGPUAsmParser::validateMIMGD16(const MCInst &Inst) {
2361
2362 const unsigned Opc = Inst.getOpcode();
2363 const MCInstrDesc &Desc = MII.get(Opc);
2364
2365 if ((Desc.TSFlags & SIInstrFlags::MIMG) == 0)
2366 return true;
2367 if ((Desc.TSFlags & SIInstrFlags::D16) == 0)
2368 return true;
2369
2370 return !isCI() && !isSI();
2371}
2372
2373bool AMDGPUAsmParser::validateInstruction(const MCInst &Inst,
2374 const SMLoc &IDLoc) {
2375 if (!validateConstantBusLimitations(Inst)) {
2376 Error(IDLoc,
2377 "invalid operand (violates constant bus restrictions)");
2378 return false;
2379 }
2380 if (!validateEarlyClobberLimitations(Inst)) {
2381 Error(IDLoc,
2382 "destination must be different than all sources");
2383 return false;
2384 }
2385 if (!validateIntClampSupported(Inst)) {
2386 Error(IDLoc,
2387 "integer clamping is not supported on this GPU");
2388 return false;
2389 }
2390 if (!validateMIMGR128(Inst)) {
2391 Error(IDLoc,
2392 "r128 modifier is not supported on this GPU");
2393 return false;
2394 }
2395 // For MUBUF/MTBUF, d16 is part of the opcode, so there is nothing to validate.
2396 if (!validateMIMGD16(Inst)) {
2397 Error(IDLoc,
2398 "d16 modifier is not supported on this GPU");
2399 return false;
2400 }
2401 if (!validateMIMGDataSize(Inst)) {
2402 Error(IDLoc,
2403 "image data size does not match dmask and tfe");
2404 return false;
2405 }
2406 if (!validateMIMGAtomicDMask(Inst)) {
2407 Error(IDLoc,
2408 "invalid atomic image dmask");
2409 return false;
2410 }
2411
2412 return true;
2413}
2414
2415static std::string AMDGPUMnemonicSpellCheck(StringRef S, uint64_t FBS,
2416 unsigned VariantID = 0);
2417
2418bool AMDGPUAsmParser::MatchAndEmitInstruction(SMLoc IDLoc, unsigned &Opcode,
2419 OperandVector &Operands,
2420 MCStreamer &Out,
2421 uint64_t &ErrorInfo,
2422 bool MatchingInlineAsm) {
2423 MCInst Inst;
2424 unsigned Result = Match_Success;
2425 for (auto Variant : getMatchedVariants()) {
2426 uint64_t EI;
2427 auto R = MatchInstructionImpl(Operands, Inst, EI, MatchingInlineAsm,
2428 Variant);
2429 // We order match statuses from least to most specific and use the most
2430 // specific status as the result:
2431 // Match_MnemonicFail < Match_InvalidOperand < Match_MissingFeature < Match_PreferE32
2432 if ((R == Match_Success) ||
2433 (R == Match_PreferE32) ||
2434 (R == Match_MissingFeature && Result != Match_PreferE32) ||
2435 (R == Match_InvalidOperand && Result != Match_MissingFeature
2436 && Result != Match_PreferE32) ||
2437 (R == Match_MnemonicFail && Result != Match_InvalidOperand
2438 && Result != Match_MissingFeature
2439 && Result != Match_PreferE32)) {
2440 Result = R;
2441 ErrorInfo = EI;
2442 }
2443 if (R == Match_Success)
2444 break;
2445 }
2446
2447 switch (Result) {
2448 default: break;
2449 case Match_Success:
2450 if (!validateInstruction(Inst, IDLoc)) {
2451 return true;
2452 }
2453 Inst.setLoc(IDLoc);
2454 Out.EmitInstruction(Inst, getSTI());
2455 return false;
2456
2457 case Match_MissingFeature:
2458 return Error(IDLoc, "instruction not supported on this GPU");
2459
2460 case Match_MnemonicFail: {
2461 uint64_t FBS = ComputeAvailableFeatures(getSTI().getFeatureBits());
2462 std::string Suggestion = AMDGPUMnemonicSpellCheck(
2463 ((AMDGPUOperand &)*Operands[0]).getToken(), FBS);
2464 return Error(IDLoc, "invalid instruction" + Suggestion,
2465 ((AMDGPUOperand &)*Operands[0]).getLocRange());
2466 }
2467
2468 case Match_InvalidOperand: {
2469 SMLoc ErrorLoc = IDLoc;
2470 if (ErrorInfo != ~0ULL) {
2471 if (ErrorInfo >= Operands.size()) {
2472 return Error(IDLoc, "too few operands for instruction");
2473 }
2474 ErrorLoc = ((AMDGPUOperand &)*Operands[ErrorInfo]).getStartLoc();
2475 if (ErrorLoc == SMLoc())
2476 ErrorLoc = IDLoc;
2477 }
2478 return Error(ErrorLoc, "invalid operand for instruction");
2479 }
2480
2481 case Match_PreferE32:
2482 return Error(IDLoc, "internal error: instruction without _e64 suffix "
2483 "should be encoded as e32");
2484 }
2485 llvm_unreachable("Implement any new match types added!")::llvm::llvm_unreachable_internal("Implement any new match types added!"
, "/build/llvm-toolchain-snapshot-7~svn325118/lib/Target/AMDGPU/AsmParser/AMDGPUAsmParser.cpp"
, 2485)
;
2486}
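
The status ordering in the variant loop above amounts to keeping the maximum of a specificity ranking; a sketch with illustrative stand-ins for the generated matcher's enums:

    // Stand-ins for the matcher's status codes, ordered least to most
    // specific; not the real enumerators.
    enum Status { MnemonicFail, InvalidOperand, MissingFeature, PreferE32,
                  Success };

    // The loop keeps the most specific status seen across all variants
    // and stops early on Success.
    static Status pickBest(Status Best, Status R) {
      return R > Best ? R : Best;
    }
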
2487
2488bool AMDGPUAsmParser::ParseAsAbsoluteExpression(uint32_t &Ret) {
2489 int64_t Tmp = -1;
2490 if (getLexer().isNot(AsmToken::Integer) && getLexer().isNot(AsmToken::Identifier)) {
4: Calling 'MCAsmParserExtension::getLexer'
7: Returning from 'MCAsmParserExtension::getLexer'
8: Calling 'MCAsmLexer::isNot'
14: Returning from 'MCAsmLexer::isNot'
2491 return true;
2492 }
2493 if (getParser().parseAbsoluteExpression(Tmp)) {
15: Calling 'MCAsmParserExtension::getParser'
16: Returning from 'MCAsmParserExtension::getParser'
17: Assuming the condition is true
18: Taking true branch
2494 return true;
2495 }
2496 Ret = static_cast<uint32_t>(Tmp);
2497 return false;
2498}
2499
2500bool AMDGPUAsmParser::ParseDirectiveMajorMinor(uint32_t &Major,
2501 uint32_t &Minor) {
2502 if (ParseAsAbsoluteExpression(Major))
3: Calling 'AMDGPUAsmParser::ParseAsAbsoluteExpression'
19: Returning from 'AMDGPUAsmParser::ParseAsAbsoluteExpression'
20: Taking true branch
2503 return TokError("invalid major version");
21: Calling constructor for 'Twine'
28: Returning from constructor for 'Twine'
29: Calling 'MCAsmParserExtension::TokError'
32: Returning from 'MCAsmParserExtension::TokError'
2504
2505 if (getLexer().isNot(AsmToken::Comma))
2506 return TokError("minor version number required, comma expected");
2507 Lex();
2508
2509 if (ParseAsAbsoluteExpression(Minor))
2510 return TokError("invalid minor version");
2511
2512 return false;
2513}
2514
2515bool AMDGPUAsmParser::ParseDirectiveHSACodeObjectVersion() {
2516 uint32_t Major;
1: 'Major' declared without an initial value
2517 uint32_t Minor;
2518
2519 if (ParseDirectiveMajorMinor(Major, Minor))
2: Calling 'AMDGPUAsmParser::ParseDirectiveMajorMinor'
33: Returning from 'AMDGPUAsmParser::ParseDirectiveMajorMinor'
34: Assuming the condition is false
35: Taking false branch
2520 return true;
2521
2522 getTargetStreamer().EmitDirectiveHSACodeObjectVersion(Major, Minor);
36: 1st function call argument is an uninitialized value
2523 return false;
2524}
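
This is the function the report flags: on the path where ParseAsAbsoluteExpression bails out early and ParseDirectiveMajorMinor is nevertheless assumed to return false, Major reaches line 2522 uninitialized. Assuming the TokError paths really do return true, the finding is conservative, but zero-initializing the outputs makes the invariant explicit; a minimal sketch of such a fix:

    bool AMDGPUAsmParser::ParseDirectiveHSACodeObjectVersion() {
      uint32_t Major = 0;  // definite values even if parsing bails out early
      uint32_t Minor = 0;

      if (ParseDirectiveMajorMinor(Major, Minor))
        return true;

      getTargetStreamer().EmitDirectiveHSACodeObjectVersion(Major, Minor);
      return false;
    }
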
2525
2526bool AMDGPUAsmParser::ParseDirectiveHSACodeObjectISA() {
2527 uint32_t Major;
2528 uint32_t Minor;
2529 uint32_t Stepping;
2530 StringRef VendorName;
2531 StringRef ArchName;
2532
2533 // If this directive has no arguments, then use the ISA version for the
2534 // targeted GPU.
2535 if (getLexer().is(AsmToken::EndOfStatement)) {
2536 AMDGPU::IsaInfo::IsaVersion ISA =
2537 AMDGPU::IsaInfo::getIsaVersion(getFeatureBits());
2538 getTargetStreamer().EmitDirectiveHSACodeObjectISA(ISA.Major, ISA.Minor,
2539 ISA.Stepping,
2540 "AMD", "AMDGPU");
2541 return false;
2542 }
2543
2544 if (ParseDirectiveMajorMinor(Major, Minor))
2545 return true;
2546
2547 if (getLexer().isNot(AsmToken::Comma))
2548 return TokError("stepping version number required, comma expected");
2549 Lex();
2550
2551 if (ParseAsAbsoluteExpression(Stepping))
2552 return TokError("invalid stepping version");
2553
2554 if (getLexer().isNot(AsmToken::Comma))
2555 return TokError("vendor name required, comma expected");
2556 Lex();
2557
2558 if (getLexer().isNot(AsmToken::String))
2559 return TokError("invalid vendor name");
2560
2561 VendorName = getLexer().getTok().getStringContents();
2562 Lex();
2563
2564 if (getLexer().isNot(AsmToken::Comma))
2565 return TokError("arch name required, comma expected");
2566 Lex();
2567
2568 if (getLexer().isNot(AsmToken::String))
2569 return TokError("invalid arch name");
2570
2571 ArchName = getLexer().getTok().getStringContents();
2572 Lex();
2573
2574 getTargetStreamer().EmitDirectiveHSACodeObjectISA(Major, Minor, Stepping,
2575 VendorName, ArchName);
2576 return false;
2577}
2578
2579bool AMDGPUAsmParser::ParseAMDKernelCodeTValue(StringRef ID,
2580 amd_kernel_code_t &Header) {
2581 SmallString<40> ErrStr;
2582 raw_svector_ostream Err(ErrStr);
2583 if (!parseAmdKernelCodeField(ID, getParser(), Header, Err)) {
2584 return TokError(Err.str());
2585 }
2586 Lex();
2587 return false;
2588}
2589
2590bool AMDGPUAsmParser::ParseDirectiveAMDKernelCodeT() {
2591 amd_kernel_code_t Header;
2592 AMDGPU::initDefaultAMDKernelCodeT(Header, getFeatureBits());
2593
2594 while (true) {
2595 // Lex EndOfStatement. This is in a while loop, because lexing a comment
2596 // will set the current token to EndOfStatement.
2597 while(getLexer().is(AsmToken::EndOfStatement))
2598 Lex();
2599
2600 if (getLexer().isNot(AsmToken::Identifier))
2601 return TokError("expected value identifier or .end_amd_kernel_code_t");
2602
2603 StringRef ID = getLexer().getTok().getIdentifier();
2604 Lex();
2605
2606 if (ID == ".end_amd_kernel_code_t")
2607 break;
2608
2609 if (ParseAMDKernelCodeTValue(ID, Header))
2610 return true;
2611 }
2612
2613 getTargetStreamer().EmitAMDKernelCodeT(Header);
2614
2615 return false;
2616}
2617
2618bool AMDGPUAsmParser::ParseDirectiveAMDGPUHsaKernel() {
2619 if (getLexer().isNot(AsmToken::Identifier))
2620 return TokError("expected symbol name");
2621
2622 StringRef KernelName = Parser.getTok().getString();
2623
2624 getTargetStreamer().EmitAMDGPUSymbolType(KernelName,
2625 ELF::STT_AMDGPU_HSA_KERNEL);
2626 Lex();
2627 KernelScope.initialize(getContext());
2628 return false;
2629}
2630
2631bool AMDGPUAsmParser::ParseDirectiveISAVersion() {
2632 if (getSTI().getTargetTriple().getArch() != Triple::amdgcn) {
2633 return Error(getParser().getTok().getLoc(),
2634 ".amd_amdgpu_isa directive is not available on non-amdgcn "
2635 "architectures");
2636 }
2637
2638 auto ISAVersionStringFromASM = getLexer().getTok().getStringContents();
2639
2640 std::string ISAVersionStringFromSTI;
2641 raw_string_ostream ISAVersionStreamFromSTI(ISAVersionStringFromSTI);
2642 IsaInfo::streamIsaVersion(&getSTI(), ISAVersionStreamFromSTI);
2643
2644 if (ISAVersionStringFromASM != ISAVersionStreamFromSTI.str()) {
2645 return Error(getParser().getTok().getLoc(),
2646 ".amd_amdgpu_isa directive does not match triple and/or mcpu "
2647 "arguments specified through the command line");
2648 }
2649
2650 getTargetStreamer().EmitISAVersion(ISAVersionStreamFromSTI.str());
2651 Lex();
2652
2653 return false;
2654}
2655
2656bool AMDGPUAsmParser::ParseDirectiveHSAMetadata() {
2657 if (getSTI().getTargetTriple().getOS() != Triple::AMDHSA) {
2658 return Error(getParser().getTok().getLoc(),
2659 (Twine(HSAMD::AssemblerDirectiveBegin) + Twine(" directive is "
2660 "not available on non-amdhsa OSes")).str());
2661 }
2662
2663 std::string HSAMetadataString;
2664 raw_string_ostream YamlStream(HSAMetadataString);
2665
2666 getLexer().setSkipSpace(false);
2667
2668 bool FoundEnd = false;
2669 while (!getLexer().is(AsmToken::Eof)) {
2670 while (getLexer().is(AsmToken::Space)) {
2671 YamlStream << getLexer().getTok().getString();
2672 Lex();
2673 }
2674
2675 if (getLexer().is(AsmToken::Identifier)) {
2676 StringRef ID = getLexer().getTok().getIdentifier();
2677 if (ID == AMDGPU::HSAMD::AssemblerDirectiveEnd) {
2678 Lex();
2679 FoundEnd = true;
2680 break;
2681 }
2682 }
2683
2684 YamlStream << Parser.parseStringToEndOfStatement()
2685 << getContext().getAsmInfo()->getSeparatorString();
2686
2687 Parser.eatToEndOfStatement();
2688 }
2689
2690 getLexer().setSkipSpace(true);
2691
2692 if (getLexer().is(AsmToken::Eof) && !FoundEnd) {
2693 return TokError(Twine("expected directive ") +
2694 Twine(HSAMD::AssemblerDirectiveEnd) + Twine(" not found"));
2695 }
2696
2697 YamlStream.flush();
2698
2699 if (!getTargetStreamer().EmitHSAMetadata(HSAMetadataString))
2700 return Error(getParser().getTok().getLoc(), "invalid HSA metadata");
2701
2702 return false;
2703}
2704
2705bool AMDGPUAsmParser::ParseDirectivePALMetadata() {
2706 if (getSTI().getTargetTriple().getOS() != Triple::AMDPAL) {
2707 return Error(getParser().getTok().getLoc(),
2708 (Twine(PALMD::AssemblerDirective) + Twine(" directive is "
2709 "not available on non-amdpal OSes")).str());
2710 }
2711
2712 PALMD::Metadata PALMetadata;
2713 for (;;) {
2714 uint32_t Value;
2715 if (ParseAsAbsoluteExpression(Value)) {
2716 return TokError(Twine("invalid value in ") +
2717 Twine(PALMD::AssemblerDirective));
2718 }
2719 PALMetadata.push_back(Value);
2720 if (getLexer().isNot(AsmToken::Comma))
2721 break;
2722 Lex();
2723 }
2724 getTargetStreamer().EmitPALMetadata(PALMetadata);
2725 return false;
2726}
2727
2728bool AMDGPUAsmParser::ParseDirective(AsmToken DirectiveID) {
2729 StringRef IDVal = DirectiveID.getString();
2730
2731 if (IDVal == ".hsa_code_object_version")
2732 return ParseDirectiveHSACodeObjectVersion();
2733
2734 if (IDVal == ".hsa_code_object_isa")
2735 return ParseDirectiveHSACodeObjectISA();
2736
2737 if (IDVal == ".amd_kernel_code_t")
2738 return ParseDirectiveAMDKernelCodeT();
2739
2740 if (IDVal == ".amdgpu_hsa_kernel")
2741 return ParseDirectiveAMDGPUHsaKernel();
2742
2743 if (IDVal == ".amd_amdgpu_isa")
2744 return ParseDirectiveISAVersion();
2745
2746 if (IDVal == AMDGPU::HSAMD::AssemblerDirectiveBegin)
2747 return ParseDirectiveHSAMetadata();
2748
2749 if (IDVal == PALMD::AssemblerDirective)
2750 return ParseDirectivePALMetadata();
2751
2752 return true;
2753}
2754
2755bool AMDGPUAsmParser::subtargetHasRegister(const MCRegisterInfo &MRI,
2756 unsigned RegNo) const {
2757
2758 for (MCRegAliasIterator R(AMDGPU::TTMP12_TTMP13_TTMP14_TTMP15, &MRI, true);
2759 R.isValid(); ++R) {
2760 if (*R == RegNo)
2761 return isGFX9();
2762 }
2763
2764 switch (RegNo) {
2765 case AMDGPU::TBA:
2766 case AMDGPU::TBA_LO:
2767 case AMDGPU::TBA_HI:
2768 case AMDGPU::TMA:
2769 case AMDGPU::TMA_LO:
2770 case AMDGPU::TMA_HI:
2771 return !isGFX9();
2772 case AMDGPU::XNACK_MASK:
2773 case AMDGPU::XNACK_MASK_LO:
2774 case AMDGPU::XNACK_MASK_HI:
2775 return !isCI() && !isSI() && hasXNACK();
2776 default:
2777 break;
2778 }
2779
2780 if (isCI())
2781 return true;
2782
2783 if (isSI()) {
2784 // No flat_scr
2785 switch (RegNo) {
2786 case AMDGPU::FLAT_SCR:
2787 case AMDGPU::FLAT_SCR_LO:
2788 case AMDGPU::FLAT_SCR_HI:
2789 return false;
2790 default:
2791 return true;
2792 }
2793 }
2794
2795 // VI only has 102 SGPRs, so make sure we aren't trying to use the 2 more that
2796 // SI/CI have.
2797 for (MCRegAliasIterator R(AMDGPU::SGPR102_SGPR103, &MRI, true);
2798 R.isValid(); ++R) {
2799 if (*R == RegNo)
2800 return false;
2801 }
2802
2803 return true;
2804}
2805
2806OperandMatchResultTy
2807AMDGPUAsmParser::parseOperand(OperandVector &Operands, StringRef Mnemonic) {
2808 // Try to parse with a custom parser
2809 OperandMatchResultTy ResTy = MatchOperandParserImpl(Operands, Mnemonic);
2810
2811 // If we successfully parsed the operand or if there was an error parsing,
2812 // we are done.
2813 //
2814 // If we are parsing after we reach EndOfStatement then this means we
2815 // are appending default values to the Operands list. This is only done
2816 // by a custom parser, so we shouldn't continue on to the generic parsing.
2817 if (ResTy == MatchOperand_Success || ResTy == MatchOperand_ParseFail ||
2818 getLexer().is(AsmToken::EndOfStatement))
2819 return ResTy;
2820
2821 ResTy = parseRegOrImm(Operands);
2822
2823 if (ResTy == MatchOperand_Success)
2824 return ResTy;
2825
2826 const auto &Tok = Parser.getTok();
2827 SMLoc S = Tok.getLoc();
2828
2829 const MCExpr *Expr = nullptr;
2830 if (!Parser.parseExpression(Expr)) {
2831 Operands.push_back(AMDGPUOperand::CreateExpr(this, Expr, S));
2832 return MatchOperand_Success;
2833 }
2834
2835 // Possibly this is an instruction flag like 'gds'.
2836 if (Tok.getKind() == AsmToken::Identifier) {
2837 Operands.push_back(AMDGPUOperand::CreateToken(this, Tok.getString(), S));
2838 Parser.Lex();
2839 return MatchOperand_Success;
2840 }
2841
2842 return MatchOperand_NoMatch;
2843}
2844
2845StringRef AMDGPUAsmParser::parseMnemonicSuffix(StringRef Name) {
2846 // Clear any forced encodings from the previous instruction.
2847 setForcedEncodingSize(0);
2848 setForcedDPP(false);
2849 setForcedSDWA(false);
2850
2851 if (Name.endswith("_e64")) {
2852 setForcedEncodingSize(64);
2853 return Name.substr(0, Name.size() - 4);
2854 } else if (Name.endswith("_e32")) {
2855 setForcedEncodingSize(32);
2856 return Name.substr(0, Name.size() - 4);
2857 } else if (Name.endswith("_dpp")) {
2858 setForcedDPP(true);
2859 return Name.substr(0, Name.size() - 4);
2860 } else if (Name.endswith("_sdwa")) {
2861 setForcedSDWA(true);
2862 return Name.substr(0, Name.size() - 5);
2863 }
2864 return Name;
2865}
2866
2867bool AMDGPUAsmParser::ParseInstruction(ParseInstructionInfo &Info,
2868 StringRef Name,
2869 SMLoc NameLoc, OperandVector &Operands) {
2870 // Add the instruction mnemonic
2871 Name = parseMnemonicSuffix(Name);
2872 Operands.push_back(AMDGPUOperand::CreateToken(this, Name, NameLoc));
2873
2874 while (!getLexer().is(AsmToken::EndOfStatement)) {
2875 OperandMatchResultTy Res = parseOperand(Operands, Name);
2876
2877 // Eat the comma or space if there is one.
2878 if (getLexer().is(AsmToken::Comma))
2879 Parser.Lex();
2880
2881 switch (Res) {
2882 case MatchOperand_Success: break;
2883 case MatchOperand_ParseFail:
2884 Error(getLexer().getLoc(), "failed parsing operand.");
2885 while (!getLexer().is(AsmToken::EndOfStatement)) {
2886 Parser.Lex();
2887 }
2888 return true;
2889 case MatchOperand_NoMatch:
2890 Error(getLexer().getLoc(), "not a valid operand.");
2891 while (!getLexer().is(AsmToken::EndOfStatement)) {
2892 Parser.Lex();
2893 }
2894 return true;
2895 }
2896 }
2897
2898 return false;
2899}
2900
2901//===----------------------------------------------------------------------===//
2902// Utility functions
2903//===----------------------------------------------------------------------===//
2904
2905OperandMatchResultTy
2906AMDGPUAsmParser::parseIntWithPrefix(const char *Prefix, int64_t &Int) {
2907 switch(getLexer().getKind()) {
2908 default: return MatchOperand_NoMatch;
2909 case AsmToken::Identifier: {
2910 StringRef Name = Parser.getTok().getString();
2911 if (!Name.equals(Prefix)) {
2912 return MatchOperand_NoMatch;
2913 }
2914
2915 Parser.Lex();
2916 if (getLexer().isNot(AsmToken::Colon))
2917 return MatchOperand_ParseFail;
2918
2919 Parser.Lex();
2920
2921 bool IsMinus = false;
2922 if (getLexer().getKind() == AsmToken::Minus) {
2923 Parser.Lex();
2924 IsMinus = true;
2925 }
2926
2927 if (getLexer().isNot(AsmToken::Integer))
2928 return MatchOperand_ParseFail;
2929
2930 if (getParser().parseAbsoluteExpression(Int))
2931 return MatchOperand_ParseFail;
2932
2933 if (IsMinus)
2934 Int = -Int;
2935 break;
2936 }
2937 }
2938 return MatchOperand_Success;
2939}
2940
2941OperandMatchResultTy
2942AMDGPUAsmParser::parseIntWithPrefix(const char *Prefix, OperandVector &Operands,
2943 AMDGPUOperand::ImmTy ImmTy,
2944 bool (*ConvertResult)(int64_t&)) {
2945 SMLoc S = Parser.getTok().getLoc();
2946 int64_t Value = 0;
2947
2948 OperandMatchResultTy Res = parseIntWithPrefix(Prefix, Value);
2949 if (Res != MatchOperand_Success)
2950 return Res;
2951
2952 if (ConvertResult && !ConvertResult(Value)) {
2953 return MatchOperand_ParseFail;
2954 }
2955
2956 Operands.push_back(AMDGPUOperand::CreateImm(this, Value, S, ImmTy));
2957 return MatchOperand_Success;
2958}
2959
2960OperandMatchResultTy AMDGPUAsmParser::parseOperandArrayWithPrefix(
2961 const char *Prefix,
2962 OperandVector &Operands,
2963 AMDGPUOperand::ImmTy ImmTy,
2964 bool (*ConvertResult)(int64_t&)) {
2965 StringRef Name = Parser.getTok().getString();
2966 if (!Name.equals(Prefix))
2967 return MatchOperand_NoMatch;
2968
2969 Parser.Lex();
2970 if (getLexer().isNot(AsmToken::Colon))
2971 return MatchOperand_ParseFail;
2972
2973 Parser.Lex();
2974 if (getLexer().isNot(AsmToken::LBrac))
2975 return MatchOperand_ParseFail;
2976 Parser.Lex();
2977
2978 unsigned Val = 0;
2979 SMLoc S = Parser.getTok().getLoc();
2980
2981 // FIXME: How to verify the number of elements matches the number of src
2982 // operands?
2983 for (int I = 0; I < 4; ++I) {
2984 if (I != 0) {
2985 if (getLexer().is(AsmToken::RBrac))
2986 break;
2987
2988 if (getLexer().isNot(AsmToken::Comma))
2989 return MatchOperand_ParseFail;
2990 Parser.Lex();
2991 }
2992
2993 if (getLexer().isNot(AsmToken::Integer))
2994 return MatchOperand_ParseFail;
2995
2996 int64_t Op;
2997 if (getParser().parseAbsoluteExpression(Op))
2998 return MatchOperand_ParseFail;
2999
3000 if (Op != 0 && Op != 1)
3001 return MatchOperand_ParseFail;
3002 Val |= (Op << I);
3003 }
3004
3005 Parser.Lex();
3006 Operands.push_back(AMDGPUOperand::CreateImm(this, Val, S, ImmTy));
3007 return MatchOperand_Success;
3008}
3009
3010OperandMatchResultTy
3011AMDGPUAsmParser::parseNamedBit(const char *Name, OperandVector &Operands,
3012 AMDGPUOperand::ImmTy ImmTy) {
3013 int64_t Bit = 0;
3014 SMLoc S = Parser.getTok().getLoc();
3015
3016 // If we are at the end of the statement, this is a default argument, so
3017 // we use the default value.
3018 if (getLexer().isNot(AsmToken::EndOfStatement)) {
3019 switch(getLexer().getKind()) {
3020 case AsmToken::Identifier: {
3021 StringRef Tok = Parser.getTok().getString();
3022 if (Tok == Name) {
3023 Bit = 1;
3024 Parser.Lex();
3025 } else if (Tok.startswith("no") && Tok.endswith(Name)) {
3026 Bit = 0;
3027 Parser.Lex();
3028 } else {
3029 return MatchOperand_NoMatch;
3030 }
3031 break;
3032 }
3033 default:
3034 return MatchOperand_NoMatch;
3035 }
3036 }
3037
3038 Operands.push_back(AMDGPUOperand::CreateImm(this, Bit, S, ImmTy));
3039 return MatchOperand_Success;
3040}
3041
3042static void addOptionalImmOperand(
3043 MCInst& Inst, const OperandVector& Operands,
3044 AMDGPUAsmParser::OptionalImmIndexMap& OptionalIdx,
3045 AMDGPUOperand::ImmTy ImmT,
3046 int64_t Default = 0) {
3047 auto i = OptionalIdx.find(ImmT);
3048 if (i != OptionalIdx.end()) {
3049 unsigned Idx = i->second;
3050 ((AMDGPUOperand &)*Operands[Idx]).addImmOperands(Inst, 1);
3051 } else {
3052 Inst.addOperand(MCOperand::createImm(Default));
3053 }
3054}
3055
3056OperandMatchResultTy
3057AMDGPUAsmParser::parseStringWithPrefix(StringRef Prefix, StringRef &Value) {
3058 if (getLexer().isNot(AsmToken::Identifier)) {
3059 return MatchOperand_NoMatch;
3060 }
3061 StringRef Tok = Parser.getTok().getString();
3062 if (Tok != Prefix) {
3063 return MatchOperand_NoMatch;
3064 }
3065
3066 Parser.Lex();
3067 if (getLexer().isNot(AsmToken::Colon)) {
3068 return MatchOperand_ParseFail;
3069 }
3070
3071 Parser.Lex();
3072 if (getLexer().isNot(AsmToken::Identifier)) {
3073 return MatchOperand_ParseFail;
3074 }
3075
3076 Value = Parser.getTok().getString();
3077 return MatchOperand_Success;
3078}
3079
3080//===----------------------------------------------------------------------===//
3081// ds
3082//===----------------------------------------------------------------------===//
3083
3084void AMDGPUAsmParser::cvtDSOffset01(MCInst &Inst,
3085 const OperandVector &Operands) {
3086 OptionalImmIndexMap OptionalIdx;
3087
3088 for (unsigned i = 1, e = Operands.size(); i != e; ++i) {
3089 AMDGPUOperand &Op = ((AMDGPUOperand &)*Operands[i]);
3090
3091 // Add the register arguments
3092 if (Op.isReg()) {
3093 Op.addRegOperands(Inst, 1);
3094 continue;
3095 }
3096
3097 // Handle optional arguments
3098 OptionalIdx[Op.getImmTy()] = i;
3099 }
3100
3101 addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyOffset0);
3102 addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyOffset1);
3103 addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyGDS);
3104
3105 Inst.addOperand(MCOperand::createReg(AMDGPU::M0)); // m0
3106}
3107
3108void AMDGPUAsmParser::cvtDSImpl(MCInst &Inst, const OperandVector &Operands,
3109 bool IsGdsHardcoded) {
3110 OptionalImmIndexMap OptionalIdx;
3111
3112 for (unsigned i = 1, e = Operands.size(); i != e; ++i) {
3113 AMDGPUOperand &Op = ((AMDGPUOperand &)*Operands[i]);
3114
3115 // Add the register arguments
3116 if (Op.isReg()) {
3117 Op.addRegOperands(Inst, 1);
3118 continue;
3119 }
3120
3121 if (Op.isToken() && Op.getToken() == "gds") {
3122 IsGdsHardcoded = true;
3123 continue;
3124 }
3125
3126 // Handle optional arguments
3127 OptionalIdx[Op.getImmTy()] = i;
3128 }
3129
3130 AMDGPUOperand::ImmTy OffsetType =
3131 (Inst.getOpcode() == AMDGPU::DS_SWIZZLE_B32_si ||
3132 Inst.getOpcode() == AMDGPU::DS_SWIZZLE_B32_vi) ? AMDGPUOperand::ImmTySwizzle :
3133 AMDGPUOperand::ImmTyOffset;
3134
3135 addOptionalImmOperand(Inst, Operands, OptionalIdx, OffsetType);
3136
3137 if (!IsGdsHardcoded) {
3138 addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyGDS);
3139 }
3140 Inst.addOperand(MCOperand::createReg(AMDGPU::M0)); // m0
3141}
3142
3143void AMDGPUAsmParser::cvtExp(MCInst &Inst, const OperandVector &Operands) {
3144 OptionalImmIndexMap OptionalIdx;
3145
3146 unsigned OperandIdx[4];
3147 unsigned EnMask = 0;
3148 int SrcIdx = 0;
3149
3150 for (unsigned i = 1, e = Operands.size(); i != e; ++i) {
3151 AMDGPUOperand &Op = ((AMDGPUOperand &)*Operands[i]);
3152
3153 // Add the register arguments
3154 if (Op.isReg()) {
3155 assert(SrcIdx < 4);
3156 OperandIdx[SrcIdx] = Inst.size();
3157 Op.addRegOperands(Inst, 1);
3158 ++SrcIdx;
3159 continue;
3160 }
3161
3162 if (Op.isOff()) {
3163 assert(SrcIdx < 4);
3164 OperandIdx[SrcIdx] = Inst.size();
3165 Inst.addOperand(MCOperand::createReg(AMDGPU::NoRegister));
3166 ++SrcIdx;
3167 continue;
3168 }
3169
3170 if (Op.isImm() && Op.getImmTy() == AMDGPUOperand::ImmTyExpTgt) {
3171 Op.addImmOperands(Inst, 1);
3172 continue;
3173 }
3174
3175 if (Op.isToken() && Op.getToken() == "done")
3176 continue;
3177
3178 // Handle optional arguments
3179 OptionalIdx[Op.getImmTy()] = i;
3180 }
3181
3182 assert(SrcIdx == 4);
3183
3184 bool Compr = false;
3185 if (OptionalIdx.find(AMDGPUOperand::ImmTyExpCompr) != OptionalIdx.end()) {
3186 Compr = true;
3187 Inst.getOperand(OperandIdx[1]) = Inst.getOperand(OperandIdx[2]);
3188 Inst.getOperand(OperandIdx[2]).setReg(AMDGPU::NoRegister);
3189 Inst.getOperand(OperandIdx[3]).setReg(AMDGPU::NoRegister);
3190 }
3191
3192 for (auto i = 0; i < SrcIdx; ++i) {
3193 if (Inst.getOperand(OperandIdx[i]).getReg() != AMDGPU::NoRegister) {
3194 EnMask |= Compr? (0x3 << i * 2) : (0x1 << i);
3195 }
3196 }
3197
3198 addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyExpVM);
3199 addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyExpCompr);
3200
3201 Inst.addOperand(MCOperand::createImm(EnMask));
3202}
3203
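// A worked example of the enable-mask computation above (derived from the
// code, not from ISA documentation): for "exp mrt0 v0, v1, off, v3" the
// sources are [v0, v1, off, v3], so EnMask = 0x1 | 0x2 | 0x8 = 0xB. With
// "compr", src1 takes src2's register and src2/src3 are cleared, so two live
// compressed sources give EnMask = 0x3 | (0x3 << 2) = 0xF: each live 32-bit
// source carries two 16-bit components and therefore enables two bits.
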
3204//===----------------------------------------------------------------------===//
3205// s_waitcnt
3206//===----------------------------------------------------------------------===//
3207
3208static bool
3209encodeCnt(
3210 const AMDGPU::IsaInfo::IsaVersion ISA,
3211 int64_t &IntVal,
3212 int64_t CntVal,
3213 bool Saturate,
3214 unsigned (*encode)(const IsaInfo::IsaVersion &Version, unsigned, unsigned),
3215 unsigned (*decode)(const IsaInfo::IsaVersion &Version, unsigned))
3216{
3217 bool Failed = false;
3218
3219 IntVal = encode(ISA, IntVal, CntVal);
3220 if (CntVal != decode(ISA, IntVal)) {
3221 if (Saturate) {
3222 IntVal = encode(ISA, IntVal, -1);
3223 } else {
3224 Failed = true;
3225 }
3226 }
3227 return Failed;
3228}
3229
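// encodeCnt detects overflow with an encode/decode round trip: if the value
// read back differs from the one requested, the bit field was too narrow, and
// the "_sat" suffix opts into clamping by re-encoding with -1 (all ones).
// A minimal standalone sketch with a toy 4-bit counter field; the real
// encodeVmcnt/decodeVmcnt layouts depend on the ISA version:

static unsigned toyEncodeCnt(unsigned Waitcnt, unsigned Cnt) {
  return (Waitcnt & ~0xFu) | (Cnt & 0xFu);  // 4-bit counter field at bit 0
}
static unsigned toyDecodeCnt(unsigned Waitcnt) { return Waitcnt & 0xFu; }

// For a requested count of 20, toyDecodeCnt(toyEncodeCnt(W, 20)) == 4 != 20,
// so a plain "vmcnt(20)" is rejected ("too large value"), while
// "vmcnt_sat(20)" re-encodes with -1 and stores the field maximum, 15.
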
3230bool AMDGPUAsmParser::parseCnt(int64_t &IntVal) {
3231 StringRef CntName = Parser.getTok().getString();
3232 int64_t CntVal;
3233
3234 Parser.Lex();
3235 if (getLexer().isNot(AsmToken::LParen))
3236 return true;
3237
3238 Parser.Lex();
3239 if (getLexer().isNot(AsmToken::Integer))
3240 return true;
3241
3242 SMLoc ValLoc = Parser.getTok().getLoc();
3243 if (getParser().parseAbsoluteExpression(CntVal))
3244 return true;
3245
3246 AMDGPU::IsaInfo::IsaVersion ISA =
3247 AMDGPU::IsaInfo::getIsaVersion(getFeatureBits());
3248
3249 bool Failed = true;
3250 bool Sat = CntName.endswith("_sat");
3251
3252 if (CntName == "vmcnt" || CntName == "vmcnt_sat") {
3253 Failed = encodeCnt(ISA, IntVal, CntVal, Sat, encodeVmcnt, decodeVmcnt);
3254 } else if (CntName == "expcnt" || CntName == "expcnt_sat") {
3255 Failed = encodeCnt(ISA, IntVal, CntVal, Sat, encodeExpcnt, decodeExpcnt);
3256 } else if (CntName == "lgkmcnt" || CntName == "lgkmcnt_sat") {
3257 Failed = encodeCnt(ISA, IntVal, CntVal, Sat, encodeLgkmcnt, decodeLgkmcnt);
3258 }
3259
3260 if (Failed) {
3261 Error(ValLoc, "too large value for " + CntName);
3262 return true;
3263 }
3264
3265 if (getLexer().isNot(AsmToken::RParen)) {
3266 return true;
3267 }
3268
3269 Parser.Lex();
3270 if (getLexer().is(AsmToken::Amp) || getLexer().is(AsmToken::Comma)) {
3271 const AsmToken NextToken = getLexer().peekTok();
3272 if (NextToken.is(AsmToken::Identifier)) {
3273 Parser.Lex();
3274 }
3275 }
3276
3277 return false;
3278}
3279
3280OperandMatchResultTy
3281AMDGPUAsmParser::parseSWaitCntOps(OperandVector &Operands) {
3282 AMDGPU::IsaInfo::IsaVersion ISA =
3283 AMDGPU::IsaInfo::getIsaVersion(getFeatureBits());
3284 int64_t Waitcnt = getWaitcntBitMask(ISA);
3285 SMLoc S = Parser.getTok().getLoc();
3286
3287 switch(getLexer().getKind()) {
3288 default: return MatchOperand_ParseFail;
3289 case AsmToken::Integer:
3290 // The operand can be an integer value.
3291 if (getParser().parseAbsoluteExpression(Waitcnt))
3292 return MatchOperand_ParseFail;
3293 break;
3294
3295 case AsmToken::Identifier:
3296 do {
3297 if (parseCnt(Waitcnt))
3298 return MatchOperand_ParseFail;
3299 } while(getLexer().isNot(AsmToken::EndOfStatement));
3300 break;
3301 }
3302 Operands.push_back(AMDGPUOperand::CreateImm(this, Waitcnt, S));
3303 return MatchOperand_Success;
3304}
3305
3306bool AMDGPUAsmParser::parseHwregConstruct(OperandInfoTy &HwReg, int64_t &Offset,
3307 int64_t &Width) {
3308 using namespace llvm::AMDGPU::Hwreg;
3309
3310 if (Parser.getTok().getString() != "hwreg")
3311 return true;
3312 Parser.Lex();
3313
3314 if (getLexer().isNot(AsmToken::LParen))
3315 return true;
3316 Parser.Lex();
3317
3318 if (getLexer().is(AsmToken::Identifier)) {
3319 HwReg.IsSymbolic = true;
3320 HwReg.Id = ID_UNKNOWN_;
3321 const StringRef tok = Parser.getTok().getString();
3322 int Last = ID_SYMBOLIC_LAST_;
3323 if (isSI() || isCI() || isVI())
3324 Last = ID_SYMBOLIC_FIRST_GFX9_;
3325 for (int i = ID_SYMBOLIC_FIRST_; i < Last; ++i) {
3326 if (tok == IdSymbolic[i]) {
3327 HwReg.Id = i;
3328 break;
3329 }
3330 }
3331 Parser.Lex();
3332 } else {
3333 HwReg.IsSymbolic = false;
3334 if (getLexer().isNot(AsmToken::Integer))
3335 return true;
3336 if (getParser().parseAbsoluteExpression(HwReg.Id))
3337 return true;
3338 }
3339
3340 if (getLexer().is(AsmToken::RParen)) {
3341 Parser.Lex();
3342 return false;
3343 }
3344
3345 // optional params
3346 if (getLexer().isNot(AsmToken::Comma))
3347 return true;
3348 Parser.Lex();
3349
3350 if (getLexer().isNot(AsmToken::Integer))
3351 return true;
3352 if (getParser().parseAbsoluteExpression(Offset))
3353 return true;
3354
3355 if (getLexer().isNot(AsmToken::Comma))
3356 return true;
3357 Parser.Lex();
3358
3359 if (getLexer().isNot(AsmToken::Integer))
3360 return true;
3361 if (getParser().parseAbsoluteExpression(Width))
3362 return true;
3363
3364 if (getLexer().isNot(AsmToken::RParen))
3365 return true;
3366 Parser.Lex();
3367
3368 return false;
3369}
3370
3371OperandMatchResultTy AMDGPUAsmParser::parseHwreg(OperandVector &Operands) {
3372 using namespace llvm::AMDGPU::Hwreg;
3373
3374 int64_t Imm16Val = 0;
3375 SMLoc S = Parser.getTok().getLoc();
3376
3377 switch(getLexer().getKind()) {
3378 default: return MatchOperand_NoMatch;
3379 case AsmToken::Integer:
3380 // The operand can be an integer value.
3381 if (getParser().parseAbsoluteExpression(Imm16Val))
3382 return MatchOperand_NoMatch;
3383 if (Imm16Val < 0 || !isUInt<16>(Imm16Val)) {
3384 Error(S, "invalid immediate: only 16-bit values are legal");
3385 // Do not return an error code; create an imm operand anyway and proceed
3386 // to the next operand, if any. That avoids unnecessary error messages.
3387 }
3388 break;
3389
3390 case AsmToken::Identifier: {
3391 OperandInfoTy HwReg(ID_UNKNOWN_);
3392 int64_t Offset = OFFSET_DEFAULT_;
3393 int64_t Width = WIDTH_M1_DEFAULT_ + 1;
3394 if (parseHwregConstruct(HwReg, Offset, Width))
3395 return MatchOperand_ParseFail;
3396 if (HwReg.Id < 0 || !isUInt<ID_WIDTH_>(HwReg.Id)) {
3397 if (HwReg.IsSymbolic)
3398 Error(S, "invalid symbolic name of hardware register");
3399 else
3400 Error(S, "invalid code of hardware register: only 6-bit values are legal");
3401 }
3402 if (Offset < 0 || !isUInt<OFFSET_WIDTH_>(Offset))
3403 Error(S, "invalid bit offset: only 5-bit values are legal");
3404 if ((Width-1) < 0 || !isUInt<WIDTH_M1_WIDTH_>(Width-1))
3405 Error(S, "invalid bitfield width: only values from 1 to 32 are legal");
3406 Imm16Val = (HwReg.Id << ID_SHIFT_) | (Offset << OFFSET_SHIFT_) | ((Width-1) << WIDTH_M1_SHIFT_);
3407 }
3408 break;
3409 }
3410 Operands.push_back(AMDGPUOperand::CreateImm(this, Imm16Val, S, AMDGPUOperand::ImmTyHwreg));
3411 return MatchOperand_Success;
3412}
3413
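// A worked example of the hwreg immediate packing above, assuming the SI/VI
// field layout implied by the diagnostics (id in bits [5:0], offset in bits
// [10:6], width-1 in bits [15:11]): hwreg(4, 8, 4) packs as
//   Imm16Val = 4 | (8 << 6) | ((4 - 1) << 11) = 0x1A04
// while hwreg(id) alone keeps the defaults, offset = 0 and width = 32.
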
3414bool AMDGPUOperand::isSWaitCnt() const {
3415 return isImm();
3416}
3417
3418bool AMDGPUOperand::isHwreg() const {
3419 return isImmTy(ImmTyHwreg);
3420}
3421
3422bool AMDGPUAsmParser::parseSendMsgConstruct(OperandInfoTy &Msg, OperandInfoTy &Operation, int64_t &StreamId) {
3423 using namespace llvm::AMDGPU::SendMsg;
3424
3425 if (Parser.getTok().getString() != "sendmsg")
3426 return true;
3427 Parser.Lex();
3428
3429 if (getLexer().isNot(AsmToken::LParen))
3430 return true;
3431 Parser.Lex();
3432
3433 if (getLexer().is(AsmToken::Identifier)) {
3434 Msg.IsSymbolic = true;
3435 Msg.Id = ID_UNKNOWN_;
3436 const std::string tok = Parser.getTok().getString();
3437 for (int i = ID_GAPS_FIRST_; i < ID_GAPS_LAST_; ++i) {
3438 switch(i) {
3439 default: continue; // Omit gaps.
3440 case ID_INTERRUPT: case ID_GS: case ID_GS_DONE: case ID_SYSMSG: break;
3441 }
3442 if (tok == IdSymbolic[i]) {
3443 Msg.Id = i;
3444 break;
3445 }
3446 }
3447 Parser.Lex();
3448 } else {
3449 Msg.IsSymbolic = false;
3450 if (getLexer().isNot(AsmToken::Integer))
3451 return true;
3452 if (getParser().parseAbsoluteExpression(Msg.Id))
3453 return true;
3454 if (getLexer().is(AsmToken::Integer))
3455 if (getParser().parseAbsoluteExpression(Msg.Id))
3456 Msg.Id = ID_UNKNOWN_;
3457 }
3458 if (Msg.Id == ID_UNKNOWN_) // Don't know how to parse the rest.
3459 return false;
3460
3461 if (!(Msg.Id == ID_GS || Msg.Id == ID_GS_DONE || Msg.Id == ID_SYSMSG)) {
3462 if (getLexer().isNot(AsmToken::RParen))
3463 return true;
3464 Parser.Lex();
3465 return false;
3466 }
3467
3468 if (getLexer().isNot(AsmToken::Comma))
3469 return true;
3470 Parser.Lex();
3471
3472 assert(Msg.Id == ID_GS || Msg.Id == ID_GS_DONE || Msg.Id == ID_SYSMSG);
3473 Operation.Id = ID_UNKNOWN_;
3474 if (getLexer().is(AsmToken::Identifier)) {
3475 Operation.IsSymbolic = true;
3476 const char* const *S = (Msg.Id == ID_SYSMSG) ? OpSysSymbolic : OpGsSymbolic;
3477 const int F = (Msg.Id == ID_SYSMSG) ? OP_SYS_FIRST_ : OP_GS_FIRST_;
3478 const int L = (Msg.Id == ID_SYSMSG) ? OP_SYS_LAST_ : OP_GS_LAST_;
3479 const StringRef Tok = Parser.getTok().getString();
3480 for (int i = F; i < L; ++i) {
3481 if (Tok == S[i]) {
3482 Operation.Id = i;
3483 break;
3484 }
3485 }
3486 Parser.Lex();
3487 } else {
3488 Operation.IsSymbolic = false;
3489 if (getLexer().isNot(AsmToken::Integer))
3490 return true;
3491 if (getParser().parseAbsoluteExpression(Operation.Id))
3492 return true;
3493 }
3494
3495 if ((Msg.Id == ID_GS || Msg.Id == ID_GS_DONE) && Operation.Id != OP_GS_NOP) {
3496 // Stream id is optional.
3497 if (getLexer().is(AsmToken::RParen)) {
3498 Parser.Lex();
3499 return false;
3500 }
3501
3502 if (getLexer().isNot(AsmToken::Comma))
3503 return true;
3504 Parser.Lex();
3505
3506 if (getLexer().isNot(AsmToken::Integer))
3507 return true;
3508 if (getParser().parseAbsoluteExpression(StreamId))
3509 return true;
3510 }
3511
3512 if (getLexer().isNot(AsmToken::RParen))
3513 return true;
3514 Parser.Lex();
3515 return false;
3516}
3517
3518OperandMatchResultTy AMDGPUAsmParser::parseInterpSlot(OperandVector &Operands) {
3519 if (getLexer().getKind() != AsmToken::Identifier)
3520 return MatchOperand_NoMatch;
3521
3522 StringRef Str = Parser.getTok().getString();
3523 int Slot = StringSwitch<int>(Str)
3524 .Case("p10", 0)
3525 .Case("p20", 1)
3526 .Case("p0", 2)
3527 .Default(-1);
3528
3529 SMLoc S = Parser.getTok().getLoc();
3530 if (Slot == -1)
3531 return MatchOperand_ParseFail;
3532
3533 Parser.Lex();
3534 Operands.push_back(AMDGPUOperand::CreateImm(this, Slot, S,
3535 AMDGPUOperand::ImmTyInterpSlot));
3536 return MatchOperand_Success;
3537}
3538
3539OperandMatchResultTy AMDGPUAsmParser::parseInterpAttr(OperandVector &Operands) {
3540 if (getLexer().getKind() != AsmToken::Identifier)
3541 return MatchOperand_NoMatch;
3542
3543 StringRef Str = Parser.getTok().getString();
3544 if (!Str.startswith("attr"))
3545 return MatchOperand_NoMatch;
3546
3547 StringRef Chan = Str.take_back(2);
3548 int AttrChan = StringSwitch<int>(Chan)
3549 .Case(".x", 0)
3550 .Case(".y", 1)
3551 .Case(".z", 2)
3552 .Case(".w", 3)
3553 .Default(-1);
3554 if (AttrChan == -1)
3555 return MatchOperand_ParseFail;
3556
3557 Str = Str.drop_back(2).drop_front(4);
3558
3559 uint8_t Attr;
3560 if (Str.getAsInteger(10, Attr))
3561 return MatchOperand_ParseFail;
3562
3563 SMLoc S = Parser.getTok().getLoc();
3564 Parser.Lex();
3565 if (Attr > 63) {
3566 Error(S, "out of bounds attr");
3567 return MatchOperand_Success;
3568 }
3569
3570 SMLoc SChan = SMLoc::getFromPointer(Chan.data());
3571
3572 Operands.push_back(AMDGPUOperand::CreateImm(this, Attr, S,
3573 AMDGPUOperand::ImmTyInterpAttr));
3574 Operands.push_back(AMDGPUOperand::CreateImm(this, AttrChan, SChan,
3575 AMDGPUOperand::ImmTyAttrChan));
3576 return MatchOperand_Success;
3577}
3578
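// A worked example of the attribute parsing above: for "attr12.y", the
// two-character suffix ".y" gives AttrChan = 1 and the remaining "12" gives
// Attr = 12, so the parser pushes two immediates, ImmTyInterpAttr(12)
// followed by ImmTyAttrChan(1). Anything above attr63 is diagnosed with
// "out of bounds attr".
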
3579void AMDGPUAsmParser::errorExpTgt() {
3580 Error(Parser.getTok().getLoc(), "invalid exp target");
3581}
3582
3583OperandMatchResultTy AMDGPUAsmParser::parseExpTgtImpl(StringRef Str,
3584 uint8_t &Val) {
3585 if (Str == "null") {
3586 Val = 9;
3587 return MatchOperand_Success;
3588 }
3589
3590 if (Str.startswith("mrt")) {
3591 Str = Str.drop_front(3);
3592 if (Str == "z") { // == mrtz
3593 Val = 8;
3594 return MatchOperand_Success;
3595 }
3596
3597 if (Str.getAsInteger(10, Val))
3598 return MatchOperand_ParseFail;
3599
3600 if (Val > 7)
3601 errorExpTgt();
3602
3603 return MatchOperand_Success;
3604 }
3605
3606 if (Str.startswith("pos")) {
3607 Str = Str.drop_front(3);
3608 if (Str.getAsInteger(10, Val))
3609 return MatchOperand_ParseFail;
3610
3611 if (Val > 3)
3612 errorExpTgt();
3613
3614 Val += 12;
3615 return MatchOperand_Success;
3616 }
3617
3618 if (Str.startswith("param")) {
3619 Str = Str.drop_front(5);
3620 if (Str.getAsInteger(10, Val))
3621 return MatchOperand_ParseFail;
3622
3623 if (Val >= 32)
3624 errorExpTgt();
3625
3626 Val += 32;
3627 return MatchOperand_Success;
3628 }
3629
3630 if (Str.startswith("invalid_target_")) {
3631 Str = Str.drop_front(15);
3632 if (Str.getAsInteger(10, Val))
3633 return MatchOperand_ParseFail;
3634
3635 errorExpTgt();
3636 return MatchOperand_Success;
3637 }
3638
3639 return MatchOperand_NoMatch;
3640}
3641
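// The target mapping implemented by parseExpTgtImpl, collected in one place:
//   mrt0..mrt7 -> 0..7    mrtz -> 8    null -> 9
//   pos0..pos3 -> 12..15  param0..param31 -> 32..63
// Out-of-range forms such as mrt8 or pos4 emit "invalid exp target" but
// still return MatchOperand_Success so that parsing can continue.
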
3642OperandMatchResultTy AMDGPUAsmParser::parseExpTgt(OperandVector &Operands) {
3643 uint8_t Val;
3644 StringRef Str = Parser.getTok().getString();
3645
3646 auto Res = parseExpTgtImpl(Str, Val);
3647 if (Res != MatchOperand_Success)
3648 return Res;
3649
3650 SMLoc S = Parser.getTok().getLoc();
3651 Parser.Lex();
3652
3653 Operands.push_back(AMDGPUOperand::CreateImm(this, Val, S,
3654 AMDGPUOperand::ImmTyExpTgt));
3655 return MatchOperand_Success;
3656}
3657
3658OperandMatchResultTy
3659AMDGPUAsmParser::parseSendMsgOp(OperandVector &Operands) {
3660 using namespace llvm::AMDGPU::SendMsg;
3661
3662 int64_t Imm16Val = 0;
3663 SMLoc S = Parser.getTok().getLoc();
3664
3665 switch(getLexer().getKind()) {
3666 default:
3667 return MatchOperand_NoMatch;
3668 case AsmToken::Integer:
3669 // The operand can be an integer value.
3670 if (getParser().parseAbsoluteExpression(Imm16Val))
3671 return MatchOperand_NoMatch;
3672 if (Imm16Val < 0 || !isUInt<16>(Imm16Val)) {
3673 Error(S, "invalid immediate: only 16-bit values are legal");
3674 // Do not return an error code; create an imm operand anyway and proceed
3675 // to the next operand, if any. That avoids unnecessary error messages.
3676 }
3677 break;
3678 case AsmToken::Identifier: {
3679 OperandInfoTy Msg(ID_UNKNOWN_);
3680 OperandInfoTy Operation(OP_UNKNOWN_);
3681 int64_t StreamId = STREAM_ID_DEFAULT_;
3682 if (parseSendMsgConstruct(Msg, Operation, StreamId))
3683 return MatchOperand_ParseFail;
3684 do {
3685 // Validate and encode message ID.
3686 if (! ((ID_INTERRUPT <= Msg.Id && Msg.Id <= ID_GS_DONE)
3687 || Msg.Id == ID_SYSMSG)) {
3688 if (Msg.IsSymbolic)
3689 Error(S, "invalid/unsupported symbolic name of message");
3690 else
3691 Error(S, "invalid/unsupported code of message");
3692 break;
3693 }
3694 Imm16Val = (Msg.Id << ID_SHIFT_);
3695 // Validate and encode operation ID.
3696 if (Msg.Id == ID_GS || Msg.Id == ID_GS_DONE) {
3697 if (! (OP_GS_FIRST_ <= Operation.Id && Operation.Id < OP_GS_LAST_)) {
3698 if (Operation.IsSymbolic)
3699 Error(S, "invalid symbolic name of GS_OP");
3700 else
3701 Error(S, "invalid code of GS_OP: only 2-bit values are legal");
3702 break;
3703 }
3704 if (Operation.Id == OP_GS_NOP
3705 && Msg.Id != ID_GS_DONE) {
3706 Error(S, "invalid GS_OP: NOP is for GS_DONE only");
3707 break;
3708 }
3709 Imm16Val |= (Operation.Id << OP_SHIFT_);
3710 }
3711 if (Msg.Id == ID_SYSMSG) {
3712 if (! (OP_SYS_FIRST_ <= Operation.Id && Operation.Id < OP_SYS_LAST_)) {
3713 if (Operation.IsSymbolic)
3714 Error(S, "invalid/unsupported symbolic name of SYSMSG_OP");
3715 else
3716 Error(S, "invalid/unsupported code of SYSMSG_OP");
3717 break;
3718 }
3719 Imm16Val |= (Operation.Id << OP_SHIFT_);
3720 }
3721 // Validate and encode stream ID.
3722 if ((Msg.Id == ID_GS || Msg.Id == ID_GS_DONE) && Operation.Id != OP_GS_NOP) {
3723 if (! (STREAM_ID_FIRST_ <= StreamId && StreamId < STREAM_ID_LAST_)) {
3724 Error(S, "invalid stream id: only 2-bit values are legal");
3725 break;
3726 }
3727 Imm16Val |= (StreamId << STREAM_ID_SHIFT_);
3728 }
3729 } while (false);
3730 }
3731 break;
3732 }
3733 Operands.push_back(AMDGPUOperand::CreateImm(this, Imm16Val, S, AMDGPUOperand::ImmTySendMsg));
3734 return MatchOperand_Success;
3735}
3736
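// A worked example of the sendmsg immediate packing above, assuming the
// usual SI/VI shifts (ID_SHIFT_ = 0, OP_SHIFT_ = 4, STREAM_ID_SHIFT_ = 8)
// and the symbolic values MSG_GS = 2 and GS_OP_EMIT = 2:
//   sendmsg(MSG_GS, GS_OP_EMIT, 1) -> Imm16Val = 2 | (2 << 4) | (1 << 8) = 0x122
// The stream id is only accepted for GS operations other than NOP, matching
// the validation above.
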
3737bool AMDGPUOperand::isSendMsg() const {
3738 return isImmTy(ImmTySendMsg);
3739}
3740
3741//===----------------------------------------------------------------------===//
3742// parser helpers
3743//===----------------------------------------------------------------------===//
3744
3745bool
3746AMDGPUAsmParser::trySkipId(const StringRef Id) {
3747 if (getLexer().getKind() == AsmToken::Identifier &&
3748 Parser.getTok().getString() == Id) {
3749 Parser.Lex();
3750 return true;
3751 }
3752 return false;
3753}
3754
3755bool
3756AMDGPUAsmParser::trySkipToken(const AsmToken::TokenKind Kind) {
3757 if (getLexer().getKind() == Kind) {
3758 Parser.Lex();
3759 return true;
3760 }
3761 return false;
3762}
3763
3764bool
3765AMDGPUAsmParser::skipToken(const AsmToken::TokenKind Kind,
3766 const StringRef ErrMsg) {
3767 if (!trySkipToken(Kind)) {
3768 Error(Parser.getTok().getLoc(), ErrMsg);
3769 return false;
3770 }
3771 return true;
3772}
3773
3774bool
3775AMDGPUAsmParser::parseExpr(int64_t &Imm) {
3776 return !getParser().parseAbsoluteExpression(Imm);
3777}
3778
3779bool
3780AMDGPUAsmParser::parseString(StringRef &Val, const StringRef ErrMsg) {
3781 SMLoc S = Parser.getTok().getLoc();
3782 if (getLexer().getKind() == AsmToken::String) {
3783 Val = Parser.getTok().getStringContents();
3784 Parser.Lex();
3785 return true;
3786 } else {
3787 Error(S, ErrMsg);
3788 return false;
3789 }
3790}
3791
3792//===----------------------------------------------------------------------===//
3793// swizzle
3794//===----------------------------------------------------------------------===//
3795
3796LLVM_READNONE
3797static unsigned
3798encodeBitmaskPerm(const unsigned AndMask,
3799 const unsigned OrMask,
3800 const unsigned XorMask) {
3801 using namespace llvm::AMDGPU::Swizzle;
3802
3803 return BITMASK_PERM_ENC |
3804 (AndMask << BITMASK_AND_SHIFT) |
3805 (OrMask << BITMASK_OR_SHIFT) |
3806 (XorMask << BITMASK_XOR_SHIFT);
3807}
3808
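// A standalone sketch of the lane mapping that a BITMASK_PERM control word
// describes, assuming the documented ds_swizzle semantics (within each group
// of 32 lanes, lane j reads from lane ((j & and_mask) | or_mask) ^ xor_mask):

static unsigned swizzledSrcLane(unsigned Lane, unsigned AndMask,
                                unsigned OrMask, unsigned XorMask) {
  return ((Lane & AndMask) | OrMask) ^ XorMask;  // 5-bit lane arithmetic
}

// Under that reading, parseSwizzleBroadcast (below) with GroupSize = 8 and
// LaneIdx = 3 yields AndMask = 0b11000, OrMask = 3, XorMask = 0, so lanes
// 0-7 all read lane 3, lanes 8-15 read lane 11, and so on;
// parseSwizzleReverse with GroupSize = 8 yields XorMask = 7, reversing the
// lane order within each group of 8; and parseSwizzleSwap with GroupSize = 8
// yields XorMask = 8, swapping adjacent groups of 8 lanes.
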
3809bool
3810AMDGPUAsmParser::parseSwizzleOperands(const unsigned OpNum, int64_t* Op,
3811 const unsigned MinVal,
3812 const unsigned MaxVal,
3813 const StringRef ErrMsg) {
3814 for (unsigned i = 0; i < OpNum; ++i) {
3815 if (!skipToken(AsmToken::Comma, "expected a comma")){
3816 return false;
3817 }
3818 SMLoc ExprLoc = Parser.getTok().getLoc();
3819 if (!parseExpr(Op[i])) {
3820 return false;
3821 }
3822 if (Op[i] < MinVal || Op[i] > MaxVal) {
3823 Error(ExprLoc, ErrMsg);
3824 return false;
3825 }
3826 }
3827
3828 return true;
3829}
3830
3831bool
3832AMDGPUAsmParser::parseSwizzleQuadPerm(int64_t &Imm) {
3833 using namespace llvm::AMDGPU::Swizzle;
3834
3835 int64_t Lane[LANE_NUM];
3836 if (parseSwizzleOperands(LANE_NUM, Lane, 0, LANE_MAX,
3837 "expected a 2-bit lane id")) {
3838 Imm = QUAD_PERM_ENC;
3839 for (auto i = 0; i < LANE_NUM; ++i) {
3840 Imm |= Lane[i] << (LANE_SHIFT * i);
3841 }
3842 return true;
3843 }
3844 return false;
3845}
3846
3847bool
3848AMDGPUAsmParser::parseSwizzleBroadcast(int64_t &Imm) {
3849 using namespace llvm::AMDGPU::Swizzle;
3850
3851 SMLoc S = Parser.getTok().getLoc();
3852 int64_t GroupSize;
3853 int64_t LaneIdx;
3854
3855 if (!parseSwizzleOperands(1, &GroupSize,
3856 2, 32,
3857 "group size must be in the interval [2,32]")) {
3858 return false;
3859 }
3860 if (!isPowerOf2_64(GroupSize)) {
3861 Error(S, "group size must be a power of two");
3862 return false;
3863 }
3864 if (parseSwizzleOperands(1, &LaneIdx,
3865 0, GroupSize - 1,
3866 "lane id must be in the interval [0,group size - 1]")) {
3867 Imm = encodeBitmaskPerm(BITMASK_MAX - GroupSize + 1, LaneIdx, 0);
3868 return true;
3869 }
3870 return false;
3871}
3872
3873bool
3874AMDGPUAsmParser::parseSwizzleReverse(int64_t &Imm) {
3875 using namespace llvm::AMDGPU::Swizzle;
3876
3877 SMLoc S = Parser.getTok().getLoc();
3878 int64_t GroupSize;
3879
3880 if (!parseSwizzleOperands(1, &GroupSize,
3881 2, 32, "group size must be in the interval [2,32]")) {
3882 return false;
3883 }
3884 if (!isPowerOf2_64(GroupSize)) {
3885 Error(S, "group size must be a power of two");
3886 return false;
3887 }
3888
3889 Imm = encodeBitmaskPerm(BITMASK_MAX, 0, GroupSize - 1);
3890 return true;
3891}
3892
3893bool
3894AMDGPUAsmParser::parseSwizzleSwap(int64_t &Imm) {
3895 using namespace llvm::AMDGPU::Swizzle;
3896
3897 SMLoc S = Parser.getTok().getLoc();
3898 int64_t GroupSize;
3899
3900 if (!parseSwizzleOperands(1, &GroupSize,
3901 1, 16, "group size must be in the interval [1,16]")) {
3902 return false;
3903 }
3904 if (!isPowerOf2_64(GroupSize)) {
3905 Error(S, "group size must be a power of two");
3906 return false;
3907 }
3908
3909 Imm = encodeBitmaskPerm(BITMASK_MAX, 0, GroupSize);
3910 return true;
3911}
3912
3913bool
3914AMDGPUAsmParser::parseSwizzleBitmaskPerm(int64_t &Imm) {
3915 using namespace llvm::AMDGPU::Swizzle;
3916
3917 if (!skipToken(AsmToken::Comma, "expected a comma")) {
3918 return false;
3919 }
3920
3921 StringRef Ctl;
3922 SMLoc StrLoc = Parser.getTok().getLoc();
3923 if (!parseString(Ctl)) {
3924 return false;
3925 }
3926 if (Ctl.size() != BITMASK_WIDTH) {
3927 Error(StrLoc, "expected a 5-character mask");
3928 return false;
3929 }
3930
3931 unsigned AndMask = 0;
3932 unsigned OrMask = 0;
3933 unsigned XorMask = 0;
3934
3935 for (size_t i = 0; i < Ctl.size(); ++i) {
3936 unsigned Mask = 1 << (BITMASK_WIDTH - 1 - i);
3937 switch(Ctl[i]) {
3938 default:
3939 Error(StrLoc, "invalid mask");
3940 return false;
3941 case '0':
3942 break;
3943 case '1':
3944 OrMask |= Mask;
3945 break;
3946 case 'p':
3947 AndMask |= Mask;
3948 break;
3949 case 'i':
3950 AndMask |= Mask;
3951 XorMask |= Mask;
3952 break;
3953 }
3954 }
3955
3956 Imm = encodeBitmaskPerm(AndMask, OrMask, XorMask);
3957 return true;
3958}
3959
3960bool
3961AMDGPUAsmParser::parseSwizzleOffset(int64_t &Imm) {
3962
3963 SMLoc OffsetLoc = Parser.getTok().getLoc();
3964
3965 if (!parseExpr(Imm)) {
3966 return false;
3967 }
3968 if (!isUInt<16>(Imm)) {
3969 Error(OffsetLoc, "expected a 16-bit offset");
3970 return false;
3971 }
3972 return true;
3973}
3974
3975bool
3976AMDGPUAsmParser::parseSwizzleMacro(int64_t &Imm) {
3977 using namespace llvm::AMDGPU::Swizzle;
3978
3979 if (skipToken(AsmToken::LParen, "expected a left parentheses")) {
3980
3981 SMLoc ModeLoc = Parser.getTok().getLoc();
3982 bool Ok = false;
3983
3984 if (trySkipId(IdSymbolic[ID_QUAD_PERM])) {
3985 Ok = parseSwizzleQuadPerm(Imm);
3986 } else if (trySkipId(IdSymbolic[ID_BITMASK_PERM])) {
3987 Ok = parseSwizzleBitmaskPerm(Imm);
3988 } else if (trySkipId(IdSymbolic[ID_BROADCAST])) {
3989 Ok = parseSwizzleBroadcast(Imm);
3990 } else if (trySkipId(IdSymbolic[ID_SWAP])) {
3991 Ok = parseSwizzleSwap(Imm);
3992 } else if (trySkipId(IdSymbolic[ID_REVERSE])) {
3993 Ok = parseSwizzleReverse(Imm);
3994 } else {
3995 Error(ModeLoc, "expected a swizzle mode");
3996 }
3997
3998 return Ok && skipToken(AsmToken::RParen, "expected a closing parentheses");
3999 }
4000
4001 return false;
4002}
4003
4004OperandMatchResultTy
4005AMDGPUAsmParser::parseSwizzleOp(OperandVector &Operands) {
4006 SMLoc S = Parser.getTok().getLoc();
4007 int64_t Imm = 0;
4008
4009 if (trySkipId("offset")) {
4010
4011 bool Ok = false;
4012 if (skipToken(AsmToken::Colon, "expected a colon")) {
4013 if (trySkipId("swizzle")) {
4014 Ok = parseSwizzleMacro(Imm);
4015 } else {
4016 Ok = parseSwizzleOffset(Imm);
4017 }
4018 }
4019
4020 Operands.push_back(AMDGPUOperand::CreateImm(this, Imm, S, AMDGPUOperand::ImmTySwizzle));
4021
4022 return Ok? MatchOperand_Success : MatchOperand_ParseFail;
4023 } else {
4024 // Swizzle "offset" operand is optional.
4025 // If it is omitted, try parsing other optional operands.
4026 return parseOptionalOpr(Operands);
4027 }
4028}
4029
4030bool
4031AMDGPUOperand::isSwizzle() const {
4032 return isImmTy(ImmTySwizzle);
4033}
4034
4035//===----------------------------------------------------------------------===//
4036// sopp branch targets
4037//===----------------------------------------------------------------------===//
4038
4039OperandMatchResultTy
4040AMDGPUAsmParser::parseSOppBrTarget(OperandVector &Operands) {
4041 SMLoc S = Parser.getTok().getLoc();
4042
4043 switch (getLexer().getKind()) {
4044 default: return MatchOperand_ParseFail;
4045 case AsmToken::Integer: {
4046 int64_t Imm;
4047 if (getParser().parseAbsoluteExpression(Imm))
4048 return MatchOperand_ParseFail;
4049 Operands.push_back(AMDGPUOperand::CreateImm(this, Imm, S));
4050 return MatchOperand_Success;
4051 }
4052
4053 case AsmToken::Identifier:
4054 Operands.push_back(AMDGPUOperand::CreateExpr(this,
4055 MCSymbolRefExpr::create(getContext().getOrCreateSymbol(
4056 Parser.getTok().getString()), getContext()), S));
4057 Parser.Lex();
4058 return MatchOperand_Success;
4059 }
4060}
4061
4062//===----------------------------------------------------------------------===//
4063// mubuf
4064//===----------------------------------------------------------------------===//
4065
4066AMDGPUOperand::Ptr AMDGPUAsmParser::defaultGLC() const {
4067 return AMDGPUOperand::CreateImm(this, 0, SMLoc(), AMDGPUOperand::ImmTyGLC);
4068}
4069
4070AMDGPUOperand::Ptr AMDGPUAsmParser::defaultSLC() const {
4071 return AMDGPUOperand::CreateImm(this, 0, SMLoc(), AMDGPUOperand::ImmTySLC);
4072}
4073
4074AMDGPUOperand::Ptr AMDGPUAsmParser::defaultTFE() const {
4075 return AMDGPUOperand::CreateImm(this, 0, SMLoc(), AMDGPUOperand::ImmTyTFE);
4076}
4077
4078AMDGPUOperand::Ptr AMDGPUAsmParser::defaultD16() const {
4079 return AMDGPUOperand::CreateImm(this, 0, SMLoc(), AMDGPUOperand::ImmTyD16);
4080}
4081
4082void AMDGPUAsmParser::cvtMubufImpl(MCInst &Inst,
4083 const OperandVector &Operands,
4084 bool IsAtomic, bool IsAtomicReturn) {
4085 OptionalImmIndexMap OptionalIdx;
4086 assert(IsAtomicReturn ? IsAtomic : true);
4087
4088 for (unsigned i = 1, e = Operands.size(); i != e; ++i) {
4089 AMDGPUOperand &Op = ((AMDGPUOperand &)*Operands[i]);
4090
4091 // Add the register arguments
4092 if (Op.isReg()) {
4093 Op.addRegOperands(Inst, 1);
4094 continue;
4095 }
4096
4097 // Handle the case where soffset is an immediate
4098 if (Op.isImm() && Op.getImmTy() == AMDGPUOperand::ImmTyNone) {
4099 Op.addImmOperands(Inst, 1);
4100 continue;
4101 }
4102
4103 // Handle tokens like 'offen' which are sometimes hard-coded into the
4104 // asm string. There are no MCInst operands for these.
4105 if (Op.isToken()) {
4106 continue;
4107 }
4108 assert(Op.isImm());
4109
4110 // Handle optional arguments
4111 OptionalIdx[Op.getImmTy()] = i;
4112 }
4113
4114 // Copy $vdata_in operand and insert as $vdata for MUBUF_Atomic RTN insns.
4115 if (IsAtomicReturn) {
4116 MCInst::iterator I = Inst.begin(); // $vdata_in is always at the beginning.
4117 Inst.insert(I, *I);
4118 }
4119
4120 addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyOffset);
4121 if (!IsAtomic) { // glc is hard-coded.
4122 addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyGLC);
4123 }
4124 addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTySLC);
4125 addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyTFE);
4126}
4127
4128void AMDGPUAsmParser::cvtMtbuf(MCInst &Inst, const OperandVector &Operands) {
4129 OptionalImmIndexMap OptionalIdx;
4130
4131 for (unsigned i = 1, e = Operands.size(); i != e; ++i) {
4132 AMDGPUOperand &Op = ((AMDGPUOperand &)*Operands[i]);
4133
4134 // Add the register arguments
4135 if (Op.isReg()) {
4136 Op.addRegOperands(Inst, 1);
4137 continue;
4138 }
4139
4140 // Handle the case where soffset is an immediate
4141 if (Op.isImm() && Op.getImmTy() == AMDGPUOperand::ImmTyNone) {
4142 Op.addImmOperands(Inst, 1);
4143 continue;
4144 }
4145
4146 // Handle tokens like 'offen' which are sometimes hard-coded into the
4147 // asm string. There are no MCInst operands for these.
4148 if (Op.isToken()) {
4149 continue;
4150 }
4151 assert(Op.isImm());
4152
4153 // Handle optional arguments
4154 OptionalIdx[Op.getImmTy()] = i;
4155 }
4156
4157 addOptionalImmOperand(Inst, Operands, OptionalIdx,
4158 AMDGPUOperand::ImmTyOffset);
4159 addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyDFMT);
4160 addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyNFMT);
4161 addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyGLC);
4162 addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTySLC);
4163 addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyTFE);
4164}
4165
4166//===----------------------------------------------------------------------===//
4167// mimg
4168//===----------------------------------------------------------------------===//
4169
4170void AMDGPUAsmParser::cvtMIMG(MCInst &Inst, const OperandVector &Operands,
4171 bool IsAtomic) {
4172 unsigned I = 1;
4173 const MCInstrDesc &Desc = MII.get(Inst.getOpcode());
4174 for (unsigned J = 0; J < Desc.getNumDefs(); ++J) {
4175 ((AMDGPUOperand &)*Operands[I++]).addRegOperands(Inst, 1);
4176 }
4177
4178 if (IsAtomic) {
4179 // Add src, same as dst
4180 assert(Desc.getNumDefs() == 1);
4181 ((AMDGPUOperand &)*Operands[I - 1]).addRegOperands(Inst, 1);
4182 }
4183
4184 OptionalImmIndexMap OptionalIdx;
4185
4186 for (unsigned E = Operands.size(); I != E; ++I) {
4187 AMDGPUOperand &Op = ((AMDGPUOperand &)*Operands[I]);
4188
4189 // Add the register arguments
4190 if (Op.isReg()) {
4191 Op.addRegOperands(Inst, 1);
4192 } else if (Op.isImmModifier()) {
4193 OptionalIdx[Op.getImmTy()] = I;
4194 } else {
4195 llvm_unreachable("unexpected operand type");
4196 }
4197 }
4198
4199 addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyDMask);
4200 addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyUNorm);
4201 addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyGLC);
4202 addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTySLC);
4203 addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyR128);
4204 addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyTFE);
4205 addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyLWE);
4206 addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyDA);
4207}
4208
4209void AMDGPUAsmParser::cvtMIMGAtomic(MCInst &Inst, const OperandVector &Operands) {
4210 cvtMIMG(Inst, Operands, true);
4211}
4212
4213AMDGPUOperand::Ptr AMDGPUAsmParser::defaultDMask() const {
4214 return AMDGPUOperand::CreateImm(this, 0, SMLoc(), AMDGPUOperand::ImmTyDMask);
4215}
4216
4217AMDGPUOperand::Ptr AMDGPUAsmParser::defaultUNorm() const {
4218 return AMDGPUOperand::CreateImm(this, 0, SMLoc(), AMDGPUOperand::ImmTyUNorm);
4219}
4220
4221AMDGPUOperand::Ptr AMDGPUAsmParser::defaultDA() const {
4222 return AMDGPUOperand::CreateImm(this, 0, SMLoc(), AMDGPUOperand::ImmTyDA);
4223}
4224
4225AMDGPUOperand::Ptr AMDGPUAsmParser::defaultR128() const {
4226 return AMDGPUOperand::CreateImm(this, 0, SMLoc(), AMDGPUOperand::ImmTyR128);
4227}
4228
4229AMDGPUOperand::Ptr AMDGPUAsmParser::defaultLWE() const {
4230 return AMDGPUOperand::CreateImm(this, 0, SMLoc(), AMDGPUOperand::ImmTyLWE);
4231}
4232
4233//===----------------------------------------------------------------------===//
4234// smrd
4235//===----------------------------------------------------------------------===//
4236
4237bool AMDGPUOperand::isSMRDOffset8() const {
4238 return isImm() && isUInt<8>(getImm());
4239}
4240
4241bool AMDGPUOperand::isSMRDOffset20() const {
4242 return isImm() && isUInt<20>(getImm());
4243}
4244
4245bool AMDGPUOperand::isSMRDLiteralOffset() const {
4246 // 32-bit literals are only supported on CI, and we only want to use them
4247 // when the offset does not fit in 8 bits.
4248 return isImm() && !isUInt<8>(getImm()) && isUInt<32>(getImm());
4249}
4250
4251AMDGPUOperand::Ptr AMDGPUAsmParser::defaultSMRDOffset8() const {
4252 return AMDGPUOperand::CreateImm(this, 0, SMLoc(), AMDGPUOperand::ImmTyOffset);
4253}
4254
4255AMDGPUOperand::Ptr AMDGPUAsmParser::defaultSMRDOffset20() const {
4256 return AMDGPUOperand::CreateImm(this, 0, SMLoc(), AMDGPUOperand::ImmTyOffset);
4257}
4258
4259AMDGPUOperand::Ptr AMDGPUAsmParser::defaultSMRDLiteralOffset() const {
4260 return AMDGPUOperand::CreateImm(this, 0, SMLoc(), AMDGPUOperand::ImmTyOffset);
4261}
4262
4263AMDGPUOperand::Ptr AMDGPUAsmParser::defaultOffsetU12() const {
4264 return AMDGPUOperand::CreateImm(this, 0, SMLoc(), AMDGPUOperand::ImmTyOffset);
4265}
4266
4267AMDGPUOperand::Ptr AMDGPUAsmParser::defaultOffsetS13() const {
4268 return AMDGPUOperand::CreateImm(this, 0, SMLoc(), AMDGPUOperand::ImmTyOffset);
4269}
4270
4271//===----------------------------------------------------------------------===//
4272// vop3
4273//===----------------------------------------------------------------------===//
4274
4275static bool ConvertOmodMul(int64_t &Mul) {
4276 if (Mul != 1 && Mul != 2 && Mul != 4)
4277 return false;
4278
4279 Mul >>= 1;
4280 return true;
4281}
4282
4283static bool ConvertOmodDiv(int64_t &Div) {
4284 if (Div == 1) {
4285 Div = 0;
4286 return true;
4287 }
4288
4289 if (Div == 2) {
4290 Div = 3;
4291 return true;
4292 }
4293
4294 return false;
4295}
4296
4297static bool ConvertBoundCtrl(int64_t &BoundCtrl) {
4298 if (BoundCtrl == 0) {
4299 BoundCtrl = 1;
4300 return true;
4301 }
4302
4303 if (BoundCtrl == -1) {
4304 BoundCtrl = 0;
4305 return true;
4306 }
4307
4308 return false;
4309}
4310
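// These converters normalize assembly syntax into hardware field values:
// ConvertOmodMul maps mul:1/2/4 to 0/1/2 via the shift, and ConvertOmodDiv
// maps div:1 -> 0 and div:2 -> 3, so (reading the code) the 2-bit omod field
// means 0 = none, 1 = *2, 2 = *4, 3 = /2. ConvertBoundCtrl likewise maps the
// source form bound_ctrl:0 to the encoded field value 1.
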
4311// Note: the order in this table matches the order of operands in AsmString.
4312static const OptionalOperand AMDGPUOptionalOperandTable[] = {
4313 {"offen", AMDGPUOperand::ImmTyOffen, true, nullptr},
4314 {"idxen", AMDGPUOperand::ImmTyIdxen, true, nullptr},
4315 {"addr64", AMDGPUOperand::ImmTyAddr64, true, nullptr},
4316 {"offset0", AMDGPUOperand::ImmTyOffset0, false, nullptr},
4317 {"offset1", AMDGPUOperand::ImmTyOffset1, false, nullptr},
4318 {"gds", AMDGPUOperand::ImmTyGDS, true, nullptr},
4319 {"offset", AMDGPUOperand::ImmTyOffset, false, nullptr},
4320 {"inst_offset", AMDGPUOperand::ImmTyInstOffset, false, nullptr},
4321 {"dfmt", AMDGPUOperand::ImmTyDFMT, false, nullptr},
4322 {"nfmt", AMDGPUOperand::ImmTyNFMT, false, nullptr},
4323 {"glc", AMDGPUOperand::ImmTyGLC, true, nullptr},
4324 {"slc", AMDGPUOperand::ImmTySLC, true, nullptr},
4325 {"tfe", AMDGPUOperand::ImmTyTFE, true, nullptr},
4326 {"d16", AMDGPUOperand::ImmTyD16, true, nullptr},
4327 {"high", AMDGPUOperand::ImmTyHigh, true, nullptr},
4328 {"clamp", AMDGPUOperand::ImmTyClampSI, true, nullptr},
4329 {"omod", AMDGPUOperand::ImmTyOModSI, false, ConvertOmodMul},
4330 {"unorm", AMDGPUOperand::ImmTyUNorm, true, nullptr},
4331 {"da", AMDGPUOperand::ImmTyDA, true, nullptr},
4332 {"r128", AMDGPUOperand::ImmTyR128, true, nullptr},
4333 {"lwe", AMDGPUOperand::ImmTyLWE, true, nullptr},
4334 {"dmask", AMDGPUOperand::ImmTyDMask, false, nullptr},
4335 {"row_mask", AMDGPUOperand::ImmTyDppRowMask, false, nullptr},
4336 {"bank_mask", AMDGPUOperand::ImmTyDppBankMask, false, nullptr},
4337 {"bound_ctrl", AMDGPUOperand::ImmTyDppBoundCtrl, false, ConvertBoundCtrl},
4338 {"dst_sel", AMDGPUOperand::ImmTySdwaDstSel, false, nullptr},
4339 {"src0_sel", AMDGPUOperand::ImmTySdwaSrc0Sel, false, nullptr},
4340 {"src1_sel", AMDGPUOperand::ImmTySdwaSrc1Sel, false, nullptr},
4341 {"dst_unused", AMDGPUOperand::ImmTySdwaDstUnused, false, nullptr},
4342 {"compr", AMDGPUOperand::ImmTyExpCompr, true, nullptr },
4343 {"vm", AMDGPUOperand::ImmTyExpVM, true, nullptr},
4344 {"op_sel", AMDGPUOperand::ImmTyOpSel, false, nullptr},
4345 {"op_sel_hi", AMDGPUOperand::ImmTyOpSelHi, false, nullptr},
4346 {"neg_lo", AMDGPUOperand::ImmTyNegLo, false, nullptr},
4347 {"neg_hi", AMDGPUOperand::ImmTyNegHi, false, nullptr}
4348};
4349
4350OperandMatchResultTy AMDGPUAsmParser::parseOptionalOperand(OperandVector &Operands) {
4351 unsigned size = Operands.size();
4352 assert(size > 0);
4353
4354 OperandMatchResultTy res = parseOptionalOpr(Operands);
4355
4356 // This is a hack to enable hardcoded mandatory operands which follow
4357 // optional operands.
4358 //
4359 // The current design assumes that all operands after the first optional
4360 // operand are also optional; however, the implementation of some
4361 // instructions violates this rule (e.g. flat/global atomics, which have
4362 // hardcoded 'glc' operands).
4363 //
4364 // To alleviate this problem, we (implicitly) parse extra operands so that
4365 // the autogenerated parser of custom operands never hits a hardcoded one.
4366
4367 if (size == 1 || ((AMDGPUOperand &)*Operands[size - 1]).isRegKind()) {
4368
4369 // We have parsed the first optional operand.
4370 // Parse as many operands as necessary to skip all mandatory operands.
4371
4372 for (unsigned i = 0; i < MAX_OPR_LOOKAHEAD; ++i) {
4373 if (res != MatchOperand_Success ||
4374 getLexer().is(AsmToken::EndOfStatement)) break;
4375 if (getLexer().is(AsmToken::Comma)) Parser.Lex();
4376 res = parseOptionalOpr(Operands);
4377 }
4378 }
4379
4380 return res;
4381}
4382
4383OperandMatchResultTy AMDGPUAsmParser::parseOptionalOpr(OperandVector &Operands) {
4384 OperandMatchResultTy res;
4385 for (const OptionalOperand &Op : AMDGPUOptionalOperandTable) {
4386 // try to parse any optional operand here
4387 if (Op.IsBit) {
4388 res = parseNamedBit(Op.Name, Operands, Op.Type);
4389 } else if (Op.Type == AMDGPUOperand::ImmTyOModSI) {
4390 res = parseOModOperand(Operands);
4391 } else if (Op.Type == AMDGPUOperand::ImmTySdwaDstSel ||
4392 Op.Type == AMDGPUOperand::ImmTySdwaSrc0Sel ||
4393 Op.Type == AMDGPUOperand::ImmTySdwaSrc1Sel) {
4394 res = parseSDWASel(Operands, Op.Name, Op.Type);
4395 } else if (Op.Type == AMDGPUOperand::ImmTySdwaDstUnused) {
4396 res = parseSDWADstUnused(Operands);
4397 } else if (Op.Type == AMDGPUOperand::ImmTyOpSel ||
4398 Op.Type == AMDGPUOperand::ImmTyOpSelHi ||
4399 Op.Type == AMDGPUOperand::ImmTyNegLo ||
4400 Op.Type == AMDGPUOperand::ImmTyNegHi) {
4401 res = parseOperandArrayWithPrefix(Op.Name, Operands, Op.Type,
4402 Op.ConvertResult);
4403 } else {
4404 res = parseIntWithPrefix(Op.Name, Operands, Op.Type, Op.ConvertResult);
4405 }
4406 if (res != MatchOperand_NoMatch) {
4407 return res;
4408 }
4409 }
4410 return MatchOperand_NoMatch;
4411}
4412
4413OperandMatchResultTy AMDGPUAsmParser::parseOModOperand(OperandVector &Operands) {
4414 StringRef Name = Parser.getTok().getString();
4415 if (Name == "mul") {
4416 return parseIntWithPrefix("mul", Operands,
4417 AMDGPUOperand::ImmTyOModSI, ConvertOmodMul);
4418 }
4419
4420 if (Name == "div") {
4421 return parseIntWithPrefix("div", Operands,
4422 AMDGPUOperand::ImmTyOModSI, ConvertOmodDiv);
4423 }
4424
4425 return MatchOperand_NoMatch;
4426}
4427
4428void AMDGPUAsmParser::cvtVOP3OpSel(MCInst &Inst, const OperandVector &Operands) {
4429 cvtVOP3P(Inst, Operands);
4430
4431 int Opc = Inst.getOpcode();
4432
4433 int SrcNum;
4434 const int Ops[] = { AMDGPU::OpName::src0,
4435 AMDGPU::OpName::src1,
4436 AMDGPU::OpName::src2 };
4437 for (SrcNum = 0;
4438 SrcNum < 3 && AMDGPU::getNamedOperandIdx(Opc, Ops[SrcNum]) != -1;
4439 ++SrcNum);
4440 assert(SrcNum > 0);
4441
4442 int OpSelIdx = AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::op_sel);
4443 unsigned OpSel = Inst.getOperand(OpSelIdx).getImm();
4444
4445 if ((OpSel & (1 << SrcNum)) != 0) {
4446 int ModIdx = AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::src0_modifiers);
4447 uint32_t ModVal = Inst.getOperand(ModIdx).getImm();
4448 Inst.getOperand(ModIdx).setImm(ModVal | SISrcMods::DST_OP_SEL);
4449 }
4450}
4451
4452static bool isRegOrImmWithInputMods(const MCInstrDesc &Desc, unsigned OpNum) {
4453 // 1. This operand is an input-modifiers operand
4454 return Desc.OpInfo[OpNum].OperandType == AMDGPU::OPERAND_INPUT_MODS
4455 // 2. It is not the last operand
4456 && Desc.NumOperands > (OpNum + 1)
4457 // 3. The next operand is a register-class operand
4458 && Desc.OpInfo[OpNum + 1].RegClass != -1
4459 // 4. The next register is not tied to any other operand
4460 && Desc.getOperandConstraint(OpNum + 1, MCOI::OperandConstraint::TIED_TO) == -1;
4461}
4462
4463void AMDGPUAsmParser::cvtVOP3Interp(MCInst &Inst, const OperandVector &Operands)
4464{
4465 OptionalImmIndexMap OptionalIdx;
4466 unsigned Opc = Inst.getOpcode();
4467
4468 unsigned I = 1;
4469 const MCInstrDesc &Desc = MII.get(Inst.getOpcode());
4470 for (unsigned J = 0; J < Desc.getNumDefs(); ++J) {
4471 ((AMDGPUOperand &)*Operands[I++]).addRegOperands(Inst, 1);
4472 }
4473
4474 for (unsigned E = Operands.size(); I != E; ++I) {
4475 AMDGPUOperand &Op = ((AMDGPUOperand &)*Operands[I]);
4476 if (isRegOrImmWithInputMods(Desc, Inst.getNumOperands())) {
4477 Op.addRegOrImmWithFPInputModsOperands(Inst, 2);
4478 } else if (Op.isInterpSlot() ||
4479 Op.isInterpAttr() ||
4480 Op.isAttrChan()) {
4481 Inst.addOperand(MCOperand::createImm(Op.Imm.Val));
4482 } else if (Op.isImmModifier()) {
4483 OptionalIdx[Op.getImmTy()] = I;
4484 } else {
4485 llvm_unreachable("unhandled operand type");
4486 }
4487 }
4488
4489 if (AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::high) != -1) {
4490 addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyHigh);
4491 }
4492
4493 if (AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::clamp) != -1) {
4494 addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyClampSI);
4495 }
4496
4497 if (AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::omod) != -1) {
4498 addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyOModSI);
4499 }
4500}
4501
4502void AMDGPUAsmParser::cvtVOP3(MCInst &Inst, const OperandVector &Operands,
4503 OptionalImmIndexMap &OptionalIdx) {
4504 unsigned Opc = Inst.getOpcode();
4505
4506 unsigned I = 1;
4507 const MCInstrDesc &Desc = MII.get(Inst.getOpcode());
4508 for (unsigned J = 0; J < Desc.getNumDefs(); ++J) {
4509 ((AMDGPUOperand &)*Operands[I++]).addRegOperands(Inst, 1);
4510 }
4511
4512 if (AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::src0_modifiers) != -1) {
4513 // This instruction has src modifiers
4514 for (unsigned E = Operands.size(); I != E; ++I) {
4515 AMDGPUOperand &Op = ((AMDGPUOperand &)*Operands[I]);
4516 if (isRegOrImmWithInputMods(Desc, Inst.getNumOperands())) {
4517 Op.addRegOrImmWithFPInputModsOperands(Inst, 2);
4518 } else if (Op.isImmModifier()) {
4519 OptionalIdx[Op.getImmTy()] = I;
4520 } else if (Op.isRegOrImm()) {
4521 Op.addRegOrImmOperands(Inst, 1);
4522 } else {
4523 llvm_unreachable("unhandled operand type");
4524 }
4525 }
4526 } else {
4527 // No src modifiers
4528 for (unsigned E = Operands.size(); I != E; ++I) {
4529 AMDGPUOperand &Op = ((AMDGPUOperand &)*Operands[I]);
4530 if (Op.isMod()) {
4531 OptionalIdx[Op.getImmTy()] = I;
4532 } else {
4533 Op.addRegOrImmOperands(Inst, 1);
4534 }
4535 }
4536 }
4537
4538 if (AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::clamp) != -1) {
4539 addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyClampSI);
4540 }
4541
4542 if (AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::omod) != -1) {
4543 addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyOModSI);
4544 }
4545
4546 // Special case for v_mac_{f16, f32}:
4547 // it has a src2 register operand that is tied to the dst operand.
4548 // We do not allow modifiers for this operand in the assembler, so
4549 // src2_modifiers should be 0.
4550 if (Opc == AMDGPU::V_MAC_F32_e64_si || Opc == AMDGPU::V_MAC_F32_e64_vi ||
4551 Opc == AMDGPU::V_MAC_F16_e64_vi) {
4552 auto it = Inst.begin();
4553 std::advance(it, AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::src2_modifiers));
4554 it = Inst.insert(it, MCOperand::createImm(0)); // no modifiers for src2
4555 ++it;
4556 Inst.insert(it, Inst.getOperand(0)); // src2 = dst
4557 }
4558}
4559
4560void AMDGPUAsmParser::cvtVOP3(MCInst &Inst, const OperandVector &Operands) {
4561 OptionalImmIndexMap OptionalIdx;
4562 cvtVOP3(Inst, Operands, OptionalIdx);
4563}
4564
4565void AMDGPUAsmParser::cvtVOP3P(MCInst &Inst,
4566 const OperandVector &Operands) {
4567 OptionalImmIndexMap OptIdx;
4568 const int Opc = Inst.getOpcode();
4569 const MCInstrDesc &Desc = MII.get(Opc);
4570
4571 const bool IsPacked = (Desc.TSFlags & SIInstrFlags::IsPacked) != 0;
4572
4573 cvtVOP3(Inst, Operands, OptIdx);
4574
4575 if (AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::vdst_in) != -1) {
4576 assert(!IsPacked);
4577 Inst.addOperand(Inst.getOperand(0));
4578 }
4579
4580 // FIXME: This is messy. Parse the modifiers as if it was a normal VOP3
4581 // instruction, and then figure out where to actually put the modifiers
4582
4583 addOptionalImmOperand(Inst, Operands, OptIdx, AMDGPUOperand::ImmTyOpSel);
4584
4585 int OpSelHiIdx = AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::op_sel_hi);
4586 if (OpSelHiIdx != -1) {
4587 int DefaultVal = IsPacked ? -1 : 0;
4588 addOptionalImmOperand(Inst, Operands, OptIdx, AMDGPUOperand::ImmTyOpSelHi,
4589 DefaultVal);
4590 }
4591
4592 int NegLoIdx = AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::neg_lo);
4593 if (NegLoIdx != -1) {
4594 assert(IsPacked);
4595 addOptionalImmOperand(Inst, Operands, OptIdx, AMDGPUOperand::ImmTyNegLo);
4596 addOptionalImmOperand(Inst, Operands, OptIdx, AMDGPUOperand::ImmTyNegHi);
4597 }
4598
4599 const int Ops[] = { AMDGPU::OpName::src0,
4600 AMDGPU::OpName::src1,
4601 AMDGPU::OpName::src2 };
4602 const int ModOps[] = { AMDGPU::OpName::src0_modifiers,
4603 AMDGPU::OpName::src1_modifiers,
4604 AMDGPU::OpName::src2_modifiers };
4605
4606 int OpSelIdx = AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::op_sel);
4607
4608 unsigned OpSel = Inst.getOperand(OpSelIdx).getImm();
4609 unsigned OpSelHi = 0;
4610 unsigned NegLo = 0;
4611 unsigned NegHi = 0;
4612
4613 if (OpSelHiIdx != -1) {
4614 OpSelHi = Inst.getOperand(OpSelHiIdx).getImm();
4615 }
4616
4617 if (NegLoIdx != -1) {
4618 int NegHiIdx = AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::neg_hi);
4619 NegLo = Inst.getOperand(NegLoIdx).getImm();
4620 NegHi = Inst.getOperand(NegHiIdx).getImm();
4621 }
4622
4623 for (int J = 0; J < 3; ++J) {
4624 int OpIdx = AMDGPU::getNamedOperandIdx(Opc, Ops[J]);
4625 if (OpIdx == -1)
4626 break;
4627
4628 uint32_t ModVal = 0;
4629
4630 if ((OpSel & (1 << J)) != 0)
4631 ModVal |= SISrcMods::OP_SEL_0;
4632
4633 if ((OpSelHi & (1 << J)) != 0)
4634 ModVal |= SISrcMods::OP_SEL_1;
4635
4636 if ((NegLo & (1 << J)) != 0)
4637 ModVal |= SISrcMods::NEG;
4638
4639 if ((NegHi & (1 << J)) != 0)
4640 ModVal |= SISrcMods::NEG_HI;
4641
4642 int ModIdx = AMDGPU::getNamedOperandIdx(Opc, ModOps[J]);
4643
4644 Inst.getOperand(ModIdx).setImm(Inst.getOperand(ModIdx).getImm() | ModVal);
4645 }
4646}
4647
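// A worked example of the modifier folding above: for op_sel:[1,0,1] on a
// three-source instruction, bits 0 and 2 of the op_sel operand are set, so
// SISrcMods::OP_SEL_0 is OR'ed into src0_modifiers and src2_modifiers.
// op_sel_hi, neg_lo and neg_hi fold the same way into OP_SEL_1, NEG and
// NEG_HI respectively, one bit per source operand.
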
4648//===----------------------------------------------------------------------===//
4649// dpp
4650//===----------------------------------------------------------------------===//
4651
4652bool AMDGPUOperand::isDPPCtrl() const {
4653 bool result = isImm() && getImmTy() == ImmTyDppCtrl && isUInt<9>(getImm());
4654 if (result) {
4655 int64_t Imm = getImm();
4656 return ((Imm >= 0x000) && (Imm <= 0x0ff)) ||
4657 ((Imm >= 0x101) && (Imm <= 0x10f)) ||
4658 ((Imm >= 0x111) && (Imm <= 0x11f)) ||
4659 ((Imm >= 0x121) && (Imm <= 0x12f)) ||
4660 (Imm == 0x130) ||
4661 (Imm == 0x134) ||
4662 (Imm == 0x138) ||
4663 (Imm == 0x13c) ||
4664 (Imm == 0x140) ||
4665 (Imm == 0x141) ||
4666 (Imm == 0x142) ||
4667 (Imm == 0x143);
4668 }
4669 return false;
4670}
4671
4672bool AMDGPUOperand::isGPRIdxMode() const {
4673 return isImm() && isUInt<4>(getImm());
4674}
4675
4676bool AMDGPUOperand::isS16Imm() const {
4677 return isImm() && (isInt<16>(getImm()) || isUInt<16>(getImm()));
4678}
4679
4680bool AMDGPUOperand::isU16Imm() const {
4681 return isImm() && isUInt<16>(getImm());
4682}
4683
4684OperandMatchResultTy
4685AMDGPUAsmParser::parseDPPCtrl(OperandVector &Operands) {
4686 SMLoc S = Parser.getTok().getLoc();
4687 StringRef Prefix;
4688 int64_t Int;
4689
4690 if (getLexer().getKind() == AsmToken::Identifier) {
4691 Prefix = Parser.getTok().getString();
4692 } else {
4693 return MatchOperand_NoMatch;
4694 }
4695
4696 if (Prefix == "row_mirror") {
4697 Int = 0x140;
4698 Parser.Lex();
4699 } else if (Prefix == "row_half_mirror") {
4700 Int = 0x141;
4701 Parser.Lex();
4702 } else {
4703 // Check to prevent parseDPPCtrlOps from eating invalid tokens
4704 if (Prefix != "quad_perm"
4705 && Prefix != "row_shl"
4706 && Prefix != "row_shr"
4707 && Prefix != "row_ror"
4708 && Prefix != "wave_shl"
4709 && Prefix != "wave_rol"
4710 && Prefix != "wave_shr"
4711 && Prefix != "wave_ror"
4712 && Prefix != "row_bcast") {
4713 return MatchOperand_NoMatch;
4714 }
4715
4716 Parser.Lex();
4717 if (getLexer().isNot(AsmToken::Colon))
4718 return MatchOperand_ParseFail;
4719
4720 if (Prefix == "quad_perm") {
4721 // quad_perm:[%d,%d,%d,%d]
4722 Parser.Lex();
4723 if (getLexer().isNot(AsmToken::LBrac))
4724 return MatchOperand_ParseFail;
4725 Parser.Lex();
4726
4727 if (getParser().parseAbsoluteExpression(Int) || !(0 <= Int && Int <=3))
4728 return MatchOperand_ParseFail;
4729
4730 for (int i = 0; i < 3; ++i) {
4731 if (getLexer().isNot(AsmToken::Comma))
4732 return MatchOperand_ParseFail;
4733 Parser.Lex();
4734
4735 int64_t Temp;
4736 if (getParser().parseAbsoluteExpression(Temp) || !(0 <= Temp && Temp <=3))
4737 return MatchOperand_ParseFail;
4738 const int shift = i*2 + 2;
4739 Int += (Temp << shift);
4740 }
4741
4742 if (getLexer().isNot(AsmToken::RBrac))
4743 return MatchOperand_ParseFail;
4744 Parser.Lex();
4745 } else {
4746 // sel:%d
4747 Parser.Lex();
4748 if (getParser().parseAbsoluteExpression(Int))
4749 return MatchOperand_ParseFail;
4750
4751 if (Prefix == "row_shl" && 1 <= Int && Int <= 15) {
4752 Int |= 0x100;
4753 } else if (Prefix == "row_shr" && 1 <= Int && Int <= 15) {
4754 Int |= 0x110;
4755 } else if (Prefix == "row_ror" && 1 <= Int && Int <= 15) {
4756 Int |= 0x120;
4757 } else if (Prefix == "wave_shl" && 1 == Int) {
4758 Int = 0x130;
4759 } else if (Prefix == "wave_rol" && 1 == Int) {
4760 Int = 0x134;
4761 } else if (Prefix == "wave_shr" && 1 == Int) {
4762 Int = 0x138;
4763 } else if (Prefix == "wave_ror" && 1 == Int) {
4764 Int = 0x13C;
4765 } else if (Prefix == "row_bcast") {
4766 if (Int == 15) {
4767 Int = 0x142;
4768 } else if (Int == 31) {
4769 Int = 0x143;
4770 } else {
4771 return MatchOperand_ParseFail;
4772 }
4773 } else {
4774 return MatchOperand_ParseFail;
4775 }
4776 }
4777 }
4778
4779 Operands.push_back(AMDGPUOperand::CreateImm(this, Int, S, AMDGPUOperand::ImmTyDppCtrl));
4780 return MatchOperand_Success;
4781}
4782
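// Worked examples of the dpp_ctrl encodings produced above (derived from the
// code): quad_perm:[0,1,2,3] packs two bits per lane select,
//   0 | (1 << 2) | (2 << 4) | (3 << 6) = 0xE4;
// row_shl:1 is 0x101 and row_ror:15 is 0x12F; wave_shl:1, wave_rol:1,
// wave_shr:1 and wave_ror:1 are the fixed codes 0x130/0x134/0x138/0x13C;
// row_mirror and row_half_mirror are 0x140/0x141; and row_bcast:15 /
// row_bcast:31 are 0x142/0x143.
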
4783AMDGPUOperand::Ptr AMDGPUAsmParser::defaultRowMask() const {
4784 return AMDGPUOperand::CreateImm(this, 0xf, SMLoc(), AMDGPUOperand::ImmTyDppRowMask);
4785}
4786
4787AMDGPUOperand::Ptr AMDGPUAsmParser::defaultBankMask() const {
4788 return AMDGPUOperand::CreateImm(this, 0xf, SMLoc(), AMDGPUOperand::ImmTyDppBankMask);
4789}
4790
4791AMDGPUOperand::Ptr AMDGPUAsmParser::defaultBoundCtrl() const {
4792 return AMDGPUOperand::CreateImm(this, 0, SMLoc(), AMDGPUOperand::ImmTyDppBoundCtrl);
4793}
4794
4795void AMDGPUAsmParser::cvtDPP(MCInst &Inst, const OperandVector &Operands) {
4796 OptionalImmIndexMap OptionalIdx;
4797
4798 unsigned I = 1;
4799 const MCInstrDesc &Desc = MII.get(Inst.getOpcode());
4800 for (unsigned J = 0; J < Desc.getNumDefs(); ++J) {
4801 ((AMDGPUOperand &)*Operands[I++]).addRegOperands(Inst, 1);
4802 }
4803
4804 // All DPP instructions with at least one source operand have a fake "old"
4805 // source at the beginning that's tied to the dst operand. Handle it here.
4806 if (Desc.getNumOperands() >= 2)
4807 Inst.addOperand(Inst.getOperand(0));
4808
4809 for (unsigned E = Operands.size(); I != E; ++I) {
4810 AMDGPUOperand &Op = ((AMDGPUOperand &)*Operands[I]);
4811 // Add the register arguments
4812 if (Op.isReg() && Op.Reg.RegNo == AMDGPU::VCC) {
4813 // VOP2b (v_add_u32, v_sub_u32, ...) dpp uses the "vcc" token.
4814 // Skip it.
4815 continue;
4816 } if (isRegOrImmWithInputMods(Desc, Inst.getNumOperands())) {
4817 Op.addRegWithFPInputModsOperands(Inst, 2);
4818 } else if (Op.isDPPCtrl()) {
4819 Op.addImmOperands(Inst, 1);
4820 } else if (Op.isImm()) {
4821 // Handle optional arguments
4822 OptionalIdx[Op.getImmTy()] = I;
4823 } else {
4824 llvm_unreachable("Invalid operand type");
4825 }
4826 }
4827
4828 addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyDppRowMask, 0xf);
4829 addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyDppBankMask, 0xf);
4830 addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyDppBoundCtrl);
4831}
4832
4833//===----------------------------------------------------------------------===//
4834// sdwa
4835//===----------------------------------------------------------------------===//
4836
4837OperandMatchResultTy
4838AMDGPUAsmParser::parseSDWASel(OperandVector &Operands, StringRef Prefix,
4839 AMDGPUOperand::ImmTy Type) {
4840 using namespace llvm::AMDGPU::SDWA;
4841
4842 SMLoc S = Parser.getTok().getLoc();
4843 StringRef Value;
4844 OperandMatchResultTy res;
4845
4846 res = parseStringWithPrefix(Prefix, Value);
4847 if (res != MatchOperand_Success) {
4848 return res;
4849 }
4850
4851 int64_t Int;
4852 Int = StringSwitch<int64_t>(Value)
4853 .Case("BYTE_0", SdwaSel::BYTE_0)
4854 .Case("BYTE_1", SdwaSel::BYTE_1)
4855 .Case("BYTE_2", SdwaSel::BYTE_2)
4856 .Case("BYTE_3", SdwaSel::BYTE_3)
4857 .Case("WORD_0", SdwaSel::WORD_0)
4858 .Case("WORD_1", SdwaSel::WORD_1)
4859 .Case("DWORD", SdwaSel::DWORD)
4860 .Default(0xffffffff);
4861 Parser.Lex(); // eat last token
4862
4863 if (Int == 0xffffffff) {
4864 return MatchOperand_ParseFail;
4865 }
4866
4867 Operands.push_back(AMDGPUOperand::CreateImm(this, Int, S, Type));
4868 return MatchOperand_Success;
4869}
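Editor's note: the StringSwitch above can be exercised in isolation. A sketch under the assumption that the SdwaSel enumerators are the consecutive values 0..6 (BYTE_0 through DWORD); check llvm::AMDGPU::SDWA in SIDefines.h before relying on that:

#include <cstdint>
#include <string>

// Standalone model of parseSDWASel's string-to-value mapping.
// 0xffffffff is the same "no match" sentinel used above.
int64_t parseSelToken(const std::string &Value) {
  static const char *Names[] = {"BYTE_0", "BYTE_1", "BYTE_2", "BYTE_3",
                                "WORD_0", "WORD_1", "DWORD"};
  for (int64_t I = 0; I < 7; ++I)
    if (Value == Names[I])
      return I; // assumed to equal SdwaSel::BYTE_0 + I
  return 0xffffffff;
}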
4870
4871OperandMatchResultTy
4872AMDGPUAsmParser::parseSDWADstUnused(OperandVector &Operands) {
4873 using namespace llvm::AMDGPU::SDWA;
4874
4875 SMLoc S = Parser.getTok().getLoc();
4876 StringRef Value;
4877 OperandMatchResultTy res;
4878
4879 res = parseStringWithPrefix("dst_unused", Value);
4880 if (res != MatchOperand_Success) {
4881 return res;
4882 }
4883
4884 int64_t Int;
4885 Int = StringSwitch<int64_t>(Value)
4886 .Case("UNUSED_PAD", DstUnused::UNUSED_PAD)
4887 .Case("UNUSED_SEXT", DstUnused::UNUSED_SEXT)
4888 .Case("UNUSED_PRESERVE", DstUnused::UNUSED_PRESERVE)
4889 .Default(0xffffffff);
4890 Parser.Lex(); // eat last token
4891
4892 if (Int == 0xffffffff) {
4893 return MatchOperand_ParseFail;
4894 }
4895
4896 Operands.push_back(AMDGPUOperand::CreateImm(this, Int, S, AMDGPUOperand::ImmTySdwaDstUnused));
4897 return MatchOperand_Success;
4898}
4899
4900void AMDGPUAsmParser::cvtSdwaVOP1(MCInst &Inst, const OperandVector &Operands) {
4901 cvtSDWA(Inst, Operands, SIInstrFlags::VOP1);
4902}
4903
4904void AMDGPUAsmParser::cvtSdwaVOP2(MCInst &Inst, const OperandVector &Operands) {
4905 cvtSDWA(Inst, Operands, SIInstrFlags::VOP2);
4906}
4907
4908void AMDGPUAsmParser::cvtSdwaVOP2b(MCInst &Inst, const OperandVector &Operands) {
4909 cvtSDWA(Inst, Operands, SIInstrFlags::VOP2, true);
4910}
4911
4912void AMDGPUAsmParser::cvtSdwaVOPC(MCInst &Inst, const OperandVector &Operands) {
4913 cvtSDWA(Inst, Operands, SIInstrFlags::VOPC, isVI());
4914}
4915
4916void AMDGPUAsmParser::cvtSDWA(MCInst &Inst, const OperandVector &Operands,
4917 uint64_t BasicInstType, bool skipVcc) {
4918 using namespace llvm::AMDGPU::SDWA;
4919
4920 OptionalImmIndexMap OptionalIdx;
4921 bool skippedVcc = false;
4922
4923 unsigned I = 1;
4924 const MCInstrDesc &Desc = MII.get(Inst.getOpcode());
4925 for (unsigned J = 0; J < Desc.getNumDefs(); ++J) {
4926 ((AMDGPUOperand &)*Operands[I++]).addRegOperands(Inst, 1);
4927 }
4928
4929 for (unsigned E = Operands.size(); I != E; ++I) {
4930 AMDGPUOperand &Op = ((AMDGPUOperand &)*Operands[I]);
4931 if (skipVcc && !skippedVcc && Op.isReg() && Op.Reg.RegNo == AMDGPU::VCC) {
4932 // VOP2b SDWA forms (v_add_u32, v_sub_u32, ...) use the "vcc" token as dst.
4933 // Skip it if it is the 2nd operand (e.g. v_add_i32_sdwa v1, vcc, v2, v3)
4934 // or the 4th operand (v_addc_u32_sdwa v1, vcc, v2, v3, vcc).
4935 // Skip VCC only if we didn't skip it on the previous iteration.
4936 if (BasicInstType == SIInstrFlags::VOP2 &&
4937 (Inst.getNumOperands() == 1 || Inst.getNumOperands() == 5)) {
4938 skippedVcc = true;
4939 continue;
4940 } else if (BasicInstType == SIInstrFlags::VOPC &&
4941 Inst.getNumOperands() == 0) {
4942 skippedVcc = true;
4943 continue;
4944 }
4945 }
4946 if (isRegOrImmWithInputMods(Desc, Inst.getNumOperands())) {
4947 Op.addRegOrImmWithInputModsOperands(Inst, 2);
4948 } else if (Op.isImm()) {
4949 // Handle optional arguments
4950 OptionalIdx[Op.getImmTy()] = I;
4951 } else {
4952 llvm_unreachable("Invalid operand type");
4953 }
4954 skippedVcc = false;
4955 }
4956
4957 if (Inst.getOpcode() != AMDGPU::V_NOP_sdwa_gfx9 &&
4958 Inst.getOpcode() != AMDGPU::V_NOP_sdwa_vi) {
4959 // v_nop_sdwa_vi/gfx9 has no optional sdwa arguments
4960 switch (BasicInstType) {
4961 case SIInstrFlags::VOP1:
4962 addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyClampSI, 0);
4963 if (AMDGPU::getNamedOperandIdx(Inst.getOpcode(), AMDGPU::OpName::omod) != -1) {
4964 addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyOModSI, 0);
4965 }
4966 addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTySdwaDstSel, SdwaSel::DWORD);
4967 addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTySdwaDstUnused, DstUnused::UNUSED_PRESERVE);
4968 addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTySdwaSrc0Sel, SdwaSel::DWORD);
4969 break;
4970
4971 case SIInstrFlags::VOP2:
4972 addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyClampSI, 0);
4973 if (AMDGPU::getNamedOperandIdx(Inst.getOpcode(), AMDGPU::OpName::omod) != -1) {
4974 addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyOModSI, 0);
4975 }
4976 addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTySdwaDstSel, SdwaSel::DWORD);
4977 addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTySdwaDstUnused, DstUnused::UNUSED_PRESERVE);
4978 addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTySdwaSrc0Sel, SdwaSel::DWORD);
4979 addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTySdwaSrc1Sel, SdwaSel::DWORD);
4980 break;
4981
4982 case SIInstrFlags::VOPC:
4983 addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyClampSI, 0);
4984 addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTySdwaSrc0Sel, SdwaSel::DWORD);
4985 addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTySdwaSrc1Sel, SdwaSel::DWORD);
4986 break;
4987
4988 default:
4989 llvm_unreachable("Invalid instruction type. Only VOP1, VOP2 and VOPC allowed");
4990 }
4991 }
4992
4993 // Special case for v_mac_{f16, f32}:
4994 // it has a src2 register operand that is tied to the dst operand.
4995 if (Inst.getOpcode() == AMDGPU::V_MAC_F32_sdwa_vi ||
4996 Inst.getOpcode() == AMDGPU::V_MAC_F16_sdwa_vi) {
4997 auto it = Inst.begin();
4998 std::advance(
4999 it, AMDGPU::getNamedOperandIdx(Inst.getOpcode(), AMDGPU::OpName::src2));
5000 Inst.insert(it, Inst.getOperand(0)); // src2 = dst
5001 }
5002}
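Editor's note: the v_mac fix-up above just clones the destination operand into the src2 slot. Reduced to plain indices (tieSrc2ToDst is our name):

#include <vector>

// Model of the v_mac_{f16,f32} special case: insert a copy of
// operand 0 (dst) at the src2 position so src2 ends up tied to dst.
void tieSrc2ToDst(std::vector<int> &Ops, unsigned Src2Idx) {
  Ops.insert(Ops.begin() + Src2Idx, Ops[0]);
}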
5003
5004/// Force static initialization.
5005extern "C" void LLVMInitializeAMDGPUAsmParser() {
5006 RegisterMCAsmParser<AMDGPUAsmParser> A(getTheAMDGPUTarget());
5007 RegisterMCAsmParser<AMDGPUAsmParser> B(getTheGCNTarget());
5008}
5009
5010#define GET_REGISTER_MATCHER
5011#define GET_MATCHER_IMPLEMENTATION
5012#define GET_MNEMONIC_SPELL_CHECKER
5013#include "AMDGPUGenAsmMatcher.inc"
5014
5015 // This function should be defined after the auto-generated include so that
5016 // the MatchClassKind enum is available.
5017unsigned AMDGPUAsmParser::validateTargetOperandClass(MCParsedAsmOperand &Op,
5018 unsigned Kind) {
5019 // Tokens like "glc" would be parsed as immediate operands in ParseOperand().
5020 // But MatchInstructionImpl() expects a token and fails to validate the
5021 // operand. This method checks whether we were given an immediate operand but
5022 // expected the corresponding token.
5023 AMDGPUOperand &Operand = (AMDGPUOperand&)Op;
5024 switch (Kind) {
5025 case MCK_addr64:
5026 return Operand.isAddr64() ? Match_Success : Match_InvalidOperand;
5027 case MCK_gds:
5028 return Operand.isGDS() ? Match_Success : Match_InvalidOperand;
5029 case MCK_glc:
5030 return Operand.isGLC() ? Match_Success : Match_InvalidOperand;
5031 case MCK_d16:
5032 return Operand.isD16() ? Match_Success : Match_InvalidOperand;
5033 case MCK_idxen:
5034 return Operand.isIdxen() ? Match_Success : Match_InvalidOperand;
5035 case MCK_offen:
5036 return Operand.isOffen() ? Match_Success : Match_InvalidOperand;
5037 case MCK_SSrcB32:
5038 // When operands have expression values, they will return true for isToken,
5039 // because it is not possible to distinguish between a token and an
5040 // expression at parse time. MatchInstructionImpl() will always try to
5041 // match an operand as a token, when isToken returns true, and when the
5042 // name of the expression is not a valid token, the match will fail,
5043 // so we need to handle it here.
5044 return Operand.isSSrcB32() ? Match_Success : Match_InvalidOperand;
5045 case MCK_SSrcF32:
5046 return Operand.isSSrcF32() ? Match_Success : Match_InvalidOperand;
5047 case MCK_SoppBrTarget:
5048 return Operand.isSoppBrTarget() ? Match_Success : Match_InvalidOperand;
5049 case MCK_VReg32OrOff:
5050 return Operand.isVReg32OrOff() ? Match_Success : Match_InvalidOperand;
5051 case MCK_InterpSlot:
5052 return Operand.isInterpSlot() ? Match_Success : Match_InvalidOperand;
5053 case MCK_Attr:
5054 return Operand.isInterpAttr() ? Match_Success : Match_InvalidOperand;
5055 case MCK_AttrChan:
5056 return Operand.isAttrChan() ? Match_Success : Match_InvalidOperand;
5057 default:
5058 return Match_InvalidOperand;
5059 }
5060}
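Editor's note: the method is a plain kind-to-predicate dispatch. A reduced sketch of the pattern (all names here are illustrative, not the generated matcher's):

enum MatchResult { Match_Success, Match_InvalidOperand };
enum MatcherKind { MCK_glc_model, MCK_gds_model };

struct OperandModel {
  bool GLC = false, GDS = false;
};

// For each matcher class that corresponds to a token parsed as an
// immediate, accept the operand iff its predicate holds.
MatchResult validateModel(const OperandModel &Op, MatcherKind Kind) {
  switch (Kind) {
  case MCK_glc_model: return Op.GLC ? Match_Success : Match_InvalidOperand;
  case MCK_gds_model: return Op.GDS ? Match_Success : Match_InvalidOperand;
  }
  return Match_InvalidOperand;
}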

/build/llvm-toolchain-snapshot-7~svn325118/include/llvm/MC/MCParser/MCAsmParserExtension.h

1//===- llvm/MC/MCAsmParserExtension.h - Asm Parser Hooks --------*- C++ -*-===//
2//
3// The LLVM Compiler Infrastructure
4//
5// This file is distributed under the University of Illinois Open Source
6// License. See LICENSE.TXT for details.
7//
8//===----------------------------------------------------------------------===//
9
10#ifndef LLVM_MC_MCPARSER_MCASMPARSEREXTENSION_H
11#define LLVM_MC_MCPARSER_MCASMPARSEREXTENSION_H
12
13#include "llvm/ADT/STLExtras.h"
14#include "llvm/ADT/StringRef.h"
15#include "llvm/MC/MCParser/MCAsmLexer.h"
16#include "llvm/MC/MCParser/MCAsmParser.h"
17#include "llvm/Support/SMLoc.h"
18
19namespace llvm {
20
21class Twine;
22
23/// \brief Generic interface for extending the MCAsmParser,
24/// which is implemented by target and object file assembly parser
25/// implementations.
26class MCAsmParserExtension {
27 MCAsmParser *Parser;
28
29protected:
30 MCAsmParserExtension();
31
32 // Helper template for implementing static dispatch functions.
33 template<typename T, bool (T::*Handler)(StringRef, SMLoc)>
34 static bool HandleDirective(MCAsmParserExtension *Target,
35 StringRef Directive,
36 SMLoc DirectiveLoc) {
37 T *Obj = static_cast<T*>(Target);
38 return (Obj->*Handler)(Directive, DirectiveLoc);
39 }
40
41 bool BracketExpressionsSupported = false;
42
43public:
44 MCAsmParserExtension(const MCAsmParserExtension &) = delete;
45 MCAsmParserExtension &operator=(const MCAsmParserExtension &) = delete;
46 virtual ~MCAsmParserExtension();
47
48 /// \brief Initialize the extension for parsing using the given \p Parser.
49 /// The extension should use the AsmParser interfaces to register its
50 /// parsing routines.
51 virtual void Initialize(MCAsmParser &Parser);
52
53 /// \name MCAsmParser Proxy Interfaces
54 /// @{
55
56 MCContext &getContext() { return getParser().getContext(); }
57
58 MCAsmLexer &getLexer() { return getParser().getLexer(); }
5. Calling 'MCAsmParserExtension::getParser'
6. Returning from 'MCAsmParserExtension::getParser'
59 const MCAsmLexer &getLexer() const {
60 return const_cast<MCAsmParserExtension *>(this)->getLexer();
61 }
62
63 MCAsmParser &getParser() { return *Parser; }
64 const MCAsmParser &getParser() const {
65 return const_cast<MCAsmParserExtension*>(this)->getParser();
66 }
67
68 SourceMgr &getSourceManager() { return getParser().getSourceManager(); }
69 MCStreamer &getStreamer() { return getParser().getStreamer(); }
70
71 bool Warning(SMLoc L, const Twine &Msg) {
72 return getParser().Warning(L, Msg);
73 }
74
75 bool Error(SMLoc L, const Twine &Msg, SMRange Range = SMRange()) {
76 return getParser().Error(L, Msg, Range);
77 }
78
79 void Note(SMLoc L, const Twine &Msg) {
80 getParser().Note(L, Msg);
81 }
82
83 bool TokError(const Twine &Msg) {
84 return getParser().TokError(Msg);
30. Calling 'MCAsmParserExtension::getParser'
31. Returning from 'MCAsmParserExtension::getParser'
85 }
86
87 const AsmToken &Lex() { return getParser().Lex(); }
88 const AsmToken &getTok() { return getParser().getTok(); }
89 bool parseToken(AsmToken::TokenKind T,
90 const Twine &Msg = "unexpected token") {
91 return getParser().parseToken(T, Msg);
92 }
93
94 bool parseMany(function_ref<bool()> parseOne, bool hasComma = true) {
95 return getParser().parseMany(parseOne, hasComma);
96 }
97
98 bool parseOptionalToken(AsmToken::TokenKind T) {
99 return getParser().parseOptionalToken(T);
100 }
101
102 bool check(bool P, const Twine &Msg) {
103 return getParser().check(P, Msg);
104 }
105
106 bool check(bool P, SMLoc Loc, const Twine &Msg) {
107 return getParser().check(P, Loc, Msg);
108 }
109
110 bool addErrorSuffix(const Twine &Suffix) {
111 return getParser().addErrorSuffix(Suffix);
112 }
113
114 bool HasBracketExpressions() const { return BracketExpressionsSupported; }
115
116 /// @}
117};
118
119} // end namespace llvm
120
121#endif // LLVM_MC_MCPARSER_MCASMPARSEREXTENSION_H
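Editor's note: HandleDirective<T, &T::handler> produces the static thunk that a directive table stores alongside the extension pointer. A hedged sketch of the wiring (the extension class and the ".foo" directive are hypothetical; addDirectiveHandler is the MCAsmParser registration hook — check MCAsmParser.h for the exact ExtensionDirectiveHandler type):

#include "llvm/MC/MCParser/MCAsmParserExtension.h"
#include <utility>

// Hypothetical extension registering one directive handler.
class MyAsmParserExtension : public llvm::MCAsmParserExtension {
public:
  bool parseFooDirective(llvm::StringRef Directive, llvm::SMLoc Loc) {
    return TokError("cannot handle " + Directive + " here");
  }

  void Initialize(llvm::MCAsmParser &P) override {
    MCAsmParserExtension::Initialize(P);
    getParser().addDirectiveHandler(
        ".foo", std::make_pair(this,
                    HandleDirective<MyAsmParserExtension,
                        &MyAsmParserExtension::parseFooDirective>));
  }
};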

/build/llvm-toolchain-snapshot-7~svn325118/include/llvm/MC/MCParser/MCAsmLexer.h

1//===- llvm/MC/MCAsmLexer.h - Abstract Asm Lexer Interface ------*- C++ -*-===//
2//
3// The LLVM Compiler Infrastructure
4//
5// This file is distributed under the University of Illinois Open Source
6// License. See LICENSE.TXT for details.
7//
8//===----------------------------------------------------------------------===//
9
10#ifndef LLVM_MC_MCPARSER_MCASMLEXER_H
11#define LLVM_MC_MCPARSER_MCASMLEXER_H
12
13#include "llvm/ADT/APInt.h"
14#include "llvm/ADT/ArrayRef.h"
15#include "llvm/ADT/SmallVector.h"
16#include "llvm/ADT/StringRef.h"
17#include "llvm/Support/SMLoc.h"
18#include <algorithm>
19#include <cassert>
20#include <cstddef>
21#include <cstdint>
22#include <string>
23
24namespace llvm {
25
26/// Target independent representation for an assembler token.
27class AsmToken {
28public:
29 enum TokenKind {
30 // Markers
31 Eof, Error,
32
33 // String values.
34 Identifier,
35 String,
36
37 // Integer values.
38 Integer,
39 BigNum, // larger than 64 bits
40
41 // Real values.
42 Real,
43
44 // Comments
45 Comment,
46 HashDirective,
47 // No-value.
48 EndOfStatement,
49 Colon,
50 Space,
51 Plus, Minus, Tilde,
52 Slash, // '/'
53 BackSlash, // '\'
54 LParen, RParen, LBrac, RBrac, LCurly, RCurly,
55 Star, Dot, Comma, Dollar, Equal, EqualEqual,
56
57 Pipe, PipePipe, Caret,
58 Amp, AmpAmp, Exclaim, ExclaimEqual, Percent, Hash,
59 Less, LessEqual, LessLess, LessGreater,
60 Greater, GreaterEqual, GreaterGreater, At,
61
62 // MIPS unary expression operators such as %neg.
63 PercentCall16, PercentCall_Hi, PercentCall_Lo, PercentDtprel_Hi,
64 PercentDtprel_Lo, PercentGot, PercentGot_Disp, PercentGot_Hi, PercentGot_Lo,
65 PercentGot_Ofst, PercentGot_Page, PercentGottprel, PercentGp_Rel, PercentHi,
66 PercentHigher, PercentHighest, PercentLo, PercentNeg, PercentPcrel_Hi,
67 PercentPcrel_Lo, PercentTlsgd, PercentTlsldm, PercentTprel_Hi,
68 PercentTprel_Lo
69 };
70
71private:
72 TokenKind Kind;
73
74 /// A reference to the entire token contents; this is always a pointer into
75 /// a memory buffer owned by the source manager.
76 StringRef Str;
77
78 APInt IntVal;
79
80public:
81 AsmToken() = default;
82 AsmToken(TokenKind Kind, StringRef Str, APInt IntVal)
83 : Kind(Kind), Str(Str), IntVal(std::move(IntVal)) {}
84 AsmToken(TokenKind Kind, StringRef Str, int64_t IntVal = 0)
85 : Kind(Kind), Str(Str), IntVal(64, IntVal, true) {}
86
87 TokenKind getKind() const { return Kind; }
88 bool is(TokenKind K) const { return Kind == K; }
89 bool isNot(TokenKind K) const { return Kind != K; }
12. Assuming the condition is false
90
91 SMLoc getLoc() const;
92 SMLoc getEndLoc() const;
93 SMRange getLocRange() const;
94
95 /// Get the contents of a string token (without quotes).
96 StringRef getStringContents() const {
97 assert(Kind == String && "This token isn't a string!");
98 return Str.slice(1, Str.size() - 1);
99 }
100
101 /// Get the identifier string for the current token, which should be an
102 /// identifier or a string. This gets the portion of the string which should
103 /// be used as the identifier, e.g., it does not include the quotes on
104 /// strings.
105 StringRef getIdentifier() const {
106 if (Kind == Identifier)
107 return getString();
108 return getStringContents();
109 }
110
111 /// Get the string for the current token, this includes all characters (for
112 /// example, the quotes on strings) in the token.
113 ///
114 /// The returned StringRef points into the source manager's memory buffer, and
115 /// is safe to store across calls to Lex().
116 StringRef getString() const { return Str; }
117
118 // FIXME: Don't compute this in advance, it makes every token larger, and is
119 // also not generally what we want (it is nicer for recovery etc. to lex 123br
120 // as a single token, then diagnose as an invalid number).
121 int64_t getIntVal() const {
122 assert(Kind == Integer && "This token isn't an integer!");
123 return IntVal.getZExtValue();
124 }
125
126 APInt getAPIntVal() const {
127 assert((Kind == Integer || Kind == BigNum) &&
128        "This token isn't an integer!");
129 return IntVal;
130 }
131};
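Editor's note: a quick illustration of the AsmToken accessors defined above (token values are made up):

#include "llvm/MC/MCParser/MCAsmLexer.h"
#include <cassert>

void asmTokenExample() {
  // An integer token whose text is "42" and whose value is 42.
  llvm::AsmToken Tok(llvm::AsmToken::Integer, "42", 42);
  assert(Tok.is(llvm::AsmToken::Integer));
  assert(Tok.getIntVal() == 42);
  assert(Tok.getString() == "42");
}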
132
133/// A callback class which is notified of each comment in an assembly file as
134/// it is lexed.
135class AsmCommentConsumer {
136public:
137 virtual ~AsmCommentConsumer() = default;
138
139 /// Callback function for when a comment is lexed. Loc is the start of the
140 /// comment text (excluding the comment-start marker). CommentText is the text
141 /// of the comment, excluding the comment start and end markers, and the
142 /// newline for single-line comments.
143 virtual void HandleComment(SMLoc Loc, StringRef CommentText) = 0;
144};
145
146
147/// Generic assembler lexer interface, for use by target specific assembly
148/// lexers.
149class MCAsmLexer {
150 /// The current token, stored in the base class for faster access.
151 SmallVector<AsmToken, 1> CurTok;
152
153 /// The location and description of the current error
154 SMLoc ErrLoc;
155 std::string Err;
156
157protected: // Can only create subclasses.
158 const char *TokStart = nullptr;
159 bool SkipSpace = true;
160 bool AllowAtInIdentifier;
161 bool IsAtStartOfStatement = true;
162 AsmCommentConsumer *CommentConsumer = nullptr;
163
164 bool AltMacroMode;
165 MCAsmLexer();
166
167 virtual AsmToken LexToken() = 0;
168
169 void SetError(SMLoc errLoc, const std::string &err) {
170 ErrLoc = errLoc;
171 Err = err;
172 }
173
174public:
175 MCAsmLexer(const MCAsmLexer &) = delete;
176 MCAsmLexer &operator=(const MCAsmLexer &) = delete;
177 virtual ~MCAsmLexer();
178
179 bool IsaAltMacroMode() {
180 return AltMacroMode;
181 }
182
183 void SetAltMacroMode(bool AltMacroSet) {
184 AltMacroMode = AltMacroSet;
185 }
186
187 /// Consume the next token from the input stream and return it.
188 ///
189 /// The lexer will continuously return the end-of-file token once the end of
190 /// the main input file has been reached.
191 const AsmToken &Lex() {
192 assert(!CurTok.empty());
193 // Mark whether we are parsing out an EndOfStatement.
194 IsAtStartOfStatement = CurTok.front().getKind() == AsmToken::EndOfStatement;
195 CurTok.erase(CurTok.begin());
196 // LexToken may generate multiple tokens via UnLex but will always return
197 // the first one. Place returned value at head of CurTok vector.
198 if (CurTok.empty()) {
199 AsmToken T = LexToken();
200 CurTok.insert(CurTok.begin(), T);
201 }
202 return CurTok.front();
203 }
204
205 void UnLex(AsmToken const &Token) {
206 IsAtStartOfStatement = false;
207 CurTok.insert(CurTok.begin(), Token);
208 }
209
210 bool isAtStartOfStatement() { return IsAtStartOfStatement; }
211
212 virtual StringRef LexUntilEndOfStatement() = 0;
213
214 /// Get the current source location.
215 SMLoc getLoc() const;
216
217 /// Get the current (last) lexed token.
218 const AsmToken &getTok() const {
219 return CurTok[0];
220 }
221
222 /// Look ahead at the next token to be lexed.
223 const AsmToken peekTok(bool ShouldSkipSpace = true) {
224 AsmToken Tok;
225
226 MutableArrayRef<AsmToken> Buf(Tok);
227 size_t ReadCount = peekTokens(Buf, ShouldSkipSpace);
228
229 assert(ReadCount == 1);
230 (void)ReadCount;
231
232 return Tok;
233 }
234
235 /// Look ahead an arbitrary number of tokens.
236 virtual size_t peekTokens(MutableArrayRef<AsmToken> Buf,
237 bool ShouldSkipSpace = true) = 0;
238
239 /// Get the current error location
240 SMLoc getErrLoc() {
241 return ErrLoc;
242 }
243
244 /// Get the current error string
245 const std::string &getErr() {
246 return Err;
247 }
248
249 /// Get the kind of current token.
250 AsmToken::TokenKind getKind() const { return getTok().getKind(); }
251
252 /// Check if the current token has kind \p K.
253 bool is(AsmToken::TokenKind K) const { return getTok().is(K); }
254
255 /// Check if the current token has kind \p K.
256 bool isNot(AsmToken::TokenKind K) const { return getTok().isNot(K); }
9. Calling 'MCAsmLexer::getTok'
10. Returning from 'MCAsmLexer::getTok'
11. Calling 'AsmToken::isNot'
13. Returning from 'AsmToken::isNot'
257
258 /// Set whether spaces should be ignored by the lexer
259 void setSkipSpace(bool val) { SkipSpace = val; }
260
261 bool getAllowAtInIdentifier() { return AllowAtInIdentifier; }
262 void setAllowAtInIdentifier(bool v) { AllowAtInIdentifier = v; }
263
264 void setCommentConsumer(AsmCommentConsumer *CommentConsumer) {
265 this->CommentConsumer = CommentConsumer;
266 }
267};
268
269} // end namespace llvm
270
271#endif // LLVM_MC_MCPARSER_MCASMLEXER_H
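Editor's note: the CurTok buffering contract (Lex() pops and refills from LexToken(), UnLex() pushes a token back to be returned first) can be modeled standalone. A sketch with strings in place of AsmTokens:

#include <string>
#include <vector>

// Model of MCAsmLexer's token buffer. The real class stores AsmTokens
// in a SmallVector and lexes from a source buffer.
struct TinyLexer {
  std::vector<std::string> CurTok{"<first>"};
  std::string LexToken() { return "<next>"; } // stand-in for real lexing

  const std::string &Lex() {
    CurTok.erase(CurTok.begin());
    if (CurTok.empty())
      CurTok.insert(CurTok.begin(), LexToken());
    return CurTok.front();
  }
  void UnLex(const std::string &Tok) { CurTok.insert(CurTok.begin(), Tok); }
  const std::string &getTok() const { return CurTok.front(); }
};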

/build/llvm-toolchain-snapshot-7~svn325118/include/llvm/ADT/Twine.h

1//===- Twine.h - Fast Temporary String Concatenation ------------*- C++ -*-===//
2//
3// The LLVM Compiler Infrastructure
4//
5// This file is distributed under the University of Illinois Open Source
6// License. See LICENSE.TXT for details.
7//
8//===----------------------------------------------------------------------===//
9
10#ifndef LLVM_ADT_TWINE_H
11#define LLVM_ADT_TWINE_H
12
13#include "llvm/ADT/SmallVector.h"
14#include "llvm/ADT/StringRef.h"
15#include "llvm/Support/ErrorHandling.h"
16#include <cassert>
17#include <cstdint>
18#include <string>
19
20namespace llvm {
21
22 class formatv_object_base;
23 class raw_ostream;
24
25 /// Twine - A lightweight data structure for efficiently representing the
26 /// concatenation of temporary values as strings.
27 ///
28 /// A Twine is a kind of rope, it represents a concatenated string using a
29 /// binary-tree, where the string is the preorder of the nodes. Since the
30 /// Twine can be efficiently rendered into a buffer when its result is used,
31 /// it avoids the cost of generating temporary values for intermediate string
32 /// results -- particularly in cases when the Twine result is never
33 /// required. By explicitly tracking the type of leaf nodes, we can also avoid
34 /// the creation of temporary strings for conversion operations (such as
35 /// appending an integer to a string).
36 ///
37 /// A Twine is not intended for direct use and should not be stored; its
38 /// implementation relies on the ability to store pointers to temporary stack
39 /// objects which may be deallocated at the end of a statement. Twines should
40 /// only be accepted as const references in arguments, when an API wishes
41 /// to accept possibly-concatenated strings.
42 ///
43 /// Twines support a special 'null' value, which always concatenates to form
44 /// itself, and renders as an empty string. This can be returned from APIs to
45 /// effectively nullify any concatenations performed on the result.
46 ///
47 /// \b Implementation
48 ///
49 /// Given the nature of a Twine, it is not possible for the Twine's
50 /// concatenation method to construct interior nodes; the result must be
51 /// represented inside the returned value. For this reason a Twine object
52 /// actually holds two values, the left- and right-hand sides of a
53 /// concatenation. We also have nullary Twine objects, which are effectively
54 /// sentinel values that represent empty strings.
55 ///
56 /// Thus, a Twine can effectively have zero, one, or two children. The \see
57 /// isNullary(), \see isUnary(), and \see isBinary() predicates exist for
58 /// testing the number of children.
59 ///
60 /// We maintain a number of invariants on Twine objects (FIXME: Why):
61 /// - Nullary twines are always represented with their Kind on the left-hand
62 /// side, and the Empty kind on the right-hand side.
63 /// - Unary twines are always represented with the value on the left-hand
64 /// side, and the Empty kind on the right-hand side.
65 /// - If a Twine has another Twine as a child, that child should always be
66 /// binary (otherwise it could have been folded into the parent).
67 ///
68 /// These invariants are checked by \see isValid().
69 ///
70 /// \b Efficiency Considerations
71 ///
72 /// The Twine is designed to yield efficient and small code for common
73 /// situations. For this reason, the concat() method is inlined so that
74 /// concatenations of leaf nodes can be optimized into stores directly into a
75 /// single stack allocated object.
76 ///
77 /// In practice, not all compilers can be trusted to optimize concat() fully,
78 /// so we provide two additional methods (and accompanying operator+
79 /// overloads) to guarantee that particularly important cases (cstring plus
80 /// StringRef) codegen as desired.
81 class Twine {
82 /// NodeKind - Represent the type of an argument.
83 enum NodeKind : unsigned char {
84 /// An empty string; the result of concatenating anything with it is also
85 /// empty.
86 NullKind,
87
88 /// The empty string.
89 EmptyKind,
90
91 /// A pointer to a Twine instance.
92 TwineKind,
93
94 /// A pointer to a C string instance.
95 CStringKind,
96
97 /// A pointer to an std::string instance.
98 StdStringKind,
99
100 /// A pointer to a StringRef instance.
101 StringRefKind,
102
103 /// A pointer to a SmallString instance.
104 SmallStringKind,
105
106 /// A pointer to a formatv_object_base instance.
107 FormatvObjectKind,
108
109 /// A char value, to render as a character.
110 CharKind,
111
112 /// An unsigned int value, to render as an unsigned decimal integer.
113 DecUIKind,
114
115 /// An int value, to render as a signed decimal integer.
116 DecIKind,
117
118 /// A pointer to an unsigned long value, to render as an unsigned decimal
119 /// integer.
120 DecULKind,
121
122 /// A pointer to a long value, to render as a signed decimal integer.
123 DecLKind,
124
125 /// A pointer to an unsigned long long value, to render as an unsigned
126 /// decimal integer.
127 DecULLKind,
128
129 /// A pointer to a long long value, to render as a signed decimal integer.
130 DecLLKind,
131
132 /// A pointer to a uint64_t value, to render as an unsigned hexadecimal
133 /// integer.
134 UHexKind
135 };
136
137 union Child
138 {
139 const Twine *twine;
140 const char *cString;
141 const std::string *stdString;
142 const StringRef *stringRef;
143 const SmallVectorImpl<char> *smallString;
144 const formatv_object_base *formatvObject;
145 char character;
146 unsigned int decUI;
147 int decI;
148 const unsigned long *decUL;
149 const long *decL;
150 const unsigned long long *decULL;
151 const long long *decLL;
152 const uint64_t *uHex;
153 };
154
155 /// LHS - The prefix in the concatenation, which may be uninitialized for
156 /// Null or Empty kinds.
157 Child LHS;
158
159 /// RHS - The suffix in the concatenation, which may be uninitialized for
160 /// Null or Empty kinds.
161 Child RHS;
162
163 /// LHSKind - The NodeKind of the left hand side, \see getLHSKind().
164 NodeKind LHSKind = EmptyKind;
165
166 /// RHSKind - The NodeKind of the right hand side, \see getRHSKind().
167 NodeKind RHSKind = EmptyKind;
168
169 /// Construct a nullary twine; the kind must be NullKind or EmptyKind.
170 explicit Twine(NodeKind Kind) : LHSKind(Kind) {
171 assert(isNullary() && "Invalid kind!");
172 }
173
174 /// Construct a binary twine.
175 explicit Twine(const Twine &LHS, const Twine &RHS)
176 : LHSKind(TwineKind), RHSKind(TwineKind) {
177 this->LHS.twine = &LHS;
178 this->RHS.twine = &RHS;
179 assert(isValid() && "Invalid twine!");
180 }
181
182 /// Construct a twine from explicit values.
183 explicit Twine(Child LHS, NodeKind LHSKind, Child RHS, NodeKind RHSKind)
184 : LHS(LHS), RHS(RHS), LHSKind(LHSKind), RHSKind(RHSKind) {
185 assert(isValid() && "Invalid twine!");
186 }
187
188 /// Check for the null twine.
189 bool isNull() const {
190 return getLHSKind() == NullKind;
191 }
192
193 /// Check for the empty twine.
194 bool isEmpty() const {
195 return getLHSKind() == EmptyKind;
196 }
197
198 /// Check if this is a nullary twine (null or empty).
199 bool isNullary() const {
200 return isNull() || isEmpty();
201 }
202
203 /// Check if this is a unary twine.
204 bool isUnary() const {
205 return getRHSKind() == EmptyKind && !isNullary();
206 }
207
208 /// Check if this is a binary twine.
209 bool isBinary() const {
210 return getLHSKind() != NullKind && getRHSKind() != EmptyKind;
211 }
212
213 /// Check if this is a valid twine (satisfying the invariants on
214 /// order and number of arguments).
215 bool isValid() const {
216 // Nullary twines always have Empty on the RHS.
217 if (isNullary() && getRHSKind() != EmptyKind)
218 return false;
219
220 // Null should never appear on the RHS.
221 if (getRHSKind() == NullKind)
222 return false;
223
224 // The RHS cannot be non-empty if the LHS is empty.
225 if (getRHSKind() != EmptyKind && getLHSKind() == EmptyKind)
226 return false;
227
228 // A twine child should always be binary.
229 if (getLHSKind() == TwineKind &&
230 !LHS.twine->isBinary())
231 return false;
232 if (getRHSKind() == TwineKind &&
233 !RHS.twine->isBinary())
234 return false;
235
236 return true;
237 }
238
239 /// Get the NodeKind of the left-hand side.
240 NodeKind getLHSKind() const { return LHSKind; }
241
242 /// Get the NodeKind of the right-hand side.
243 NodeKind getRHSKind() const { return RHSKind; }
244
245 /// Print one child from a twine.
246 void printOneChild(raw_ostream &OS, Child Ptr, NodeKind Kind) const;
247
248 /// Print the representation of one child from a twine.
249 void printOneChildRepr(raw_ostream &OS, Child Ptr,
250 NodeKind Kind) const;
251
252 public:
253 /// @name Constructors
254 /// @{
255
256 /// Construct from an empty string.
257 /*implicit*/ Twine() {
258 assert(isValid() && "Invalid twine!");
259 }
260
261 Twine(const Twine &) = default;
262
263 /// Construct from a C string.
264 ///
265 /// We take care here to optimize "" into the empty twine -- this will be
266 /// optimized out for string constants. This allows Twine arguments to have
267 /// default "" values, without introducing unnecessary string constants.
268 /*implicit*/ Twine(const char *Str) {
22. Calling implicit default constructor for 'Child'
23. Returning from default constructor for 'Child'
24. Calling implicit default constructor for 'Child'
25. Returning from default constructor for 'Child'
269 if (Str[0] != '\0') {
26. Taking true branch
270 LHS.cString = Str;
271 LHSKind = CStringKind;
272 } else
273 LHSKind = EmptyKind;
274
275 assert(isValid() && "Invalid twine!");
27. Within the expansion of the macro 'assert': (a) Assuming the condition is true
276 }
277
278 /// Construct from an std::string.
279 /*implicit*/ Twine(const std::string &Str) : LHSKind(StdStringKind) {
280 LHS.stdString = &Str;
281 assert(isValid() && "Invalid twine!");
282 }
283
284 /// Construct from a StringRef.
285 /*implicit*/ Twine(const StringRef &Str) : LHSKind(StringRefKind) {
286 LHS.stringRef = &Str;
287 assert(isValid() && "Invalid twine!");
288 }
289
290 /// Construct from a SmallString.
291 /*implicit*/ Twine(const SmallVectorImpl<char> &Str)
292 : LHSKind(SmallStringKind) {
293 LHS.smallString = &Str;
294 assert(isValid() && "Invalid twine!");
295 }
296
297 /// Construct from a formatv_object_base.
298 /*implicit*/ Twine(const formatv_object_base &Fmt)
299 : LHSKind(FormatvObjectKind) {
300 LHS.formatvObject = &Fmt;
301 assert(isValid() && "Invalid twine!");
302 }
303
304 /// Construct from a char.
305 explicit Twine(char Val) : LHSKind(CharKind) {
306 LHS.character = Val;
307 }
308
309 /// Construct from a signed char.
310 explicit Twine(signed char Val) : LHSKind(CharKind) {
311 LHS.character = static_cast<char>(Val);
312 }
313
314 /// Construct from an unsigned char.
315 explicit Twine(unsigned char Val) : LHSKind(CharKind) {
316 LHS.character = static_cast<char>(Val);
317 }
318
319 /// Construct a twine to print \p Val as an unsigned decimal integer.
320 explicit Twine(unsigned Val) : LHSKind(DecUIKind) {
321 LHS.decUI = Val;
322 }
323
324 /// Construct a twine to print \p Val as a signed decimal integer.
325 explicit Twine(int Val) : LHSKind(DecIKind) {
326 LHS.decI = Val;
327 }
328
329 /// Construct a twine to print \p Val as an unsigned decimal integer.
330 explicit Twine(const unsigned long &Val) : LHSKind(DecULKind) {
331 LHS.decUL = &Val;
332 }
333
334 /// Construct a twine to print \p Val as a signed decimal integer.
335 explicit Twine(const long &Val) : LHSKind(DecLKind) {
336 LHS.decL = &Val;
337 }
338
339 /// Construct a twine to print \p Val as an unsigned decimal integer.
340 explicit Twine(const unsigned long long &Val) : LHSKind(DecULLKind) {
341 LHS.decULL = &Val;
342 }
343
344 /// Construct a twine to print \p Val as a signed decimal integer.
345 explicit Twine(const long long &Val) : LHSKind(DecLLKind) {
346 LHS.decLL = &Val;
347 }
348
349 // FIXME: Unfortunately, to make sure this is as efficient as possible we
350 // need extra binary constructors from particular types. We can't rely on
351 // the compiler to be smart enough to fold operator+()/concat() down to the
352 // right thing. Yet.
353
354 /// Construct as the concatenation of a C string and a StringRef.
355 /*implicit*/ Twine(const char *LHS, const StringRef &RHS)
356 : LHSKind(CStringKind), RHSKind(StringRefKind) {
357 this->LHS.cString = LHS;
358 this->RHS.stringRef = &RHS;
359 assert(isValid() && "Invalid twine!");
360 }
361
362 /// Construct as the concatenation of a StringRef and a C string.
363 /*implicit*/ Twine(const StringRef &LHS, const char *RHS)
364 : LHSKind(StringRefKind), RHSKind(CStringKind) {
365 this->LHS.stringRef = &LHS;
366 this->RHS.cString = RHS;
367 assert(isValid() && "Invalid twine!");
368 }
369
370 /// Since the intended use of twines is as temporary objects, assignment
371 /// when concatenating might cause undefined behavior or stack corruption.
372 Twine &operator=(const Twine &) = delete;
373
374 /// Create a 'null' string, which is an empty string that always
375 /// concatenates to form another empty string.
376 static Twine createNull() {
377 return Twine(NullKind);
378 }
379
380 /// @}
381 /// @name Numeric Conversions
382 /// @{
383
384 // Construct a twine to print \p Val as an unsigned hexadecimal integer.
385 static Twine utohexstr(const uint64_t &Val) {
386 Child LHS, RHS;
387 LHS.uHex = &Val;
388 RHS.twine = nullptr;
389 return Twine(LHS, UHexKind, RHS, EmptyKind);
390 }
391
392 /// @}
393 /// @name Predicate Operations
394 /// @{
395
396 /// Check if this twine is trivially empty; a false return value does not
397 /// necessarily mean the twine is empty.
398 bool isTriviallyEmpty() const {
399 return isNullary();
400 }
401
402 /// Return true if this twine can be dynamically accessed as a single
403 /// StringRef value with getSingleStringRef().
404 bool isSingleStringRef() const {
405 if (getRHSKind() != EmptyKind) return false;
406
407 switch (getLHSKind()) {
408 case EmptyKind:
409 case CStringKind:
410 case StdStringKind:
411 case StringRefKind:
412 case SmallStringKind:
413 return true;
414 default:
415 return false;
416 }
417 }
418
419 /// @}
420 /// @name String Operations
421 /// @{
422
423 Twine concat(const Twine &Suffix) const;
424
425 /// @}
426 /// @name Output & Conversion.
427 /// @{
428
429 /// Return the twine contents as a std::string.
430 std::string str() const;
431
432 /// Append the concatenated string into the given SmallString or SmallVector.
433 void toVector(SmallVectorImpl<char> &Out) const;
434
435 /// This returns the twine as a single StringRef. This method is only valid
436 /// if isSingleStringRef() is true.
437 StringRef getSingleStringRef() const {
438 assert(isSingleStringRef() && "This cannot be had as a single stringref!");
439 switch (getLHSKind()) {
440 default: llvm_unreachable("Out of sync with isSingleStringRef");
441 case EmptyKind: return StringRef();
442 case CStringKind: return StringRef(LHS.cString);
443 case StdStringKind: return StringRef(*LHS.stdString);
444 case StringRefKind: return *LHS.stringRef;
445 case SmallStringKind:
446 return StringRef(LHS.smallString->data(), LHS.smallString->size());
447 }
448 }
449
450 /// This returns the twine as a single StringRef if it can be
451 /// represented as such. Otherwise the twine is written into the given
452 /// SmallVector and a StringRef to the SmallVector's data is returned.
453 StringRef toStringRef(SmallVectorImpl<char> &Out) const {
454 if (isSingleStringRef())
455 return getSingleStringRef();
456 toVector(Out);
457 return StringRef(Out.data(), Out.size());
458 }
459
460 /// This returns the twine as a single null terminated StringRef if it
461 /// can be represented as such. Otherwise the twine is written into the
462 /// given SmallVector and a StringRef to the SmallVector's data is returned.
463 ///
464 /// The returned StringRef's size does not include the null terminator.
465 StringRef toNullTerminatedStringRef(SmallVectorImpl<char> &Out) const;
466
467 /// Write the concatenated string represented by this twine to the
468 /// stream \p OS.
469 void print(raw_ostream &OS) const;
470
471 /// Dump the concatenated string represented by this twine to stderr.
472 void dump() const;
473
474 /// Write the representation of this twine to the stream \p OS.
475 void printRepr(raw_ostream &OS) const;
476
477 /// Dump the representation of this twine to stderr.
478 void dumpRepr() const;
479
480 /// @}
481 };
482
483 /// @name Twine Inline Implementations
484 /// @{
485
486 inline Twine Twine::concat(const Twine &Suffix) const {
487 // Concatenation with null is null.
488 if (isNull() || Suffix.isNull())
489 return Twine(NullKind);
490
491 // Concatenation with empty yields the other side.
492 if (isEmpty())
493 return Suffix;
494 if (Suffix.isEmpty())
495 return *this;
496
497 // Otherwise we need to create a new node, taking care to fold in unary
498 // twines.
499 Child NewLHS, NewRHS;
500 NewLHS.twine = this;
501 NewRHS.twine = &Suffix;
502 NodeKind NewLHSKind = TwineKind, NewRHSKind = TwineKind;
503 if (isUnary()) {
504 NewLHS = LHS;
505 NewLHSKind = getLHSKind();
506 }
507 if (Suffix.isUnary()) {
508 NewRHS = Suffix.LHS;
509 NewRHSKind = Suffix.getLHSKind();
510 }
511
512 return Twine(NewLHS, NewLHSKind, NewRHS, NewRHSKind);
513 }
514
515 inline Twine operator+(const Twine &LHS, const Twine &RHS) {
516 return LHS.concat(RHS);
517 }
518
519 /// Additional overload to guarantee simplified codegen; this is equivalent to
520 /// concat().
521
522 inline Twine operator+(const char *LHS, const StringRef &RHS) {
523 return Twine(LHS, RHS);
524 }
525
526 /// Additional overload to guarantee simplified codegen; this is equivalent to
527 /// concat().
528
529 inline Twine operator+(const StringRef &LHS, const char *RHS) {
530 return Twine(LHS, RHS);
531 }
532
533 inline raw_ostream &operator<<(raw_ostream &OS, const Twine &RHS) {
534 RHS.print(OS);
535 return OS;
536 }
537
538 /// @}
539
540} // end namespace llvm
541
542#endif // LLVM_ADT_TWINE_H
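Editor's note: typical use of the API above renders a concatenation in a single expression, so no Twine outlives its operands (the function names below are ours):

#include "llvm/ADT/SmallString.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/ADT/Twine.h"
#include <string>

// Render straight to a std::string.
std::string makeSymbolName(llvm::StringRef Base, unsigned Index) {
  return ("sym_" + Base + "_" + llvm::Twine(Index)).str();
}

// Render into caller-provided stack storage and hand back a StringRef,
// avoiding a heap allocation for short names.
llvm::StringRef makeSymbolNameFast(llvm::StringRef Base, unsigned Index,
                                   llvm::SmallVectorImpl<char> &Storage) {
  return ("sym_" + Base + "_" + llvm::Twine(Index)).toStringRef(Storage);
}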