Bug Summary

File: lib/Target/AMDGPU/AsmParser/AMDGPUAsmParser.cpp
Warning: line 2344, column 3
1st function call argument is an uninitialized value
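
A note on this warning class: the analyzer reports it when a value is passed as a function argument along some path on which it was never assigned. A minimal sketch of the pattern, with hypothetical names (the flagged call at line 2344 lies outside the excerpt below):

  static void useWidth(int64_t Width); // hypothetical callee

  void example(bool Cond) {
    int64_t Width;     // not initialized on every path
    if (Cond)
      Width = 16;
    useWidth(Width);   // 1st function call argument is an uninitialized value
  }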

Annotated Source Code

/build/llvm-toolchain-snapshot-6.0~svn318882/lib/Target/AMDGPU/AsmParser/AMDGPUAsmParser.cpp

1//===- AMDGPUAsmParser.cpp - Parse SI asm to MCInst instructions ----------===//
2//
3// The LLVM Compiler Infrastructure
4//
5// This file is distributed under the University of Illinois Open Source
6// License. See LICENSE.TXT for details.
7//
8//===----------------------------------------------------------------------===//
9
10#include "AMDGPU.h"
11#include "AMDKernelCodeT.h"
12#include "MCTargetDesc/AMDGPUMCTargetDesc.h"
13#include "MCTargetDesc/AMDGPUTargetStreamer.h"
14#include "SIDefines.h"
15#include "Utils/AMDGPUAsmUtils.h"
16#include "Utils/AMDGPUBaseInfo.h"
17#include "Utils/AMDKernelCodeTUtils.h"
18#include "llvm/ADT/APFloat.h"
19#include "llvm/ADT/APInt.h"
20#include "llvm/ADT/ArrayRef.h"
21#include "llvm/ADT/STLExtras.h"
22#include "llvm/ADT/SmallBitVector.h"
23#include "llvm/ADT/SmallString.h"
24#include "llvm/ADT/StringRef.h"
25#include "llvm/ADT/StringSwitch.h"
26#include "llvm/ADT/Twine.h"
27#include "llvm/BinaryFormat/ELF.h"
28#include "llvm/CodeGen/MachineValueType.h"
29#include "llvm/MC/MCAsmInfo.h"
30#include "llvm/MC/MCContext.h"
31#include "llvm/MC/MCExpr.h"
32#include "llvm/MC/MCInst.h"
33#include "llvm/MC/MCInstrDesc.h"
34#include "llvm/MC/MCInstrInfo.h"
35#include "llvm/MC/MCParser/MCAsmLexer.h"
36#include "llvm/MC/MCParser/MCAsmParser.h"
37#include "llvm/MC/MCParser/MCAsmParserExtension.h"
38#include "llvm/MC/MCParser/MCParsedAsmOperand.h"
39#include "llvm/MC/MCParser/MCTargetAsmParser.h"
40#include "llvm/MC/MCRegisterInfo.h"
41#include "llvm/MC/MCStreamer.h"
42#include "llvm/MC/MCSubtargetInfo.h"
43#include "llvm/MC/MCSymbol.h"
44#include "llvm/Support/AMDGPUMetadata.h"
45#include "llvm/Support/Casting.h"
46#include "llvm/Support/Compiler.h"
47#include "llvm/Support/ErrorHandling.h"
48#include "llvm/Support/MathExtras.h"
49#include "llvm/Support/SMLoc.h"
50#include "llvm/Support/TargetRegistry.h"
51#include "llvm/Support/raw_ostream.h"
52#include <algorithm>
53#include <cassert>
54#include <cstdint>
55#include <cstring>
56#include <iterator>
57#include <map>
58#include <memory>
59#include <string>
60
61using namespace llvm;
62using namespace llvm::AMDGPU;
63
64namespace {
65
66class AMDGPUAsmParser;
67
68enum RegisterKind { IS_UNKNOWN, IS_VGPR, IS_SGPR, IS_TTMP, IS_SPECIAL };
69
70//===----------------------------------------------------------------------===//
71// Operand
72//===----------------------------------------------------------------------===//
73
74class AMDGPUOperand : public MCParsedAsmOperand {
75 enum KindTy {
76 Token,
77 Immediate,
78 Register,
79 Expression
80 } Kind;
81
82 SMLoc StartLoc, EndLoc;
83 const AMDGPUAsmParser *AsmParser;
84
85public:
86 AMDGPUOperand(KindTy Kind_, const AMDGPUAsmParser *AsmParser_)
87 : MCParsedAsmOperand(), Kind(Kind_), AsmParser(AsmParser_) {}
88
89 using Ptr = std::unique_ptr<AMDGPUOperand>;
90
91 struct Modifiers {
92 bool Abs = false;
93 bool Neg = false;
94 bool Sext = false;
95
96 bool hasFPModifiers() const { return Abs || Neg; }
97 bool hasIntModifiers() const { return Sext; }
98 bool hasModifiers() const { return hasFPModifiers() || hasIntModifiers(); }
99
100 int64_t getFPModifiersOperand() const {
101 int64_t Operand = 0;
102 Operand |= Abs ? SISrcMods::ABS : 0;
103 Operand |= Neg ? SISrcMods::NEG : 0;
104 return Operand;
105 }
106
107 int64_t getIntModifiersOperand() const {
108 int64_t Operand = 0;
109 Operand |= Sext ? SISrcMods::SEXT : 0;
110 return Operand;
111 }
112
113 int64_t getModifiersOperand() const {
114 assert(!(hasFPModifiers() && hasIntModifiers())
115 && "fp and int modifiers should not be used simultaneously");
116 if (hasFPModifiers()) {
117 return getFPModifiersOperand();
118 } else if (hasIntModifiers()) {
119 return getIntModifiersOperand();
120 } else {
121 return 0;
122 }
123 }
124
125 friend raw_ostream &operator <<(raw_ostream &OS, AMDGPUOperand::Modifiers Mods);
126 };
127
128 enum ImmTy {
129 ImmTyNone,
130 ImmTyGDS,
131 ImmTyOffen,
132 ImmTyIdxen,
133 ImmTyAddr64,
134 ImmTyOffset,
135 ImmTyOffset0,
136 ImmTyOffset1,
137 ImmTyGLC,
138 ImmTySLC,
139 ImmTyTFE,
140 ImmTyClampSI,
141 ImmTyOModSI,
142 ImmTyDppCtrl,
143 ImmTyDppRowMask,
144 ImmTyDppBankMask,
145 ImmTyDppBoundCtrl,
146 ImmTySdwaDstSel,
147 ImmTySdwaSrc0Sel,
148 ImmTySdwaSrc1Sel,
149 ImmTySdwaDstUnused,
150 ImmTyDMask,
151 ImmTyUNorm,
152 ImmTyDA,
153 ImmTyR128,
154 ImmTyLWE,
155 ImmTyExpTgt,
156 ImmTyExpCompr,
157 ImmTyExpVM,
158 ImmTyDFMT,
159 ImmTyNFMT,
160 ImmTyHwreg,
161 ImmTyOff,
162 ImmTySendMsg,
163 ImmTyInterpSlot,
164 ImmTyInterpAttr,
165 ImmTyAttrChan,
166 ImmTyOpSel,
167 ImmTyOpSelHi,
168 ImmTyNegLo,
169 ImmTyNegHi,
170 ImmTySwizzle,
171 ImmTyHigh
172 };
173
174 struct TokOp {
175 const char *Data;
176 unsigned Length;
177 };
178
179 struct ImmOp {
180 int64_t Val;
181 ImmTy Type;
182 bool IsFPImm;
183 Modifiers Mods;
184 };
185
186 struct RegOp {
187 unsigned RegNo;
188 bool IsForcedVOP3;
189 Modifiers Mods;
190 };
191
192 union {
193 TokOp Tok;
194 ImmOp Imm;
195 RegOp Reg;
196 const MCExpr *Expr;
197 };
198
199 bool isToken() const override {
200 if (Kind == Token)
201 return true;
202
203 if (Kind != Expression || !Expr)
204 return false;
205
206 // When parsing operands, we can't always tell if something was meant to be
207 // a token, like 'gds', or an expression that references a global variable.
208 // In this case, we assume the string is an expression, and if we need to
209 // interpret it as a token, then we treat the symbol name as the token.
210 return isa<MCSymbolRefExpr>(Expr);
211 }
212
213 bool isImm() const override {
214 return Kind == Immediate;
215 }
216
217 bool isInlinableImm(MVT type) const;
218 bool isLiteralImm(MVT type) const;
219
220 bool isRegKind() const {
221 return Kind == Register;
222 }
223
224 bool isReg() const override {
225 return isRegKind() && !hasModifiers();
226 }
227
228 bool isRegOrImmWithInputMods(MVT type) const {
229 return isRegKind() || isInlinableImm(type);
230 }
231
232 bool isRegOrImmWithInt16InputMods() const {
233 return isRegOrImmWithInputMods(MVT::i16);
234 }
235
236 bool isRegOrImmWithInt32InputMods() const {
237 return isRegOrImmWithInputMods(MVT::i32);
238 }
239
240 bool isRegOrImmWithInt64InputMods() const {
241 return isRegOrImmWithInputMods(MVT::i64);
242 }
243
244 bool isRegOrImmWithFP16InputMods() const {
245 return isRegOrImmWithInputMods(MVT::f16);
246 }
247
248 bool isRegOrImmWithFP32InputMods() const {
249 return isRegOrImmWithInputMods(MVT::f32);
250 }
251
252 bool isRegOrImmWithFP64InputMods() const {
253 return isRegOrImmWithInputMods(MVT::f64);
254 }
255
256 bool isVReg() const {
257 return isRegClass(AMDGPU::VGPR_32RegClassID) ||
258 isRegClass(AMDGPU::VReg_64RegClassID) ||
259 isRegClass(AMDGPU::VReg_96RegClassID) ||
260 isRegClass(AMDGPU::VReg_128RegClassID) ||
261 isRegClass(AMDGPU::VReg_256RegClassID) ||
262 isRegClass(AMDGPU::VReg_512RegClassID);
263 }
264
265 bool isVReg32OrOff() const {
266 return isOff() || isRegClass(AMDGPU::VGPR_32RegClassID);
267 }
268
269 bool isSDWARegKind() const;
270
271 bool isImmTy(ImmTy ImmT) const {
272 return isImm() && Imm.Type == ImmT;
273 }
274
275 bool isImmModifier() const {
276 return isImm() && Imm.Type != ImmTyNone;
277 }
278
279 bool isClampSI() const { return isImmTy(ImmTyClampSI); }
280 bool isOModSI() const { return isImmTy(ImmTyOModSI); }
281 bool isDMask() const { return isImmTy(ImmTyDMask); }
282 bool isUNorm() const { return isImmTy(ImmTyUNorm); }
283 bool isDA() const { return isImmTy(ImmTyDA); }
284 bool isR128() const { return isImmTy(ImmTyR128); }
285 bool isLWE() const { return isImmTy(ImmTyLWE); }
286 bool isOff() const { return isImmTy(ImmTyOff); }
287 bool isExpTgt() const { return isImmTy(ImmTyExpTgt); }
288 bool isExpVM() const { return isImmTy(ImmTyExpVM); }
289 bool isExpCompr() const { return isImmTy(ImmTyExpCompr); }
290 bool isOffen() const { return isImmTy(ImmTyOffen); }
291 bool isIdxen() const { return isImmTy(ImmTyIdxen); }
292 bool isAddr64() const { return isImmTy(ImmTyAddr64); }
293 bool isOffset() const { return isImmTy(ImmTyOffset) && isUInt<16>(getImm()); }
294 bool isOffset0() const { return isImmTy(ImmTyOffset0) && isUInt<16>(getImm()); }
295 bool isOffset1() const { return isImmTy(ImmTyOffset1) && isUInt<8>(getImm()); }
296
297 bool isOffsetU12() const { return isImmTy(ImmTyOffset) && isUInt<12>(getImm()); }
298 bool isOffsetS13() const { return isImmTy(ImmTyOffset) && isInt<13>(getImm()); }
299 bool isGDS() const { return isImmTy(ImmTyGDS); }
300 bool isGLC() const { return isImmTy(ImmTyGLC); }
301 bool isSLC() const { return isImmTy(ImmTySLC); }
302 bool isTFE() const { return isImmTy(ImmTyTFE); }
303 bool isDFMT() const { return isImmTy(ImmTyDFMT) && isUInt<8>(getImm()); }
304 bool isNFMT() const { return isImmTy(ImmTyNFMT) && isUInt<8>(getImm()); }
305 bool isBankMask() const { return isImmTy(ImmTyDppBankMask); }
306 bool isRowMask() const { return isImmTy(ImmTyDppRowMask); }
307 bool isBoundCtrl() const { return isImmTy(ImmTyDppBoundCtrl); }
308 bool isSDWADstSel() const { return isImmTy(ImmTySdwaDstSel); }
309 bool isSDWASrc0Sel() const { return isImmTy(ImmTySdwaSrc0Sel); }
310 bool isSDWASrc1Sel() const { return isImmTy(ImmTySdwaSrc1Sel); }
311 bool isSDWADstUnused() const { return isImmTy(ImmTySdwaDstUnused); }
312 bool isInterpSlot() const { return isImmTy(ImmTyInterpSlot); }
313 bool isInterpAttr() const { return isImmTy(ImmTyInterpAttr); }
314 bool isAttrChan() const { return isImmTy(ImmTyAttrChan); }
315 bool isOpSel() const { return isImmTy(ImmTyOpSel); }
316 bool isOpSelHi() const { return isImmTy(ImmTyOpSelHi); }
317 bool isNegLo() const { return isImmTy(ImmTyNegLo); }
318 bool isNegHi() const { return isImmTy(ImmTyNegHi); }
319 bool isHigh() const { return isImmTy(ImmTyHigh); }
320
321 bool isMod() const {
322 return isClampSI() || isOModSI();
323 }
324
325 bool isRegOrImm() const {
326 return isReg() || isImm();
327 }
328
329 bool isRegClass(unsigned RCID) const;
330
331 bool isRegOrInlineNoMods(unsigned RCID, MVT type) const {
332 return (isRegClass(RCID) || isInlinableImm(type)) && !hasModifiers();
333 }
334
335 bool isSCSrcB16() const {
336 return isRegOrInlineNoMods(AMDGPU::SReg_32RegClassID, MVT::i16);
337 }
338
339 bool isSCSrcV2B16() const {
340 return isSCSrcB16();
341 }
342
343 bool isSCSrcB32() const {
344 return isRegOrInlineNoMods(AMDGPU::SReg_32RegClassID, MVT::i32);
345 }
346
347 bool isSCSrcB64() const {
348 return isRegOrInlineNoMods(AMDGPU::SReg_64RegClassID, MVT::i64);
349 }
350
351 bool isSCSrcF16() const {
352 return isRegOrInlineNoMods(AMDGPU::SReg_32RegClassID, MVT::f16);
353 }
354
355 bool isSCSrcV2F16() const {
356 return isSCSrcF16();
357 }
358
359 bool isSCSrcF32() const {
360 return isRegOrInlineNoMods(AMDGPU::SReg_32RegClassID, MVT::f32);
361 }
362
363 bool isSCSrcF64() const {
364 return isRegOrInlineNoMods(AMDGPU::SReg_64RegClassID, MVT::f64);
365 }
366
367 bool isSSrcB32() const {
368 return isSCSrcB32() || isLiteralImm(MVT::i32) || isExpr();
369 }
370
371 bool isSSrcB16() const {
372 return isSCSrcB16() || isLiteralImm(MVT::i16);
373 }
374
375 bool isSSrcV2B16() const {
376 llvm_unreachable("cannot happen");
377 return isSSrcB16();
378 }
379
380 bool isSSrcB64() const {
381 // TODO: Find out how SALU supports extension of 32-bit literals to 64 bits.
382 // See isVSrc64().
383 return isSCSrcB64() || isLiteralImm(MVT::i64);
384 }
385
386 bool isSSrcF32() const {
387 return isSCSrcB32() || isLiteralImm(MVT::f32) || isExpr();
388 }
389
390 bool isSSrcF64() const {
391 return isSCSrcB64() || isLiteralImm(MVT::f64);
392 }
393
394 bool isSSrcF16() const {
395 return isSCSrcB16() || isLiteralImm(MVT::f16);
396 }
397
398 bool isSSrcV2F16() const {
399 llvm_unreachable("cannot happen");
400 return isSSrcF16();
401 }
402
403 bool isVCSrcB32() const {
404 return isRegOrInlineNoMods(AMDGPU::VS_32RegClassID, MVT::i32);
405 }
406
407 bool isVCSrcB64() const {
408 return isRegOrInlineNoMods(AMDGPU::VS_64RegClassID, MVT::i64);
409 }
410
411 bool isVCSrcB16() const {
412 return isRegOrInlineNoMods(AMDGPU::VS_32RegClassID, MVT::i16);
413 }
414
415 bool isVCSrcV2B16() const {
416 return isVCSrcB16();
417 }
418
419 bool isVCSrcF32() const {
420 return isRegOrInlineNoMods(AMDGPU::VS_32RegClassID, MVT::f32);
421 }
422
423 bool isVCSrcF64() const {
424 return isRegOrInlineNoMods(AMDGPU::VS_64RegClassID, MVT::f64);
425 }
426
427 bool isVCSrcF16() const {
428 return isRegOrInlineNoMods(AMDGPU::VS_32RegClassID, MVT::f16);
429 }
430
431 bool isVCSrcV2F16() const {
432 return isVCSrcF16();
433 }
434
435 bool isVSrcB32() const {
436 return isVCSrcF32() || isLiteralImm(MVT::i32);
437 }
438
439 bool isVSrcB64() const {
440 return isVCSrcF64() || isLiteralImm(MVT::i64);
441 }
442
443 bool isVSrcB16() const {
444 return isVCSrcF16() || isLiteralImm(MVT::i16);
445 }
446
447 bool isVSrcV2B16() const {
448 llvm_unreachable("cannot happen");
449 return isVSrcB16();
450 }
451
452 bool isVSrcF32() const {
453 return isVCSrcF32() || isLiteralImm(MVT::f32);
454 }
455
456 bool isVSrcF64() const {
457 return isVCSrcF64() || isLiteralImm(MVT::f64);
458 }
459
460 bool isVSrcF16() const {
461 return isVCSrcF16() || isLiteralImm(MVT::f16);
462 }
463
464 bool isVSrcV2F16() const {
465 llvm_unreachable("cannot happen");
466 return isVSrcF16();
467 }
468
469 bool isKImmFP32() const {
470 return isLiteralImm(MVT::f32);
471 }
472
473 bool isKImmFP16() const {
474 return isLiteralImm(MVT::f16);
475 }
476
477 bool isMem() const override {
478 return false;
479 }
480
481 bool isExpr() const {
482 return Kind == Expression;
483 }
484
485 bool isSoppBrTarget() const {
486 return isExpr() || isImm();
487 }
488
489 bool isSWaitCnt() const;
490 bool isHwreg() const;
491 bool isSendMsg() const;
492 bool isSwizzle() const;
493 bool isSMRDOffset8() const;
494 bool isSMRDOffset20() const;
495 bool isSMRDLiteralOffset() const;
496 bool isDPPCtrl() const;
497 bool isGPRIdxMode() const;
498 bool isS16Imm() const;
499 bool isU16Imm() const;
500
501 StringRef getExpressionAsToken() const {
502 assert(isExpr());
503 const MCSymbolRefExpr *S = cast<MCSymbolRefExpr>(Expr);
504 return S->getSymbol().getName();
505 }
506
507 StringRef getToken() const {
508 assert(isToken());
509
510 if (Kind == Expression)
511 return getExpressionAsToken();
512
513 return StringRef(Tok.Data, Tok.Length);
514 }
515
516 int64_t getImm() const {
517 assert(isImm());
518 return Imm.Val;
519 }
520
521 ImmTy getImmTy() const {
522 assert(isImm());
523 return Imm.Type;
524 }
525
526 unsigned getReg() const override {
527 return Reg.RegNo;
528 }
529
530 SMLoc getStartLoc() const override {
531 return StartLoc;
532 }
533
534 SMLoc getEndLoc() const override {
535 return EndLoc;
536 }
537
538 Modifiers getModifiers() const {
539 assert(isRegKind() || isImmTy(ImmTyNone));
540 return isRegKind() ? Reg.Mods : Imm.Mods;
541 }
542
543 void setModifiers(Modifiers Mods) {
544 assert(isRegKind() || isImmTy(ImmTyNone));
545 if (isRegKind())
546 Reg.Mods = Mods;
547 else
548 Imm.Mods = Mods;
549 }
550
551 bool hasModifiers() const {
552 return getModifiers().hasModifiers();
553 }
554
555 bool hasFPModifiers() const {
556 return getModifiers().hasFPModifiers();
557 }
558
559 bool hasIntModifiers() const {
560 return getModifiers().hasIntModifiers();
561 }
562
563 uint64_t applyInputFPModifiers(uint64_t Val, unsigned Size) const;
564
565 void addImmOperands(MCInst &Inst, unsigned N, bool ApplyModifiers = true) const;
566
567 void addLiteralImmOperand(MCInst &Inst, int64_t Val, bool ApplyModifiers) const;
568
569 template <unsigned Bitwidth>
570 void addKImmFPOperands(MCInst &Inst, unsigned N) const;
571
572 void addKImmFP16Operands(MCInst &Inst, unsigned N) const {
573 addKImmFPOperands<16>(Inst, N);
574 }
575
576 void addKImmFP32Operands(MCInst &Inst, unsigned N) const {
577 addKImmFPOperands<32>(Inst, N);
578 }
579
580 void addRegOperands(MCInst &Inst, unsigned N) const;
581
582 void addRegOrImmOperands(MCInst &Inst, unsigned N) const {
583 if (isRegKind())
584 addRegOperands(Inst, N);
585 else if (isExpr())
586 Inst.addOperand(MCOperand::createExpr(Expr));
587 else
588 addImmOperands(Inst, N);
589 }
590
591 void addRegOrImmWithInputModsOperands(MCInst &Inst, unsigned N) const {
592 Modifiers Mods = getModifiers();
593 Inst.addOperand(MCOperand::createImm(Mods.getModifiersOperand()));
594 if (isRegKind()) {
595 addRegOperands(Inst, N);
596 } else {
597 addImmOperands(Inst, N, false);
598 }
599 }
600
601 void addRegOrImmWithFPInputModsOperands(MCInst &Inst, unsigned N) const {
602 assert(!hasIntModifiers());
603 addRegOrImmWithInputModsOperands(Inst, N);
604 }
605
606 void addRegOrImmWithIntInputModsOperands(MCInst &Inst, unsigned N) const {
607 assert(!hasFPModifiers());
608 addRegOrImmWithInputModsOperands(Inst, N);
609 }
610
611 void addRegWithInputModsOperands(MCInst &Inst, unsigned N) const {
612 Modifiers Mods = getModifiers();
613 Inst.addOperand(MCOperand::createImm(Mods.getModifiersOperand()));
614 assert(isRegKind());
615 addRegOperands(Inst, N);
616 }
617
618 void addRegWithFPInputModsOperands(MCInst &Inst, unsigned N) const {
619 assert(!hasIntModifiers());
620 addRegWithInputModsOperands(Inst, N);
621 }
622
623 void addRegWithIntInputModsOperands(MCInst &Inst, unsigned N) const {
624 assert(!hasFPModifiers());
625 addRegWithInputModsOperands(Inst, N);
626 }
627
628 void addSoppBrTargetOperands(MCInst &Inst, unsigned N) const {
629 if (isImm())
630 addImmOperands(Inst, N);
631 else {
632 assert(isExpr());
633 Inst.addOperand(MCOperand::createExpr(Expr));
634 }
635 }
636
637 static void printImmTy(raw_ostream& OS, ImmTy Type) {
638 switch (Type) {
639 case ImmTyNone: OS << "None"; break;
640 case ImmTyGDS: OS << "GDS"; break;
641 case ImmTyOffen: OS << "Offen"; break;
642 case ImmTyIdxen: OS << "Idxen"; break;
643 case ImmTyAddr64: OS << "Addr64"; break;
644 case ImmTyOffset: OS << "Offset"; break;
645 case ImmTyOffset0: OS << "Offset0"; break;
646 case ImmTyOffset1: OS << "Offset1"; break;
647 case ImmTyGLC: OS << "GLC"; break;
648 case ImmTySLC: OS << "SLC"; break;
649 case ImmTyTFE: OS << "TFE"; break;
650 case ImmTyDFMT: OS << "DFMT"; break;
651 case ImmTyNFMT: OS << "NFMT"; break;
652 case ImmTyClampSI: OS << "ClampSI"; break;
653 case ImmTyOModSI: OS << "OModSI"; break;
654 case ImmTyDppCtrl: OS << "DppCtrl"; break;
655 case ImmTyDppRowMask: OS << "DppRowMask"; break;
656 case ImmTyDppBankMask: OS << "DppBankMask"; break;
657 case ImmTyDppBoundCtrl: OS << "DppBoundCtrl"; break;
658 case ImmTySdwaDstSel: OS << "SdwaDstSel"; break;
659 case ImmTySdwaSrc0Sel: OS << "SdwaSrc0Sel"; break;
660 case ImmTySdwaSrc1Sel: OS << "SdwaSrc1Sel"; break;
661 case ImmTySdwaDstUnused: OS << "SdwaDstUnused"; break;
662 case ImmTyDMask: OS << "DMask"; break;
663 case ImmTyUNorm: OS << "UNorm"; break;
664 case ImmTyDA: OS << "DA"; break;
665 case ImmTyR128: OS << "R128"; break;
666 case ImmTyLWE: OS << "LWE"; break;
667 case ImmTyOff: OS << "Off"; break;
668 case ImmTyExpTgt: OS << "ExpTgt"; break;
669 case ImmTyExpCompr: OS << "ExpCompr"; break;
670 case ImmTyExpVM: OS << "ExpVM"; break;
671 case ImmTyHwreg: OS << "Hwreg"; break;
672 case ImmTySendMsg: OS << "SendMsg"; break;
673 case ImmTyInterpSlot: OS << "InterpSlot"; break;
674 case ImmTyInterpAttr: OS << "InterpAttr"; break;
675 case ImmTyAttrChan: OS << "AttrChan"; break;
676 case ImmTyOpSel: OS << "OpSel"; break;
677 case ImmTyOpSelHi: OS << "OpSelHi"; break;
678 case ImmTyNegLo: OS << "NegLo"; break;
679 case ImmTyNegHi: OS << "NegHi"; break;
680 case ImmTySwizzle: OS << "Swizzle"; break;
681 case ImmTyHigh: OS << "High"; break;
682 }
683 }
684
685 void print(raw_ostream &OS) const override {
686 switch (Kind) {
687 case Register:
688 OS << "<register " << getReg() << " mods: " << Reg.Mods << '>';
689 break;
690 case Immediate:
691 OS << '<' << getImm();
692 if (getImmTy() != ImmTyNone) {
693 OS << " type: "; printImmTy(OS, getImmTy());
694 }
695 OS << " mods: " << Imm.Mods << '>';
696 break;
697 case Token:
698 OS << '\'' << getToken() << '\'';
699 break;
700 case Expression:
701 OS << "<expr " << *Expr << '>';
702 break;
703 }
704 }
705
706 static AMDGPUOperand::Ptr CreateImm(const AMDGPUAsmParser *AsmParser,
707 int64_t Val, SMLoc Loc,
708 ImmTy Type = ImmTyNone,
709 bool IsFPImm = false) {
710 auto Op = llvm::make_unique<AMDGPUOperand>(Immediate, AsmParser);
711 Op->Imm.Val = Val;
712 Op->Imm.IsFPImm = IsFPImm;
713 Op->Imm.Type = Type;
714 Op->Imm.Mods = Modifiers();
715 Op->StartLoc = Loc;
716 Op->EndLoc = Loc;
717 return Op;
718 }
719
720 static AMDGPUOperand::Ptr CreateToken(const AMDGPUAsmParser *AsmParser,
721 StringRef Str, SMLoc Loc,
722 bool HasExplicitEncodingSize = true) {
723 auto Res = llvm::make_unique<AMDGPUOperand>(Token, AsmParser);
724 Res->Tok.Data = Str.data();
725 Res->Tok.Length = Str.size();
726 Res->StartLoc = Loc;
727 Res->EndLoc = Loc;
728 return Res;
729 }
730
731 static AMDGPUOperand::Ptr CreateReg(const AMDGPUAsmParser *AsmParser,
732 unsigned RegNo, SMLoc S,
733 SMLoc E,
734 bool ForceVOP3) {
735 auto Op = llvm::make_unique<AMDGPUOperand>(Register, AsmParser);
736 Op->Reg.RegNo = RegNo;
737 Op->Reg.Mods = Modifiers();
738 Op->Reg.IsForcedVOP3 = ForceVOP3;
739 Op->StartLoc = S;
740 Op->EndLoc = E;
741 return Op;
742 }
743
744 static AMDGPUOperand::Ptr CreateExpr(const AMDGPUAsmParser *AsmParser,
745 const class MCExpr *Expr, SMLoc S) {
746 auto Op = llvm::make_unique<AMDGPUOperand>(Expression, AsmParser);
747 Op->Expr = Expr;
748 Op->StartLoc = S;
749 Op->EndLoc = S;
750 return Op;
751 }
752};
753
754raw_ostream &operator <<(raw_ostream &OS, AMDGPUOperand::Modifiers Mods) {
755 OS << "abs:" << Mods.Abs << " neg: " << Mods.Neg << " sext:" << Mods.Sext;
756 return OS;
757}
758
759//===----------------------------------------------------------------------===//
760// AsmParser
761//===----------------------------------------------------------------------===//
762
763// Holds info related to the current kernel, e.g. count of SGPRs used.
764// Kernel scope begins at .amdgpu_hsa_kernel directive, ends at next
765// .amdgpu_hsa_kernel or at EOF.
766class KernelScopeInfo {
767 int SgprIndexUnusedMin = -1;
768 int VgprIndexUnusedMin = -1;
769 MCContext *Ctx = nullptr;
770
771 void usesSgprAt(int i) {
772 if (i >= SgprIndexUnusedMin) {
773 SgprIndexUnusedMin = ++i;
774 if (Ctx) {
775 MCSymbol * const Sym = Ctx->getOrCreateSymbol(Twine(".kernel.sgpr_count"));
776 Sym->setVariableValue(MCConstantExpr::create(SgprIndexUnusedMin, *Ctx));
777 }
778 }
779 }
780
781 void usesVgprAt(int i) {
782 if (i >= VgprIndexUnusedMin) {
783 VgprIndexUnusedMin = ++i;
784 if (Ctx) {
785 MCSymbol * const Sym = Ctx->getOrCreateSymbol(Twine(".kernel.vgpr_count"));
786 Sym->setVariableValue(MCConstantExpr::create(VgprIndexUnusedMin, *Ctx));
787 }
788 }
789 }
790
791public:
792 KernelScopeInfo() = default;
793
794 void initialize(MCContext &Context) {
795 Ctx = &Context;
796 usesSgprAt(SgprIndexUnusedMin = -1);
797 usesVgprAt(VgprIndexUnusedMin = -1);
798 }
799
800 void usesRegister(RegisterKind RegKind, unsigned DwordRegIndex, unsigned RegWidth) {
801 switch (RegKind) {
802 case IS_SGPR: usesSgprAt(DwordRegIndex + RegWidth - 1); break;
803 case IS_VGPR: usesVgprAt(DwordRegIndex + RegWidth - 1); break;
804 default: break;
805 }
806 }
807};
808
809class AMDGPUAsmParser : public MCTargetAsmParser {
810 MCAsmParser &Parser;
811
812 unsigned ForcedEncodingSize = 0;
813 bool ForcedDPP = false;
814 bool ForcedSDWA = false;
815 KernelScopeInfo KernelScope;
816
817 /// @name Auto-generated Match Functions
818 /// {
819
820#define GET_ASSEMBLER_HEADER
821#include "AMDGPUGenAsmMatcher.inc"
822
823 /// }
824
825private:
826 bool ParseAsAbsoluteExpression(uint32_t &Ret);
827 bool ParseDirectiveMajorMinor(uint32_t &Major, uint32_t &Minor);
828 bool ParseDirectiveHSACodeObjectVersion();
829 bool ParseDirectiveHSACodeObjectISA();
830 bool ParseAMDKernelCodeTValue(StringRef ID, amd_kernel_code_t &Header);
831 bool ParseDirectiveAMDKernelCodeT();
832 bool subtargetHasRegister(const MCRegisterInfo &MRI, unsigned RegNo) const;
833 bool ParseDirectiveAMDGPUHsaKernel();
834
835 bool ParseDirectiveISAVersion();
836 bool ParseDirectiveHSAMetadata();
837 bool ParseDirectivePALMetadata();
838
839 bool AddNextRegisterToList(unsigned& Reg, unsigned& RegWidth,
840 RegisterKind RegKind, unsigned Reg1,
841 unsigned RegNum);
842 bool ParseAMDGPURegister(RegisterKind& RegKind, unsigned& Reg,
843 unsigned& RegNum, unsigned& RegWidth,
844 unsigned *DwordRegIndex);
845 void cvtMubufImpl(MCInst &Inst, const OperandVector &Operands,
846 bool IsAtomic, bool IsAtomicReturn);
847 void cvtDSImpl(MCInst &Inst, const OperandVector &Operands,
848 bool IsGdsHardcoded);
849
850public:
851 enum AMDGPUMatchResultTy {
852 Match_PreferE32 = FIRST_TARGET_MATCH_RESULT_TY
853 };
854
855 using OptionalImmIndexMap = std::map<AMDGPUOperand::ImmTy, unsigned>;
856
857 AMDGPUAsmParser(const MCSubtargetInfo &STI, MCAsmParser &_Parser,
858 const MCInstrInfo &MII,
859 const MCTargetOptions &Options)
860 : MCTargetAsmParser(Options, STI, MII), Parser(_Parser) {
861 MCAsmParserExtension::Initialize(Parser);
862
863 if (getFeatureBits().none()) {
864 // Set default features.
865 copySTI().ToggleFeature("SOUTHERN_ISLANDS");
866 }
867
868 setAvailableFeatures(ComputeAvailableFeatures(getFeatureBits()));
869
870 {
871 // TODO: make those pre-defined variables read-only.
872 // Currently there is no suitable machinery in the core llvm-mc for this.
873 // MCSymbol::isRedefinable is intended for another purpose, and
874 // AsmParser::parseDirectiveSet() cannot be specialized for a specific target.
875 AMDGPU::IsaInfo::IsaVersion ISA =
876 AMDGPU::IsaInfo::getIsaVersion(getFeatureBits());
877 MCContext &Ctx = getContext();
878 MCSymbol *Sym =
879 Ctx.getOrCreateSymbol(Twine(".option.machine_version_major"));
880 Sym->setVariableValue(MCConstantExpr::create(ISA.Major, Ctx));
881 Sym = Ctx.getOrCreateSymbol(Twine(".option.machine_version_minor"));
882 Sym->setVariableValue(MCConstantExpr::create(ISA.Minor, Ctx));
883 Sym = Ctx.getOrCreateSymbol(Twine(".option.machine_version_stepping"));
884 Sym->setVariableValue(MCConstantExpr::create(ISA.Stepping, Ctx));
885 }
886 KernelScope.initialize(getContext());
887 }
888
889 bool isSI() const {
890 return AMDGPU::isSI(getSTI());
891 }
892
893 bool isCI() const {
894 return AMDGPU::isCI(getSTI());
895 }
896
897 bool isVI() const {
898 return AMDGPU::isVI(getSTI());
899 }
900
901 bool isGFX9() const {
902 return AMDGPU::isGFX9(getSTI());
903 }
904
905 bool hasInv2PiInlineImm() const {
906 return getFeatureBits()[AMDGPU::FeatureInv2PiInlineImm];
907 }
908
909 bool hasFlatOffsets() const {
910 return getFeatureBits()[AMDGPU::FeatureFlatInstOffsets];
911 }
912
913 bool hasSGPR102_SGPR103() const {
914 return !isVI();
915 }
916
917 bool hasIntClamp() const {
918 return getFeatureBits()[AMDGPU::FeatureIntClamp];
919 }
920
921 AMDGPUTargetStreamer &getTargetStreamer() {
922 MCTargetStreamer &TS = *getParser().getStreamer().getTargetStreamer();
923 return static_cast<AMDGPUTargetStreamer &>(TS);
924 }
925
926 const MCRegisterInfo *getMRI() const {
927 // We need this const_cast because for some reason getContext() is not const
928 // in MCAsmParser.
929 return const_cast<AMDGPUAsmParser*>(this)->getContext().getRegisterInfo();
930 }
931
932 const MCInstrInfo *getMII() const {
933 return &MII;
934 }
935
936 const FeatureBitset &getFeatureBits() const {
937 return getSTI().getFeatureBits();
938 }
939
940 void setForcedEncodingSize(unsigned Size) { ForcedEncodingSize = Size; }
941 void setForcedDPP(bool ForceDPP_) { ForcedDPP = ForceDPP_; }
942 void setForcedSDWA(bool ForceSDWA_) { ForcedSDWA = ForceSDWA_; }
943
944 unsigned getForcedEncodingSize() const { return ForcedEncodingSize; }
945 bool isForcedVOP3() const { return ForcedEncodingSize == 64; }
946 bool isForcedDPP() const { return ForcedDPP; }
947 bool isForcedSDWA() const { return ForcedSDWA; }
948 ArrayRef<unsigned> getMatchedVariants() const;
949
950 std::unique_ptr<AMDGPUOperand> parseRegister();
951 bool ParseRegister(unsigned &RegNo, SMLoc &StartLoc, SMLoc &EndLoc) override;
952 unsigned checkTargetMatchPredicate(MCInst &Inst) override;
953 unsigned validateTargetOperandClass(MCParsedAsmOperand &Op,
954 unsigned Kind) override;
955 bool MatchAndEmitInstruction(SMLoc IDLoc, unsigned &Opcode,
956 OperandVector &Operands, MCStreamer &Out,
957 uint64_t &ErrorInfo,
958 bool MatchingInlineAsm) override;
959 bool ParseDirective(AsmToken DirectiveID) override;
960 OperandMatchResultTy parseOperand(OperandVector &Operands, StringRef Mnemonic);
961 StringRef parseMnemonicSuffix(StringRef Name);
962 bool ParseInstruction(ParseInstructionInfo &Info, StringRef Name,
963 SMLoc NameLoc, OperandVector &Operands) override;
964 //bool ProcessInstruction(MCInst &Inst);
965
966 OperandMatchResultTy parseIntWithPrefix(const char *Prefix, int64_t &Int);
967
968 OperandMatchResultTy
969 parseIntWithPrefix(const char *Prefix, OperandVector &Operands,
970 AMDGPUOperand::ImmTy ImmTy = AMDGPUOperand::ImmTyNone,
971 bool (*ConvertResult)(int64_t &) = nullptr);
972
973 OperandMatchResultTy parseOperandArrayWithPrefix(
974 const char *Prefix,
975 OperandVector &Operands,
976 AMDGPUOperand::ImmTy ImmTy = AMDGPUOperand::ImmTyNone,
977 bool (*ConvertResult)(int64_t&) = nullptr);
978
979 OperandMatchResultTy
980 parseNamedBit(const char *Name, OperandVector &Operands,
981 AMDGPUOperand::ImmTy ImmTy = AMDGPUOperand::ImmTyNone);
982 OperandMatchResultTy parseStringWithPrefix(StringRef Prefix,
983 StringRef &Value);
984
985 bool parseAbsoluteExpr(int64_t &Val, bool AbsMod = false);
986 OperandMatchResultTy parseImm(OperandVector &Operands, bool AbsMod = false);
987 OperandMatchResultTy parseReg(OperandVector &Operands);
988 OperandMatchResultTy parseRegOrImm(OperandVector &Operands, bool AbsMod = false);
989 OperandMatchResultTy parseRegOrImmWithFPInputMods(OperandVector &Operands, bool AllowImm = true);
990 OperandMatchResultTy parseRegOrImmWithIntInputMods(OperandVector &Operands, bool AllowImm = true);
991 OperandMatchResultTy parseRegWithFPInputMods(OperandVector &Operands);
992 OperandMatchResultTy parseRegWithIntInputMods(OperandVector &Operands);
993 OperandMatchResultTy parseVReg32OrOff(OperandVector &Operands);
994
995 void cvtDSOffset01(MCInst &Inst, const OperandVector &Operands);
996 void cvtDS(MCInst &Inst, const OperandVector &Operands) { cvtDSImpl(Inst, Operands, false); }
997 void cvtDSGds(MCInst &Inst, const OperandVector &Operands) { cvtDSImpl(Inst, Operands, true); }
998 void cvtExp(MCInst &Inst, const OperandVector &Operands);
999
1000 bool parseCnt(int64_t &IntVal);
1001 OperandMatchResultTy parseSWaitCntOps(OperandVector &Operands);
1002 OperandMatchResultTy parseHwreg(OperandVector &Operands);
1003
1004private:
1005 struct OperandInfoTy {
1006 int64_t Id;
1007 bool IsSymbolic = false;
1008
1009 OperandInfoTy(int64_t Id_) : Id(Id_) {}
1010 };
1011
1012 bool parseSendMsgConstruct(OperandInfoTy &Msg, OperandInfoTy &Operation, int64_t &StreamId);
1013 bool parseHwregConstruct(OperandInfoTy &HwReg, int64_t &Offset, int64_t &Width);
1014
1015 void errorExpTgt();
1016 OperandMatchResultTy parseExpTgtImpl(StringRef Str, uint8_t &Val);
1017
1018 bool validateInstruction(const MCInst &Inst, const SMLoc &IDLoc);
1019 bool validateConstantBusLimitations(const MCInst &Inst);
1020 bool validateEarlyClobberLimitations(const MCInst &Inst);
1021 bool validateIntClampSupported(const MCInst &Inst);
1022 bool usesConstantBus(const MCInst &Inst, unsigned OpIdx);
1023 bool isInlineConstant(const MCInst &Inst, unsigned OpIdx) const;
1024 unsigned findImplicitSGPRReadInVOP(const MCInst &Inst) const;
1025
1026 bool trySkipId(const StringRef Id);
1027 bool trySkipToken(const AsmToken::TokenKind Kind);
1028 bool skipToken(const AsmToken::TokenKind Kind, const StringRef ErrMsg);
1029 bool parseString(StringRef &Val, const StringRef ErrMsg = "expected a string");
1030 bool parseExpr(int64_t &Imm);
1031
1032public:
1033 OperandMatchResultTy parseOptionalOperand(OperandVector &Operands);
1034
1035 OperandMatchResultTy parseExpTgt(OperandVector &Operands);
1036 OperandMatchResultTy parseSendMsgOp(OperandVector &Operands);
1037 OperandMatchResultTy parseInterpSlot(OperandVector &Operands);
1038 OperandMatchResultTy parseInterpAttr(OperandVector &Operands);
1039 OperandMatchResultTy parseSOppBrTarget(OperandVector &Operands);
1040
1041 bool parseSwizzleOperands(const unsigned OpNum, int64_t* Op,
1042 const unsigned MinVal,
1043 const unsigned MaxVal,
1044 const StringRef ErrMsg);
1045 OperandMatchResultTy parseSwizzleOp(OperandVector &Operands);
1046 bool parseSwizzleOffset(int64_t &Imm);
1047 bool parseSwizzleMacro(int64_t &Imm);
1048 bool parseSwizzleQuadPerm(int64_t &Imm);
1049 bool parseSwizzleBitmaskPerm(int64_t &Imm);
1050 bool parseSwizzleBroadcast(int64_t &Imm);
1051 bool parseSwizzleSwap(int64_t &Imm);
1052 bool parseSwizzleReverse(int64_t &Imm);
1053
1054 void cvtMubuf(MCInst &Inst, const OperandVector &Operands) { cvtMubufImpl(Inst, Operands, false, false); }
1055 void cvtMubufAtomic(MCInst &Inst, const OperandVector &Operands) { cvtMubufImpl(Inst, Operands, true, false); }
1056 void cvtMubufAtomicReturn(MCInst &Inst, const OperandVector &Operands) { cvtMubufImpl(Inst, Operands, true, true); }
1057 void cvtMtbuf(MCInst &Inst, const OperandVector &Operands);
1058
1059 AMDGPUOperand::Ptr defaultGLC() const;
1060 AMDGPUOperand::Ptr defaultSLC() const;
1061 AMDGPUOperand::Ptr defaultTFE() const;
1062
1063 AMDGPUOperand::Ptr defaultDMask() const;
1064 AMDGPUOperand::Ptr defaultUNorm() const;
1065 AMDGPUOperand::Ptr defaultDA() const;
1066 AMDGPUOperand::Ptr defaultR128() const;
1067 AMDGPUOperand::Ptr defaultLWE() const;
1068 AMDGPUOperand::Ptr defaultSMRDOffset8() const;
1069 AMDGPUOperand::Ptr defaultSMRDOffset20() const;
1070 AMDGPUOperand::Ptr defaultSMRDLiteralOffset() const;
1071 AMDGPUOperand::Ptr defaultOffsetU12() const;
1072 AMDGPUOperand::Ptr defaultOffsetS13() const;
1073
1074 OperandMatchResultTy parseOModOperand(OperandVector &Operands);
1075
1076 void cvtVOP3(MCInst &Inst, const OperandVector &Operands,
1077 OptionalImmIndexMap &OptionalIdx);
1078 void cvtVOP3OpSel(MCInst &Inst, const OperandVector &Operands);
1079 void cvtVOP3(MCInst &Inst, const OperandVector &Operands);
1080 void cvtVOP3P(MCInst &Inst, const OperandVector &Operands);
1081
1082 void cvtVOP3Interp(MCInst &Inst, const OperandVector &Operands);
1083
1084 void cvtMIMG(MCInst &Inst, const OperandVector &Operands,
1085 bool IsAtomic = false);
1086 void cvtMIMGAtomic(MCInst &Inst, const OperandVector &Operands);
1087
1088 OperandMatchResultTy parseDPPCtrl(OperandVector &Operands);
1089 AMDGPUOperand::Ptr defaultRowMask() const;
1090 AMDGPUOperand::Ptr defaultBankMask() const;
1091 AMDGPUOperand::Ptr defaultBoundCtrl() const;
1092 void cvtDPP(MCInst &Inst, const OperandVector &Operands);
1093
1094 OperandMatchResultTy parseSDWASel(OperandVector &Operands, StringRef Prefix,
1095 AMDGPUOperand::ImmTy Type);
1096 OperandMatchResultTy parseSDWADstUnused(OperandVector &Operands);
1097 void cvtSdwaVOP1(MCInst &Inst, const OperandVector &Operands);
1098 void cvtSdwaVOP2(MCInst &Inst, const OperandVector &Operands);
1099 void cvtSdwaVOP2b(MCInst &Inst, const OperandVector &Operands);
1100 void cvtSdwaVOPC(MCInst &Inst, const OperandVector &Operands);
1101 void cvtSDWA(MCInst &Inst, const OperandVector &Operands,
1102 uint64_t BasicInstType, bool skipVcc = false);
1103};
1104
1105struct OptionalOperand {
1106 const char *Name;
1107 AMDGPUOperand::ImmTy Type;
1108 bool IsBit;
1109 bool (*ConvertResult)(int64_t&);
1110};
1111
1112} // end anonymous namespace
1113
1114// May be called with integer type with equivalent bitwidth.
1115static const fltSemantics *getFltSemantics(unsigned Size) {
1116 switch (Size) {
1117 case 4:
1118 return &APFloat::IEEEsingle();
1119 case 8:
1120 return &APFloat::IEEEdouble();
1121 case 2:
1122 return &APFloat::IEEEhalf();
1123 default:
1124 llvm_unreachable("unsupported fp type");
1125 }
1126}
1127
1128static const fltSemantics *getFltSemantics(MVT VT) {
1129 return getFltSemantics(VT.getSizeInBits() / 8);
1130}
1131
1132static const fltSemantics *getOpFltSemantics(uint8_t OperandType) {
1133 switch (OperandType) {
1134 case AMDGPU::OPERAND_REG_IMM_INT32:
1135 case AMDGPU::OPERAND_REG_IMM_FP32:
1136 case AMDGPU::OPERAND_REG_INLINE_C_INT32:
1137 case AMDGPU::OPERAND_REG_INLINE_C_FP32:
1138 return &APFloat::IEEEsingle();
1139 case AMDGPU::OPERAND_REG_IMM_INT64:
1140 case AMDGPU::OPERAND_REG_IMM_FP64:
1141 case AMDGPU::OPERAND_REG_INLINE_C_INT64:
1142 case AMDGPU::OPERAND_REG_INLINE_C_FP64:
1143 return &APFloat::IEEEdouble();
1144 case AMDGPU::OPERAND_REG_IMM_INT16:
1145 case AMDGPU::OPERAND_REG_IMM_FP16:
1146 case AMDGPU::OPERAND_REG_INLINE_C_INT16:
1147 case AMDGPU::OPERAND_REG_INLINE_C_FP16:
1148 case AMDGPU::OPERAND_REG_INLINE_C_V2INT16:
1149 case AMDGPU::OPERAND_REG_INLINE_C_V2FP16:
1150 return &APFloat::IEEEhalf();
1151 default:
1152 llvm_unreachable("unsupported fp type");
1153 }
1154}
1155
1156//===----------------------------------------------------------------------===//
1157// Operand
1158//===----------------------------------------------------------------------===//
1159
1160static bool canLosslesslyConvertToFPType(APFloat &FPLiteral, MVT VT) {
1161 bool Lost;
1162
1163 // Convert literal to single precision
1164 APFloat::opStatus Status = FPLiteral.convert(*getFltSemantics(VT),
1165 APFloat::rmNearestTiesToEven,
1166 &Lost);
1167 // We allow precision loss, but not overflow or underflow.
1168 if (Status != APFloat::opOK &&
1169 Lost &&
1170 ((Status & APFloat::opOverflow) != 0 ||
1171 (Status & APFloat::opUnderflow) != 0)) {
1172 return false;
1173 }
1174
1175 return true;
1176}
1177
1178bool AMDGPUOperand::isInlinableImm(MVT type) const {
1179 if (!isImmTy(ImmTyNone)) {
1180 // Only plain immediates are inlinable (e.g. "clamp" attribute is not)
1181 return false;
1182 }
1183 // TODO: We should avoid using host float here. It would be better to
1184 // check the float bit values which is what a few other places do.
1185 // We've had bot failures before due to weird NaN support on mips hosts.
1186
1187 APInt Literal(64, Imm.Val);
1188
1189 if (Imm.IsFPImm) { // We got fp literal token
1190 if (type == MVT::f64 || type == MVT::i64) { // Expected 64-bit operand
1191 return AMDGPU::isInlinableLiteral64(Imm.Val,
1192 AsmParser->hasInv2PiInlineImm());
1193 }
1194
1195 APFloat FPLiteral(APFloat::IEEEdouble(), APInt(64, Imm.Val));
1196 if (!canLosslesslyConvertToFPType(FPLiteral, type))
1197 return false;
1198
1199 if (type.getScalarSizeInBits() == 16) {
1200 return AMDGPU::isInlinableLiteral16(
1201 static_cast<int16_t>(FPLiteral.bitcastToAPInt().getZExtValue()),
1202 AsmParser->hasInv2PiInlineImm());
1203 }
1204
1205 // Check if single precision literal is inlinable
1206 return AMDGPU::isInlinableLiteral32(
1207 static_cast<int32_t>(FPLiteral.bitcastToAPInt().getZExtValue()),
1208 AsmParser->hasInv2PiInlineImm());
1209 }
1210
1211 // We got int literal token.
1212 if (type == MVT::f64 || type == MVT::i64) { // Expected 64-bit operand
1213 return AMDGPU::isInlinableLiteral64(Imm.Val,
1214 AsmParser->hasInv2PiInlineImm());
1215 }
1216
1217 if (type.getScalarSizeInBits() == 16) {
1218 return AMDGPU::isInlinableLiteral16(
1219 static_cast<int16_t>(Literal.getLoBits(16).getSExtValue()),
1220 AsmParser->hasInv2PiInlineImm());
1221 }
1222
1223 return AMDGPU::isInlinableLiteral32(
1224 static_cast<int32_t>(Literal.getLoBits(32).getZExtValue()),
1225 AsmParser->hasInv2PiInlineImm());
1226}
1227
1228bool AMDGPUOperand::isLiteralImm(MVT type) const {
1229 // Check that this immediate can be added as literal
1230 if (!isImmTy(ImmTyNone)) {
1231 return false;
1232 }
1233
1234 if (!Imm.IsFPImm) {
1235 // We got int literal token.
1236
1237 if (type == MVT::f64 && hasFPModifiers()) {
1238 // Cannot apply fp modifiers to int literals preserving the same semantics
1239 // for VOP1/2/C and VOP3 because of integer truncation. To avoid ambiguity,
1240 // disable these cases.
1241 return false;
1242 }
1243
1244 unsigned Size = type.getSizeInBits();
1245 if (Size == 64)
1246 Size = 32;
1247
1248 // FIXME: 64-bit operands can zero extend, sign extend, or pad zeroes for FP
1249 // types.
1250 return isUIntN(Size, Imm.Val) || isIntN(Size, Imm.Val);
1251 }
1252
1253 // We got fp literal token
1254 if (type == MVT::f64) { // Expected 64-bit fp operand
1255 // We would set the low 32 bits of the literal to zeroes, but we accept these literals.
1256 return true;
1257 }
1258
1259 if (type == MVT::i64) { // Expected 64-bit int operand
1260 // We don't allow fp literals in 64-bit integer instructions. It is
1261 // unclear how we should encode them.
1262 return false;
1263 }
1264
1265 APFloat FPLiteral(APFloat::IEEEdouble(), APInt(64, Imm.Val));
1266 return canLosslesslyConvertToFPType(FPLiteral, type);
1267}
1268
1269bool AMDGPUOperand::isRegClass(unsigned RCID) const {
1270 return isRegKind() && AsmParser->getMRI()->getRegClass(RCID).contains(getReg());
1271}
1272
1273bool AMDGPUOperand::isSDWARegKind() const {
1274 if (AsmParser->isVI())
1275 return isVReg();
1276 else if (AsmParser->isGFX9())
1277 return isRegKind();
1278 else
1279 return false;
1280}
1281
1282uint64_t AMDGPUOperand::applyInputFPModifiers(uint64_t Val, unsigned Size) const
1283{
1284 assert(isImmTy(ImmTyNone) && Imm.Mods.hasFPModifiers());
1285 assert(Size == 2 || Size == 4 || Size == 8);
1286
1287 const uint64_t FpSignMask = (1ULL << (Size * 8 - 1));
1288
1289 if (Imm.Mods.Abs) {
1290 Val &= ~FpSignMask;
1291 }
1292 if (Imm.Mods.Neg) {
1293 Val ^= FpSignMask;
1294 }
1295
1296 return Val;
1297}
1298
1299void AMDGPUOperand::addImmOperands(MCInst &Inst, unsigned N, bool ApplyModifiers) const {
1300 if (AMDGPU::isSISrcOperand(AsmParser->getMII()->get(Inst.getOpcode()),
1301 Inst.getNumOperands())) {
1302 addLiteralImmOperand(Inst, Imm.Val,
1303 ApplyModifiers &
1304 isImmTy(ImmTyNone) && Imm.Mods.hasFPModifiers());
1305 } else {
1306 assert(!isImmTy(ImmTyNone) || !hasModifiers());
1307 Inst.addOperand(MCOperand::createImm(Imm.Val));
1308 }
1309}
1310
1311void AMDGPUOperand::addLiteralImmOperand(MCInst &Inst, int64_t Val, bool ApplyModifiers) const {
1312 const auto& InstDesc = AsmParser->getMII()->get(Inst.getOpcode());
1313 auto OpNum = Inst.getNumOperands();
1314 // Check that this operand accepts literals
1315 assert(AMDGPU::isSISrcOperand(InstDesc, OpNum));
1316
1317 if (ApplyModifiers) {
1318 assert(AMDGPU::isSISrcFPOperand(InstDesc, OpNum));
1319 const unsigned Size = Imm.IsFPImm ? sizeof(double) : getOperandSize(InstDesc, OpNum);
1320 Val = applyInputFPModifiers(Val, Size);
1321 }
1322
1323 APInt Literal(64, Val);
1324 uint8_t OpTy = InstDesc.OpInfo[OpNum].OperandType;
1325
1326 if (Imm.IsFPImm) { // We got fp literal token
1327 switch (OpTy) {
1328 case AMDGPU::OPERAND_REG_IMM_INT64:
1329 case AMDGPU::OPERAND_REG_IMM_FP64:
1330 case AMDGPU::OPERAND_REG_INLINE_C_INT64:
1331 case AMDGPU::OPERAND_REG_INLINE_C_FP64:
1332 if (AMDGPU::isInlinableLiteral64(Literal.getZExtValue(),
1333 AsmParser->hasInv2PiInlineImm())) {
1334 Inst.addOperand(MCOperand::createImm(Literal.getZExtValue()));
1335 return;
1336 }
1337
1338 // Non-inlineable
1339 if (AMDGPU::isSISrcFPOperand(InstDesc, OpNum)) { // Expected 64-bit fp operand
1340 // For fp operands we check if low 32 bits are zeros
1341 if (Literal.getLoBits(32) != 0) {
1342 const_cast<AMDGPUAsmParser *>(AsmParser)->Warning(Inst.getLoc(),
1343 "Can't encode literal as exact 64-bit floating-point operand. "
1344 "Low 32-bits will be set to zero");
1345 }
1346
1347 Inst.addOperand(MCOperand::createImm(Literal.lshr(32).getZExtValue()));
1348 return;
1349 }
1350
1351 // We don't allow fp literals in 64-bit integer instructions. It is
1352 // unclear how we should encode them. This case should be checked earlier
1353 // in predicate methods (isLiteralImm())
1354 llvm_unreachable("fp literal in 64-bit integer instruction.");
1355
1356 case AMDGPU::OPERAND_REG_IMM_INT32:
1357 case AMDGPU::OPERAND_REG_IMM_FP32:
1358 case AMDGPU::OPERAND_REG_INLINE_C_INT32:
1359 case AMDGPU::OPERAND_REG_INLINE_C_FP32:
1360 case AMDGPU::OPERAND_REG_IMM_INT16:
1361 case AMDGPU::OPERAND_REG_IMM_FP16:
1362 case AMDGPU::OPERAND_REG_INLINE_C_INT16:
1363 case AMDGPU::OPERAND_REG_INLINE_C_FP16:
1364 case AMDGPU::OPERAND_REG_INLINE_C_V2INT16:
1365 case AMDGPU::OPERAND_REG_INLINE_C_V2FP16: {
1366 bool lost;
1367 APFloat FPLiteral(APFloat::IEEEdouble(), Literal);
1368 // Convert literal to single precision
1369 FPLiteral.convert(*getOpFltSemantics(OpTy),
1370 APFloat::rmNearestTiesToEven, &lost);
1371 // We allow precision loss, but not overflow or underflow. This should be
1372 // checked earlier in isLiteralImm()
1373
1374 uint64_t ImmVal = FPLiteral.bitcastToAPInt().getZExtValue();
1375 if (OpTy == AMDGPU::OPERAND_REG_INLINE_C_V2INT16 ||
1376 OpTy == AMDGPU::OPERAND_REG_INLINE_C_V2FP16) {
1377 ImmVal |= (ImmVal << 16);
1378 }
1379
1380 Inst.addOperand(MCOperand::createImm(ImmVal));
1381 return;
1382 }
1383 default:
1384 llvm_unreachable("invalid operand size");
1385 }
1386
1387 return;
1388 }
1389
1390 // We got int literal token.
1391 // Only sign extend inline immediates.
1392 // FIXME: No errors on truncation
1393 switch (OpTy) {
1394 case AMDGPU::OPERAND_REG_IMM_INT32:
1395 case AMDGPU::OPERAND_REG_IMM_FP32:
1396 case AMDGPU::OPERAND_REG_INLINE_C_INT32:
1397 case AMDGPU::OPERAND_REG_INLINE_C_FP32:
1398 if (isInt<32>(Val) &&
1399 AMDGPU::isInlinableLiteral32(static_cast<int32_t>(Val),
1400 AsmParser->hasInv2PiInlineImm())) {
1401 Inst.addOperand(MCOperand::createImm(Val));
1402 return;
1403 }
1404
1405 Inst.addOperand(MCOperand::createImm(Val & 0xffffffff));
1406 return;
1407
1408 case AMDGPU::OPERAND_REG_IMM_INT64:
1409 case AMDGPU::OPERAND_REG_IMM_FP64:
1410 case AMDGPU::OPERAND_REG_INLINE_C_INT64:
1411 case AMDGPU::OPERAND_REG_INLINE_C_FP64:
1412 if (AMDGPU::isInlinableLiteral64(Val, AsmParser->hasInv2PiInlineImm())) {
1413 Inst.addOperand(MCOperand::createImm(Val));
1414 return;
1415 }
1416
1417 Inst.addOperand(MCOperand::createImm(Lo_32(Val)));
1418 return;
1419
1420 case AMDGPU::OPERAND_REG_IMM_INT16:
1421 case AMDGPU::OPERAND_REG_IMM_FP16:
1422 case AMDGPU::OPERAND_REG_INLINE_C_INT16:
1423 case AMDGPU::OPERAND_REG_INLINE_C_FP16:
1424 if (isInt<16>(Val) &&
1425 AMDGPU::isInlinableLiteral16(static_cast<int16_t>(Val),
1426 AsmParser->hasInv2PiInlineImm())) {
1427 Inst.addOperand(MCOperand::createImm(Val));
1428 return;
1429 }
1430
1431 Inst.addOperand(MCOperand::createImm(Val & 0xffff));
1432 return;
1433
1434 case AMDGPU::OPERAND_REG_INLINE_C_V2INT16:
1435 case AMDGPU::OPERAND_REG_INLINE_C_V2FP16: {
1436 auto LiteralVal = static_cast<uint16_t>(Literal.getLoBits(16).getZExtValue());
1437 assert(AMDGPU::isInlinableLiteral16(LiteralVal,
1438 AsmParser->hasInv2PiInlineImm()));
1439
1440 uint32_t ImmVal = static_cast<uint32_t>(LiteralVal) << 16 |
1441 static_cast<uint32_t>(LiteralVal);
1442 Inst.addOperand(MCOperand::createImm(ImmVal));
1443 return;
1444 }
1445 default:
1446 llvm_unreachable("invalid operand size");
1447 }
1448}
1449
1450template <unsigned Bitwidth>
1451void AMDGPUOperand::addKImmFPOperands(MCInst &Inst, unsigned N) const {
1452 APInt Literal(64, Imm.Val);
1453
1454 if (!Imm.IsFPImm) {
1455 // We got int literal token.
1456 Inst.addOperand(MCOperand::createImm(Literal.getLoBits(Bitwidth).getZExtValue()));
1457 return;
1458 }
1459
1460 bool Lost;
1461 APFloat FPLiteral(APFloat::IEEEdouble(), Literal);
1462 FPLiteral.convert(*getFltSemantics(Bitwidth / 8),
1463 APFloat::rmNearestTiesToEven, &Lost);
1464 Inst.addOperand(MCOperand::createImm(FPLiteral.bitcastToAPInt().getZExtValue()));
1465}
1466
1467void AMDGPUOperand::addRegOperands(MCInst &Inst, unsigned N) const {
1468 Inst.addOperand(MCOperand::createReg(AMDGPU::getMCReg(getReg(), AsmParser->getSTI())));
1469}
1470
1471//===----------------------------------------------------------------------===//
1472// AsmParser
1473//===----------------------------------------------------------------------===//
1474
1475static int getRegClass(RegisterKind Is, unsigned RegWidth) {
1476 if (Is == IS_VGPR) {
1477 switch (RegWidth) {
1478 default: return -1;
1479 case 1: return AMDGPU::VGPR_32RegClassID;
1480 case 2: return AMDGPU::VReg_64RegClassID;
1481 case 3: return AMDGPU::VReg_96RegClassID;
1482 case 4: return AMDGPU::VReg_128RegClassID;
1483 case 8: return AMDGPU::VReg_256RegClassID;
1484 case 16: return AMDGPU::VReg_512RegClassID;
1485 }
1486 } else if (Is == IS_TTMP) {
1487 switch (RegWidth) {
1488 default: return -1;
1489 case 1: return AMDGPU::TTMP_32RegClassID;
1490 case 2: return AMDGPU::TTMP_64RegClassID;
1491 case 4: return AMDGPU::TTMP_128RegClassID;
1492 }
1493 } else if (Is == IS_SGPR) {
1494 switch (RegWidth) {
1495 default: return -1;
1496 case 1: return AMDGPU::SGPR_32RegClassID;
1497 case 2: return AMDGPU::SGPR_64RegClassID;
1498 case 4: return AMDGPU::SGPR_128RegClassID;
1499 case 8: return AMDGPU::SReg_256RegClassID;
1500 case 16: return AMDGPU::SReg_512RegClassID;
1501 }
1502 }
1503 return -1;
1504}
1505
1506static unsigned getSpecialRegForName(StringRef RegName) {
1507 return StringSwitch<unsigned>(RegName)
1508 .Case("exec", AMDGPU::EXEC)
1509 .Case("vcc", AMDGPU::VCC)
1510 .Case("flat_scratch", AMDGPU::FLAT_SCR)
1511 .Case("m0", AMDGPU::M0)
1512 .Case("scc", AMDGPU::SCC)
1513 .Case("tba", AMDGPU::TBA)
1514 .Case("tma", AMDGPU::TMA)
1515 .Case("flat_scratch_lo", AMDGPU::FLAT_SCR_LO)
1516 .Case("flat_scratch_hi", AMDGPU::FLAT_SCR_HI)
1517 .Case("vcc_lo", AMDGPU::VCC_LO)
1518 .Case("vcc_hi", AMDGPU::VCC_HI)
1519 .Case("exec_lo", AMDGPU::EXEC_LO)
1520 .Case("exec_hi", AMDGPU::EXEC_HI)
1521 .Case("tma_lo", AMDGPU::TMA_LO)
1522 .Case("tma_hi", AMDGPU::TMA_HI)
1523 .Case("tba_lo", AMDGPU::TBA_LO)
1524 .Case("tba_hi", AMDGPU::TBA_HI)
1525 .Default(0);
1526}
1527
1528bool AMDGPUAsmParser::ParseRegister(unsigned &RegNo, SMLoc &StartLoc,
1529 SMLoc &EndLoc) {
1530 auto R = parseRegister();
1531 if (!R) return true;
1532  assert(R->isReg());
1533 RegNo = R->getReg();
1534 StartLoc = R->getStartLoc();
1535 EndLoc = R->getEndLoc();
1536 return false;
1537}
1538
1539bool AMDGPUAsmParser::AddNextRegisterToList(unsigned &Reg, unsigned &RegWidth,
1540 RegisterKind RegKind, unsigned Reg1,
1541 unsigned RegNum) {
1542 switch (RegKind) {
1543 case IS_SPECIAL:
1544 if (Reg == AMDGPU::EXEC_LO && Reg1 == AMDGPU::EXEC_HI) {
1545 Reg = AMDGPU::EXEC;
1546 RegWidth = 2;
1547 return true;
1548 }
1549 if (Reg == AMDGPU::FLAT_SCR_LO && Reg1 == AMDGPU::FLAT_SCR_HI) {
1550 Reg = AMDGPU::FLAT_SCR;
1551 RegWidth = 2;
1552 return true;
1553 }
1554 if (Reg == AMDGPU::VCC_LO && Reg1 == AMDGPU::VCC_HI) {
1555 Reg = AMDGPU::VCC;
1556 RegWidth = 2;
1557 return true;
1558 }
1559 if (Reg == AMDGPU::TBA_LO && Reg1 == AMDGPU::TBA_HI) {
1560 Reg = AMDGPU::TBA;
1561 RegWidth = 2;
1562 return true;
1563 }
1564 if (Reg == AMDGPU::TMA_LO && Reg1 == AMDGPU::TMA_HI) {
1565 Reg = AMDGPU::TMA;
1566 RegWidth = 2;
1567 return true;
1568 }
1569 return false;
1570 case IS_VGPR:
1571 case IS_SGPR:
1572 case IS_TTMP:
1573 if (Reg1 != Reg + RegWidth) {
1574 return false;
1575 }
1576 RegWidth++;
1577 return true;
1578 default:
1579    llvm_unreachable("unexpected register kind");
1580 }
1581}
1582
1583bool AMDGPUAsmParser::ParseAMDGPURegister(RegisterKind &RegKind, unsigned &Reg,
1584 unsigned &RegNum, unsigned &RegWidth,
1585 unsigned *DwordRegIndex) {
1586 if (DwordRegIndex) { *DwordRegIndex = 0; }
1587 const MCRegisterInfo *TRI = getContext().getRegisterInfo();
1588 if (getLexer().is(AsmToken::Identifier)) {
1589 StringRef RegName = Parser.getTok().getString();
1590 if ((Reg = getSpecialRegForName(RegName))) {
1591 Parser.Lex();
1592 RegKind = IS_SPECIAL;
1593 } else {
1594 unsigned RegNumIndex = 0;
1595 if (RegName[0] == 'v') {
1596 RegNumIndex = 1;
1597 RegKind = IS_VGPR;
1598 } else if (RegName[0] == 's') {
1599 RegNumIndex = 1;
1600 RegKind = IS_SGPR;
1601 } else if (RegName.startswith("ttmp")) {
1602 RegNumIndex = strlen("ttmp");
1603 RegKind = IS_TTMP;
1604 } else {
1605 return false;
1606 }
1607 if (RegName.size() > RegNumIndex) {
1608 // Single 32-bit register: vXX.
1609 if (RegName.substr(RegNumIndex).getAsInteger(10, RegNum))
1610 return false;
1611 Parser.Lex();
1612 RegWidth = 1;
1613 } else {
1614 // Range of registers: v[XX:YY]. ":YY" is optional.
1615 Parser.Lex();
1616 int64_t RegLo, RegHi;
1617 if (getLexer().isNot(AsmToken::LBrac))
1618 return false;
1619 Parser.Lex();
1620
1621 if (getParser().parseAbsoluteExpression(RegLo))
1622 return false;
1623
1624 const bool isRBrace = getLexer().is(AsmToken::RBrac);
1625 if (!isRBrace && getLexer().isNot(AsmToken::Colon))
1626 return false;
1627 Parser.Lex();
1628
1629 if (isRBrace) {
1630 RegHi = RegLo;
1631 } else {
1632 if (getParser().parseAbsoluteExpression(RegHi))
1633 return false;
1634
1635 if (getLexer().isNot(AsmToken::RBrac))
1636 return false;
1637 Parser.Lex();
1638 }
1639 RegNum = (unsigned) RegLo;
1640 RegWidth = (RegHi - RegLo) + 1;
1641 }
1642 }
1643 } else if (getLexer().is(AsmToken::LBrac)) {
1644 // List of consecutive registers: [s0,s1,s2,s3]
1645 Parser.Lex();
1646 if (!ParseAMDGPURegister(RegKind, Reg, RegNum, RegWidth, nullptr))
1647 return false;
1648 if (RegWidth != 1)
1649 return false;
1650 RegisterKind RegKind1;
1651 unsigned Reg1, RegNum1, RegWidth1;
1652 do {
1653 if (getLexer().is(AsmToken::Comma)) {
1654 Parser.Lex();
1655 } else if (getLexer().is(AsmToken::RBrac)) {
1656 Parser.Lex();
1657 break;
1658 } else if (ParseAMDGPURegister(RegKind1, Reg1, RegNum1, RegWidth1, nullptr)) {
1659 if (RegWidth1 != 1) {
1660 return false;
1661 }
1662 if (RegKind1 != RegKind) {
1663 return false;
1664 }
1665 if (!AddNextRegisterToList(Reg, RegWidth, RegKind1, Reg1, RegNum1)) {
1666 return false;
1667 }
1668 } else {
1669 return false;
1670 }
1671 } while (true);
1672 } else {
1673 return false;
1674 }
1675 switch (RegKind) {
1676 case IS_SPECIAL:
1677 RegNum = 0;
1678 RegWidth = 1;
1679 break;
1680 case IS_VGPR:
1681 case IS_SGPR:
1682 case IS_TTMP:
1683 {
1684 unsigned Size = 1;
1685 if (RegKind == IS_SGPR || RegKind == IS_TTMP) {
1686 // SGPR and TTMP registers must be aligned. Max required alignment is 4 dwords.
1687 Size = std::min(RegWidth, 4u);
1688 }
1689 if (RegNum % Size != 0)
1690 return false;
1691 if (DwordRegIndex) { *DwordRegIndex = RegNum; }
1692 RegNum = RegNum / Size;
1693 int RCID = getRegClass(RegKind, RegWidth);
1694 if (RCID == -1)
1695 return false;
1696 const MCRegisterClass RC = TRI->getRegClass(RCID);
1697 if (RegNum >= RC.getNumRegs())
1698 return false;
1699 Reg = RC.getRegister(RegNum);
1700 break;
1701 }
1702
1703 default:
1704    llvm_unreachable("unexpected register kind");
1705 }
1706
1707 if (!subtargetHasRegister(*TRI, Reg))
1708 return false;
1709 return true;
1710}
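
A worked illustration of the alignment rule enforced above (registers hypothetical; Size = min(RegWidth, 4) for SGPR/TTMP, and RegNum must be a multiple of Size):

  // s[2:3]  -> RegWidth = 2, Size = 2, RegNum = 2, 2 % 2 == 0  -> accepted
  // s[1:2]  -> RegWidth = 2, Size = 2, RegNum = 1, 1 % 2 != 0  -> rejected
  // s[4:11] -> RegWidth = 8, Size = 4, RegNum = 4, 4 % 4 == 0  -> accepted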
1711
1712std::unique_ptr<AMDGPUOperand> AMDGPUAsmParser::parseRegister() {
1713 const auto &Tok = Parser.getTok();
1714 SMLoc StartLoc = Tok.getLoc();
1715 SMLoc EndLoc = Tok.getEndLoc();
1716 RegisterKind RegKind;
1717 unsigned Reg, RegNum, RegWidth, DwordRegIndex;
1718
1719 if (!ParseAMDGPURegister(RegKind, Reg, RegNum, RegWidth, &DwordRegIndex)) {
1720 return nullptr;
1721 }
1722 KernelScope.usesRegister(RegKind, DwordRegIndex, RegWidth);
1723 return AMDGPUOperand::CreateReg(this, Reg, StartLoc, EndLoc, false);
1724}
1725
1726bool
1727AMDGPUAsmParser::parseAbsoluteExpr(int64_t &Val, bool AbsMod) {
1728 if (AbsMod && getLexer().peekTok().is(AsmToken::Pipe) &&
1729 (getLexer().getKind() == AsmToken::Integer ||
1730 getLexer().getKind() == AsmToken::Real)) {
1731 // This is a workaround for handling operands like these:
1732 // |1.0|
1733 // |-1|
1734 // This syntax is not compatible with syntax of standard
1735 // MC expressions (due to the trailing '|').
1736
1737 SMLoc EndLoc;
1738 const MCExpr *Expr;
1739
1740 if (getParser().parsePrimaryExpr(Expr, EndLoc)) {
1741 return true;
1742 }
1743
1744 return !Expr->evaluateAsAbsolute(Val);
1745 }
1746
1747 return getParser().parseAbsoluteExpression(Val);
1748}
1749
1750OperandMatchResultTy
1751AMDGPUAsmParser::parseImm(OperandVector &Operands, bool AbsMod) {
1752 // TODO: add syntactic sugar for 1/(2*PI)
1753 bool Minus = false;
1754 if (getLexer().getKind() == AsmToken::Minus) {
1755 Minus = true;
1756 Parser.Lex();
1757 }
1758
1759 SMLoc S = Parser.getTok().getLoc();
1760 switch(getLexer().getKind()) {
1761 case AsmToken::Integer: {
1762 int64_t IntVal;
1763 if (parseAbsoluteExpr(IntVal, AbsMod))
1764 return MatchOperand_ParseFail;
1765 if (Minus)
1766 IntVal *= -1;
1767 Operands.push_back(AMDGPUOperand::CreateImm(this, IntVal, S));
1768 return MatchOperand_Success;
1769 }
1770 case AsmToken::Real: {
1771 int64_t IntVal;
1772 if (parseAbsoluteExpr(IntVal, AbsMod))
1773 return MatchOperand_ParseFail;
1774
1775 APFloat F(BitsToDouble(IntVal));
1776 if (Minus)
1777 F.changeSign();
1778 Operands.push_back(
1779 AMDGPUOperand::CreateImm(this, F.bitcastToAPInt().getZExtValue(), S,
1780 AMDGPUOperand::ImmTyNone, true));
1781 return MatchOperand_Success;
1782 }
1783 default:
1784 return Minus ? MatchOperand_ParseFail : MatchOperand_NoMatch;
1785 }
1786}
1787
1788OperandMatchResultTy
1789AMDGPUAsmParser::parseReg(OperandVector &Operands) {
1790 if (auto R = parseRegister()) {
1791    assert(R->isReg());
1792 R->Reg.IsForcedVOP3 = isForcedVOP3();
1793 Operands.push_back(std::move(R));
1794 return MatchOperand_Success;
1795 }
1796 return MatchOperand_NoMatch;
1797}
1798
1799OperandMatchResultTy
1800AMDGPUAsmParser::parseRegOrImm(OperandVector &Operands, bool AbsMod) {
1801 auto res = parseImm(Operands, AbsMod);
1802 if (res != MatchOperand_NoMatch) {
1803 return res;
1804 }
1805
1806 return parseReg(Operands);
1807}
1808
1809OperandMatchResultTy
1810AMDGPUAsmParser::parseRegOrImmWithFPInputMods(OperandVector &Operands,
1811 bool AllowImm) {
1812 bool Negate = false, Negate2 = false, Abs = false, Abs2 = false;
1813
1814 if (getLexer().getKind()== AsmToken::Minus) {
1815 const AsmToken NextToken = getLexer().peekTok();
1816
1817 // Disable ambiguous constructs like '--1' etc. Should use neg(-1) instead.
1818 if (NextToken.is(AsmToken::Minus)) {
1819 Error(Parser.getTok().getLoc(), "invalid syntax, expected 'neg' modifier");
1820 return MatchOperand_ParseFail;
1821 }
1822
1823 // '-' followed by an integer literal N should be interpreted as integer
1824 // negation rather than a floating-point NEG modifier applied to N.
1825    // Besides being counter-intuitive, such use of a floating-point NEG modifier
1826    // results in different meanings of integer literals used with VOP1/2/C
1827 // and VOP3, for example:
1828 // v_exp_f32_e32 v5, -1 // VOP1: src0 = 0xFFFFFFFF
1829 // v_exp_f32_e64 v5, -1 // VOP3: src0 = 0x80000001
1830    // Negative fp literals should be handled likewise for uniformity.
1831 if (!NextToken.is(AsmToken::Integer) && !NextToken.is(AsmToken::Real)) {
1832 Parser.Lex();
1833 Negate = true;
1834 }
1835 }
1836
1837 if (getLexer().getKind() == AsmToken::Identifier &&
1838 Parser.getTok().getString() == "neg") {
1839 if (Negate) {
1840 Error(Parser.getTok().getLoc(), "expected register or immediate");
1841 return MatchOperand_ParseFail;
1842 }
1843 Parser.Lex();
1844 Negate2 = true;
1845 if (getLexer().isNot(AsmToken::LParen)) {
1846 Error(Parser.getTok().getLoc(), "expected left paren after neg");
1847 return MatchOperand_ParseFail;
1848 }
1849 Parser.Lex();
1850 }
1851
1852 if (getLexer().getKind() == AsmToken::Identifier &&
1853 Parser.getTok().getString() == "abs") {
1854 Parser.Lex();
1855 Abs2 = true;
1856 if (getLexer().isNot(AsmToken::LParen)) {
1857 Error(Parser.getTok().getLoc(), "expected left paren after abs");
1858 return MatchOperand_ParseFail;
1859 }
1860 Parser.Lex();
1861 }
1862
1863 if (getLexer().getKind() == AsmToken::Pipe) {
1864 if (Abs2) {
1865 Error(Parser.getTok().getLoc(), "expected register or immediate");
1866 return MatchOperand_ParseFail;
1867 }
1868 Parser.Lex();
1869 Abs = true;
1870 }
1871
1872 OperandMatchResultTy Res;
1873 if (AllowImm) {
1874 Res = parseRegOrImm(Operands, Abs);
1875 } else {
1876 Res = parseReg(Operands);
1877 }
1878 if (Res != MatchOperand_Success) {
1879 return Res;
1880 }
1881
1882 AMDGPUOperand::Modifiers Mods;
1883 if (Abs) {
1884 if (getLexer().getKind() != AsmToken::Pipe) {
1885 Error(Parser.getTok().getLoc(), "expected vertical bar");
1886 return MatchOperand_ParseFail;
1887 }
1888 Parser.Lex();
1889 Mods.Abs = true;
1890 }
1891 if (Abs2) {
1892 if (getLexer().isNot(AsmToken::RParen)) {
1893 Error(Parser.getTok().getLoc(), "expected closing parentheses");
1894 return MatchOperand_ParseFail;
1895 }
1896 Parser.Lex();
1897 Mods.Abs = true;
1898 }
1899
1900 if (Negate) {
1901 Mods.Neg = true;
1902 } else if (Negate2) {
1903 if (getLexer().isNot(AsmToken::RParen)) {
1904 Error(Parser.getTok().getLoc(), "expected closing parentheses");
1905 return MatchOperand_ParseFail;
1906 }
1907 Parser.Lex();
1908 Mods.Neg = true;
1909 }
1910
1911 if (Mods.hasFPModifiers()) {
1912 AMDGPUOperand &Op = static_cast<AMDGPUOperand &>(*Operands.back());
1913 Op.setModifiers(Mods);
1914 }
1915 return MatchOperand_Success;
1916}
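
For reference, the modifier spellings handled above, shown on a hypothetical operand v0 (a summary of the parsing logic, not a quote from the source):

  // -v0      -> Neg (integer negation instead, when a literal follows '-')
  // |v0|     -> Abs
  // neg(v0)  -> Neg
  // abs(v0)  -> Abs
  // -|v0|    -> Neg and Abs combined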
1917
1918OperandMatchResultTy
1919AMDGPUAsmParser::parseRegOrImmWithIntInputMods(OperandVector &Operands,
1920 bool AllowImm) {
1921 bool Sext = false;
1922
1923 if (getLexer().getKind() == AsmToken::Identifier &&
1924 Parser.getTok().getString() == "sext") {
1925 Parser.Lex();
1926 Sext = true;
1927 if (getLexer().isNot(AsmToken::LParen)) {
1928 Error(Parser.getTok().getLoc(), "expected left paren after sext");
1929 return MatchOperand_ParseFail;
1930 }
1931 Parser.Lex();
1932 }
1933
1934 OperandMatchResultTy Res;
1935 if (AllowImm) {
1936 Res = parseRegOrImm(Operands);
1937 } else {
1938 Res = parseReg(Operands);
1939 }
1940 if (Res != MatchOperand_Success) {
1941 return Res;
1942 }
1943
1944 AMDGPUOperand::Modifiers Mods;
1945 if (Sext) {
1946 if (getLexer().isNot(AsmToken::RParen)) {
1947 Error(Parser.getTok().getLoc(), "expected closing parentheses");
1948 return MatchOperand_ParseFail;
1949 }
1950 Parser.Lex();
1951 Mods.Sext = true;
1952 }
1953
1954 if (Mods.hasIntModifiers()) {
1955 AMDGPUOperand &Op = static_cast<AMDGPUOperand &>(*Operands.back());
1956 Op.setModifiers(Mods);
1957 }
1958
1959 return MatchOperand_Success;
1960}
1961
1962OperandMatchResultTy
1963AMDGPUAsmParser::parseRegWithFPInputMods(OperandVector &Operands) {
1964 return parseRegOrImmWithFPInputMods(Operands, false);
1965}
1966
1967OperandMatchResultTy
1968AMDGPUAsmParser::parseRegWithIntInputMods(OperandVector &Operands) {
1969 return parseRegOrImmWithIntInputMods(Operands, false);
1970}
1971
1972OperandMatchResultTy AMDGPUAsmParser::parseVReg32OrOff(OperandVector &Operands) {
1973 std::unique_ptr<AMDGPUOperand> Reg = parseRegister();
1974 if (Reg) {
1975 Operands.push_back(std::move(Reg));
1976 return MatchOperand_Success;
1977 }
1978
1979 const AsmToken &Tok = Parser.getTok();
1980 if (Tok.getString() == "off") {
1981 Operands.push_back(AMDGPUOperand::CreateImm(this, 0, Tok.getLoc(),
1982 AMDGPUOperand::ImmTyOff, false));
1983 Parser.Lex();
1984 return MatchOperand_Success;
1985 }
1986
1987 return MatchOperand_NoMatch;
1988}
1989
1990unsigned AMDGPUAsmParser::checkTargetMatchPredicate(MCInst &Inst) {
1991 uint64_t TSFlags = MII.get(Inst.getOpcode()).TSFlags;
1992
1993 if ((getForcedEncodingSize() == 32 && (TSFlags & SIInstrFlags::VOP3)) ||
1994 (getForcedEncodingSize() == 64 && !(TSFlags & SIInstrFlags::VOP3)) ||
1995 (isForcedDPP() && !(TSFlags & SIInstrFlags::DPP)) ||
1996 (isForcedSDWA() && !(TSFlags & SIInstrFlags::SDWA)) )
1997 return Match_InvalidOperand;
1998
1999 if ((TSFlags & SIInstrFlags::VOP3) &&
2000 (TSFlags & SIInstrFlags::VOPAsmPrefer32Bit) &&
2001 getForcedEncodingSize() != 64)
2002 return Match_PreferE32;
2003
2004 if (Inst.getOpcode() == AMDGPU::V_MAC_F32_sdwa_vi ||
2005 Inst.getOpcode() == AMDGPU::V_MAC_F16_sdwa_vi) {
2006 // v_mac_f32/16 allow only dst_sel == DWORD;
2007 auto OpNum =
2008 AMDGPU::getNamedOperandIdx(Inst.getOpcode(), AMDGPU::OpName::dst_sel);
2009 const auto &Op = Inst.getOperand(OpNum);
2010 if (!Op.isImm() || Op.getImm() != AMDGPU::SDWA::SdwaSel::DWORD) {
2011 return Match_InvalidOperand;
2012 }
2013 }
2014
2015 if ((TSFlags & SIInstrFlags::FLAT) && !hasFlatOffsets()) {
2016    // FIXME: Produces an error without reporting the correct column.
2017 auto OpNum =
2018 AMDGPU::getNamedOperandIdx(Inst.getOpcode(), AMDGPU::OpName::offset);
2019 const auto &Op = Inst.getOperand(OpNum);
2020 if (Op.getImm() != 0)
2021 return Match_InvalidOperand;
2022 }
2023
2024 return Match_Success;
2025}
2026
2027 // Which asm variants we should check.
2028ArrayRef<unsigned> AMDGPUAsmParser::getMatchedVariants() const {
2029 if (getForcedEncodingSize() == 32) {
2030 static const unsigned Variants[] = {AMDGPUAsmVariants::DEFAULT};
2031 return makeArrayRef(Variants);
2032 }
2033
2034 if (isForcedVOP3()) {
2035 static const unsigned Variants[] = {AMDGPUAsmVariants::VOP3};
2036 return makeArrayRef(Variants);
2037 }
2038
2039 if (isForcedSDWA()) {
2040 static const unsigned Variants[] = {AMDGPUAsmVariants::SDWA,
2041 AMDGPUAsmVariants::SDWA9};
2042 return makeArrayRef(Variants);
2043 }
2044
2045 if (isForcedDPP()) {
2046 static const unsigned Variants[] = {AMDGPUAsmVariants::DPP};
2047 return makeArrayRef(Variants);
2048 }
2049
2050 static const unsigned Variants[] = {
2051 AMDGPUAsmVariants::DEFAULT, AMDGPUAsmVariants::VOP3,
2052 AMDGPUAsmVariants::SDWA, AMDGPUAsmVariants::SDWA9, AMDGPUAsmVariants::DPP
2053 };
2054
2055 return makeArrayRef(Variants);
2056}
2057
2058unsigned AMDGPUAsmParser::findImplicitSGPRReadInVOP(const MCInst &Inst) const {
2059 const MCInstrDesc &Desc = MII.get(Inst.getOpcode());
2060 const unsigned Num = Desc.getNumImplicitUses();
2061 for (unsigned i = 0; i < Num; ++i) {
2062 unsigned Reg = Desc.ImplicitUses[i];
2063 switch (Reg) {
2064 case AMDGPU::FLAT_SCR:
2065 case AMDGPU::VCC:
2066 case AMDGPU::M0:
2067 return Reg;
2068 default:
2069 break;
2070 }
2071 }
2072 return AMDGPU::NoRegister;
2073}
2074
2075// NB: This code is correct only when used to check constant
2076 // bus limitations because GFX7 supports no f16 inline constants.
2077// Note that there are no cases when a GFX7 opcode violates
2078// constant bus limitations due to the use of an f16 constant.
2079bool AMDGPUAsmParser::isInlineConstant(const MCInst &Inst,
2080 unsigned OpIdx) const {
2081 const MCInstrDesc &Desc = MII.get(Inst.getOpcode());
2082
2083 if (!AMDGPU::isSISrcOperand(Desc, OpIdx)) {
2084 return false;
2085 }
2086
2087 const MCOperand &MO = Inst.getOperand(OpIdx);
2088
2089 int64_t Val = MO.getImm();
2090 auto OpSize = AMDGPU::getOperandSize(Desc, OpIdx);
2091
2092 switch (OpSize) { // expected operand size
2093 case 8:
2094 return AMDGPU::isInlinableLiteral64(Val, hasInv2PiInlineImm());
2095 case 4:
2096 return AMDGPU::isInlinableLiteral32(Val, hasInv2PiInlineImm());
2097 case 2: {
2098 const unsigned OperandType = Desc.OpInfo[OpIdx].OperandType;
2099 if (OperandType == AMDGPU::OPERAND_REG_INLINE_C_V2INT16 ||
2100 OperandType == AMDGPU::OPERAND_REG_INLINE_C_V2FP16) {
2101 return AMDGPU::isInlinableLiteralV216(Val, hasInv2PiInlineImm());
2102 } else {
2103 return AMDGPU::isInlinableLiteral16(Val, hasInv2PiInlineImm());
2104 }
2105 }
2106 default:
2107    llvm_unreachable("invalid operand size");
2108 }
2109}
2110
2111bool AMDGPUAsmParser::usesConstantBus(const MCInst &Inst, unsigned OpIdx) {
2112 const MCOperand &MO = Inst.getOperand(OpIdx);
2113 if (MO.isImm()) {
2114 return !isInlineConstant(Inst, OpIdx);
2115 }
2116 return !MO.isReg() ||
2117 isSGPR(mc2PseudoReg(MO.getReg()), getContext().getRegisterInfo());
2118}
2119
2120bool AMDGPUAsmParser::validateConstantBusLimitations(const MCInst &Inst) {
2121 const unsigned Opcode = Inst.getOpcode();
2122 const MCInstrDesc &Desc = MII.get(Opcode);
2123 unsigned ConstantBusUseCount = 0;
2124
2125 if (Desc.TSFlags &
2126 (SIInstrFlags::VOPC |
2127 SIInstrFlags::VOP1 | SIInstrFlags::VOP2 |
2128 SIInstrFlags::VOP3 | SIInstrFlags::VOP3P |
2129 SIInstrFlags::SDWA)) {
2130 // Check special imm operands (used by madmk, etc)
2131 if (AMDGPU::getNamedOperandIdx(Opcode, AMDGPU::OpName::imm) != -1) {
2132 ++ConstantBusUseCount;
2133 }
2134
2135 unsigned SGPRUsed = findImplicitSGPRReadInVOP(Inst);
2136 if (SGPRUsed != AMDGPU::NoRegister) {
2137 ++ConstantBusUseCount;
2138 }
2139
2140 const int Src0Idx = AMDGPU::getNamedOperandIdx(Opcode, AMDGPU::OpName::src0);
2141 const int Src1Idx = AMDGPU::getNamedOperandIdx(Opcode, AMDGPU::OpName::src1);
2142 const int Src2Idx = AMDGPU::getNamedOperandIdx(Opcode, AMDGPU::OpName::src2);
2143
2144 const int OpIndices[] = { Src0Idx, Src1Idx, Src2Idx };
2145
2146 for (int OpIdx : OpIndices) {
2147 if (OpIdx == -1) break;
2148
2149 const MCOperand &MO = Inst.getOperand(OpIdx);
2150 if (usesConstantBus(Inst, OpIdx)) {
2151 if (MO.isReg()) {
2152 const unsigned Reg = mc2PseudoReg(MO.getReg());
2153          // Pairs of registers with partial intersections like these
2154 // s0, s[0:1]
2155 // flat_scratch_lo, flat_scratch
2156 // flat_scratch_lo, flat_scratch_hi
2157 // are theoretically valid but they are disabled anyway.
2158 // Note that this code mimics SIInstrInfo::verifyInstruction
2159 if (Reg != SGPRUsed) {
2160 ++ConstantBusUseCount;
2161 }
2162 SGPRUsed = Reg;
2163 } else { // Expression or a literal
2164 ++ConstantBusUseCount;
2165 }
2166 }
2167 }
2168 }
2169
2170 return ConstantBusUseCount <= 1;
2171}
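
A worked example of the limit enforced above (operand mixes hypothetical, chosen only to illustrate the counting; the cap is one constant-bus read per instruction):

  // src0 = s0,      src1 = s0  -> one unique SGPR       -> count = 1, accepted
  // src0 = s0,      src1 = s1  -> two different SGPRs   -> count = 2, rejected
  // src0 = literal, src1 = s0  -> literal plus an SGPR  -> count = 2, rejected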
2172
2173bool AMDGPUAsmParser::validateEarlyClobberLimitations(const MCInst &Inst) {
2174 const unsigned Opcode = Inst.getOpcode();
2175 const MCInstrDesc &Desc = MII.get(Opcode);
2176
2177 const int DstIdx = AMDGPU::getNamedOperandIdx(Opcode, AMDGPU::OpName::vdst);
2178 if (DstIdx == -1 ||
2179 Desc.getOperandConstraint(DstIdx, MCOI::EARLY_CLOBBER) == -1) {
2180 return true;
2181 }
2182
2183 const MCRegisterInfo *TRI = getContext().getRegisterInfo();
2184
2185 const int Src0Idx = AMDGPU::getNamedOperandIdx(Opcode, AMDGPU::OpName::src0);
2186 const int Src1Idx = AMDGPU::getNamedOperandIdx(Opcode, AMDGPU::OpName::src1);
2187 const int Src2Idx = AMDGPU::getNamedOperandIdx(Opcode, AMDGPU::OpName::src2);
2188
2189  assert(DstIdx != -1);
2190 const MCOperand &Dst = Inst.getOperand(DstIdx);
2191  assert(Dst.isReg());
2192 const unsigned DstReg = mc2PseudoReg(Dst.getReg());
2193
2194 const int SrcIndices[] = { Src0Idx, Src1Idx, Src2Idx };
2195
2196 for (int SrcIdx : SrcIndices) {
2197 if (SrcIdx == -1) break;
2198 const MCOperand &Src = Inst.getOperand(SrcIdx);
2199 if (Src.isReg()) {
2200 const unsigned SrcReg = mc2PseudoReg(Src.getReg());
2201 if (isRegIntersect(DstReg, SrcReg, TRI)) {
2202 return false;
2203 }
2204 }
2205 }
2206
2207 return true;
2208}
2209
2210bool AMDGPUAsmParser::validateIntClampSupported(const MCInst &Inst) {
2211
2212 const unsigned Opc = Inst.getOpcode();
2213 const MCInstrDesc &Desc = MII.get(Opc);
2214
2215 if ((Desc.TSFlags & SIInstrFlags::IntClamp) != 0 && !hasIntClamp()) {
2216 int ClampIdx = AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::clamp);
2217    assert(ClampIdx != -1);
2218 return Inst.getOperand(ClampIdx).getImm() == 0;
2219 }
2220
2221 return true;
2222}
2223
2224bool AMDGPUAsmParser::validateInstruction(const MCInst &Inst,
2225 const SMLoc &IDLoc) {
2226 if (!validateConstantBusLimitations(Inst)) {
2227 Error(IDLoc,
2228 "invalid operand (violates constant bus restrictions)");
2229 return false;
2230 }
2231 if (!validateEarlyClobberLimitations(Inst)) {
2232 Error(IDLoc,
2233 "destination must be different than all sources");
2234 return false;
2235 }
2236 if (!validateIntClampSupported(Inst)) {
2237 Error(IDLoc,
2238 "integer clamping is not supported on this GPU");
2239 return false;
2240 }
2241
2242 return true;
2243}
2244
2245bool AMDGPUAsmParser::MatchAndEmitInstruction(SMLoc IDLoc, unsigned &Opcode,
2246 OperandVector &Operands,
2247 MCStreamer &Out,
2248 uint64_t &ErrorInfo,
2249 bool MatchingInlineAsm) {
2250 MCInst Inst;
2251 unsigned Result = Match_Success;
2252 for (auto Variant : getMatchedVariants()) {
2253 uint64_t EI;
2254 auto R = MatchInstructionImpl(Operands, Inst, EI, MatchingInlineAsm,
2255 Variant);
2256    // We order match statuses from least to most specific, and we use the most
2257    // specific status as the result:
2258    // Match_MnemonicFail < Match_InvalidOperand < Match_MissingFeature < Match_PreferE32
2259 if ((R == Match_Success) ||
2260 (R == Match_PreferE32) ||
2261 (R == Match_MissingFeature && Result != Match_PreferE32) ||
2262 (R == Match_InvalidOperand && Result != Match_MissingFeature
2263 && Result != Match_PreferE32) ||
2264 (R == Match_MnemonicFail && Result != Match_InvalidOperand
2265 && Result != Match_MissingFeature
2266 && Result != Match_PreferE32)) {
2267 Result = R;
2268 ErrorInfo = EI;
2269 }
2270 if (R == Match_Success)
2271 break;
2272 }
2273
2274 switch (Result) {
2275 default: break;
2276 case Match_Success:
2277 if (!validateInstruction(Inst, IDLoc)) {
2278 return true;
2279 }
2280 Inst.setLoc(IDLoc);
2281 Out.EmitInstruction(Inst, getSTI());
2282 return false;
2283
2284 case Match_MissingFeature:
2285 return Error(IDLoc, "instruction not supported on this GPU");
2286
2287 case Match_MnemonicFail:
2288 return Error(IDLoc, "unrecognized instruction mnemonic");
2289
2290 case Match_InvalidOperand: {
2291 SMLoc ErrorLoc = IDLoc;
2292 if (ErrorInfo != ~0ULL) {
2293 if (ErrorInfo >= Operands.size()) {
2294 return Error(IDLoc, "too few operands for instruction");
2295 }
2296 ErrorLoc = ((AMDGPUOperand &)*Operands[ErrorInfo]).getStartLoc();
2297 if (ErrorLoc == SMLoc())
2298 ErrorLoc = IDLoc;
2299 }
2300 return Error(ErrorLoc, "invalid operand for instruction");
2301 }
2302
2303 case Match_PreferE32:
2304 return Error(IDLoc, "internal error: instruction without _e64 suffix "
2305 "should be encoded as e32");
2306 }
2307  llvm_unreachable("Implement any new match types added!");
2308}
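
The preference chain in the matching loop above reduces to a rank comparison. A self-contained sketch (the enum mirrors, but is not, the generated matcher's enumerators; it assumes Result starts at the least specific status):

  enum MatchResult {
    Match_MnemonicFail,   // least specific
    Match_InvalidOperand,
    Match_MissingFeature,
    Match_PreferE32,
    Match_Success         // most specific; the loop also breaks on it
  };

  static bool moreSpecific(MatchResult R, MatchResult Result) {
    return R >= Result;   // enumerator order encodes the specificity rank
  }

  // if (moreSpecific(R, Result)) { Result = R; ErrorInfo = EI; }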
2309
2310bool AMDGPUAsmParser::ParseAsAbsoluteExpression(uint32_t &Ret) {
2311 int64_t Tmp = -1;
2312 if (getLexer().isNot(AsmToken::Integer) && getLexer().isNot(AsmToken::Identifier)) {
4. Calling 'MCAsmParserExtension::getLexer'
7. Returning from 'MCAsmParserExtension::getLexer'
8. Calling 'MCAsmLexer::isNot'
14. Returning from 'MCAsmLexer::isNot'
2313 return true;
2314 }
2315 if (getParser().parseAbsoluteExpression(Tmp)) {
15. Calling 'MCAsmParserExtension::getParser'
16. Returning from 'MCAsmParserExtension::getParser'
17. Assuming the condition is true
18. Taking true branch
2316 return true;
2317 }
2318 Ret = static_cast<uint32_t>(Tmp);
2319 return false;
2320}
2321
2322bool AMDGPUAsmParser::ParseDirectiveMajorMinor(uint32_t &Major,
2323 uint32_t &Minor) {
2324 if (ParseAsAbsoluteExpression(Major))
3. Calling 'AMDGPUAsmParser::ParseAsAbsoluteExpression'
19. Returning from 'AMDGPUAsmParser::ParseAsAbsoluteExpression'
20. Taking true branch
2325 return TokError("invalid major version");
21. Calling constructor for 'Twine'
28. Returning from constructor for 'Twine'
29. Calling 'MCAsmParserExtension::TokError'
32. Returning from 'MCAsmParserExtension::TokError'
2326
2327 if (getLexer().isNot(AsmToken::Comma))
2328 return TokError("minor version number required, comma expected");
2329 Lex();
2330
2331 if (ParseAsAbsoluteExpression(Minor))
2332 return TokError("invalid minor version");
2333
2334 return false;
2335}
2336
2337bool AMDGPUAsmParser::ParseDirectiveHSACodeObjectVersion() {
2338 uint32_t Major;
1. 'Major' declared without an initial value
2339 uint32_t Minor;
2340
2341 if (ParseDirectiveMajorMinor(Major, Minor))
2. Calling 'AMDGPUAsmParser::ParseDirectiveMajorMinor'
33. Returning from 'AMDGPUAsmParser::ParseDirectiveMajorMinor'
34. Assuming the condition is false
35. Taking false branch
2342 return true;
2343
2344 getTargetStreamer().EmitDirectiveHSACodeObjectVersion(Major, Minor);
36. 1st function call argument is an uninitialized value
2345 return false;
2346}
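
One possible fix for the warning reported above, sketched under the assumption that zero-initialization is acceptable here (the values are only consumed after ParseDirectiveMajorMinor reports success, but the analyzer cannot prove that TokError always returns true):

  bool AMDGPUAsmParser::ParseDirectiveHSACodeObjectVersion() {
    uint32_t Major = 0; // initialized so no path can read an
    uint32_t Minor = 0; // indeterminate value

    if (ParseDirectiveMajorMinor(Major, Minor))
      return true;

    getTargetStreamer().EmitDirectiveHSACodeObjectVersion(Major, Minor);
    return false;
  }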
2347
2348bool AMDGPUAsmParser::ParseDirectiveHSACodeObjectISA() {
2349 uint32_t Major;
2350 uint32_t Minor;
2351 uint32_t Stepping;
2352 StringRef VendorName;
2353 StringRef ArchName;
2354
2355 // If this directive has no arguments, then use the ISA version for the
2356 // targeted GPU.
2357 if (getLexer().is(AsmToken::EndOfStatement)) {
2358 AMDGPU::IsaInfo::IsaVersion ISA =
2359 AMDGPU::IsaInfo::getIsaVersion(getFeatureBits());
2360 getTargetStreamer().EmitDirectiveHSACodeObjectISA(ISA.Major, ISA.Minor,
2361 ISA.Stepping,
2362 "AMD", "AMDGPU");
2363 return false;
2364 }
2365
2366 if (ParseDirectiveMajorMinor(Major, Minor))
2367 return true;
2368
2369 if (getLexer().isNot(AsmToken::Comma))
2370 return TokError("stepping version number required, comma expected");
2371 Lex();
2372
2373 if (ParseAsAbsoluteExpression(Stepping))
2374 return TokError("invalid stepping version");
2375
2376 if (getLexer().isNot(AsmToken::Comma))
2377 return TokError("vendor name required, comma expected");
2378 Lex();
2379
2380 if (getLexer().isNot(AsmToken::String))
2381 return TokError("invalid vendor name");
2382
2383 VendorName = getLexer().getTok().getStringContents();
2384 Lex();
2385
2386 if (getLexer().isNot(AsmToken::Comma))
2387 return TokError("arch name required, comma expected");
2388 Lex();
2389
2390 if (getLexer().isNot(AsmToken::String))
2391 return TokError("invalid arch name");
2392
2393 ArchName = getLexer().getTok().getStringContents();
2394 Lex();
2395
2396 getTargetStreamer().EmitDirectiveHSACodeObjectISA(Major, Minor, Stepping,
2397 VendorName, ArchName);
2398 return false;
2399}
2400
2401bool AMDGPUAsmParser::ParseAMDKernelCodeTValue(StringRef ID,
2402 amd_kernel_code_t &Header) {
2403 SmallString<40> ErrStr;
2404 raw_svector_ostream Err(ErrStr);
2405 if (!parseAmdKernelCodeField(ID, getParser(), Header, Err)) {
2406 return TokError(Err.str());
2407 }
2408 Lex();
2409 return false;
2410}
2411
2412bool AMDGPUAsmParser::ParseDirectiveAMDKernelCodeT() {
2413 amd_kernel_code_t Header;
2414 AMDGPU::initDefaultAMDKernelCodeT(Header, getFeatureBits());
2415
2416 while (true) {
2417 // Lex EndOfStatement. This is in a while loop, because lexing a comment
2418 // will set the current token to EndOfStatement.
2419 while(getLexer().is(AsmToken::EndOfStatement))
2420 Lex();
2421
2422 if (getLexer().isNot(AsmToken::Identifier))
2423 return TokError("expected value identifier or .end_amd_kernel_code_t");
2424
2425 StringRef ID = getLexer().getTok().getIdentifier();
2426 Lex();
2427
2428 if (ID == ".end_amd_kernel_code_t")
2429 break;
2430
2431 if (ParseAMDKernelCodeTValue(ID, Header))
2432 return true;
2433 }
2434
2435 getTargetStreamer().EmitAMDKernelCodeT(Header);
2436
2437 return false;
2438}
2439
2440bool AMDGPUAsmParser::ParseDirectiveAMDGPUHsaKernel() {
2441 if (getLexer().isNot(AsmToken::Identifier))
2442 return TokError("expected symbol name");
2443
2444 StringRef KernelName = Parser.getTok().getString();
2445
2446 getTargetStreamer().EmitAMDGPUSymbolType(KernelName,
2447 ELF::STT_AMDGPU_HSA_KERNEL);
2448 Lex();
2449 KernelScope.initialize(getContext());
2450 return false;
2451}
2452
2453bool AMDGPUAsmParser::ParseDirectiveISAVersion() {
2454 if (getSTI().getTargetTriple().getArch() != Triple::amdgcn) {
2455 return Error(getParser().getTok().getLoc(),
2456 ".amd_amdgpu_isa directive is not available on non-amdgcn "
2457 "architectures");
2458 }
2459
2460 auto ISAVersionStringFromASM = getLexer().getTok().getStringContents();
2461
2462 std::string ISAVersionStringFromSTI;
2463 raw_string_ostream ISAVersionStreamFromSTI(ISAVersionStringFromSTI);
2464 IsaInfo::streamIsaVersion(&getSTI(), ISAVersionStreamFromSTI);
2465
2466 if (ISAVersionStringFromASM != ISAVersionStreamFromSTI.str()) {
2467 return Error(getParser().getTok().getLoc(),
2468 ".amd_amdgpu_isa directive does not match triple and/or mcpu "
2469 "arguments specified through the command line");
2470 }
2471
2472 getTargetStreamer().EmitISAVersion(ISAVersionStreamFromSTI.str());
2473 Lex();
2474
2475 return false;
2476}
2477
2478bool AMDGPUAsmParser::ParseDirectiveHSAMetadata() {
2479 if (getSTI().getTargetTriple().getOS() != Triple::AMDHSA) {
2480 return Error(getParser().getTok().getLoc(),
2481 (Twine(HSAMD::AssemblerDirectiveBegin) + Twine(" directive is "
2482 "not available on non-amdhsa OSes")).str());
2483 }
2484
2485 std::string HSAMetadataString;
2486 raw_string_ostream YamlStream(HSAMetadataString);
2487
2488 getLexer().setSkipSpace(false);
2489
2490 bool FoundEnd = false;
2491 while (!getLexer().is(AsmToken::Eof)) {
2492 while (getLexer().is(AsmToken::Space)) {
2493 YamlStream << getLexer().getTok().getString();
2494 Lex();
2495 }
2496
2497 if (getLexer().is(AsmToken::Identifier)) {
2498 StringRef ID = getLexer().getTok().getIdentifier();
2499 if (ID == AMDGPU::HSAMD::AssemblerDirectiveEnd) {
2500 Lex();
2501 FoundEnd = true;
2502 break;
2503 }
2504 }
2505
2506 YamlStream << Parser.parseStringToEndOfStatement()
2507 << getContext().getAsmInfo()->getSeparatorString();
2508
2509 Parser.eatToEndOfStatement();
2510 }
2511
2512 getLexer().setSkipSpace(true);
2513
2514 if (getLexer().is(AsmToken::Eof) && !FoundEnd) {
2515 return TokError(Twine("expected directive ") +
2516 Twine(HSAMD::AssemblerDirectiveEnd) + Twine(" not found"));
2517 }
2518
2519 YamlStream.flush();
2520
2521 if (!getTargetStreamer().EmitHSAMetadata(HSAMetadataString))
2522 return Error(getParser().getTok().getLoc(), "invalid HSA metadata");
2523
2524 return false;
2525}
2526
2527bool AMDGPUAsmParser::ParseDirectivePALMetadata() {
2528 if (getSTI().getTargetTriple().getOS() != Triple::AMDPAL) {
2529 return Error(getParser().getTok().getLoc(),
2530 (Twine(PALMD::AssemblerDirective) + Twine(" directive is "
2531 "not available on non-amdpal OSes")).str());
2532 }
2533
2534 PALMD::Metadata PALMetadata;
2535 for (;;) {
2536 uint32_t Value;
2537 if (ParseAsAbsoluteExpression(Value)) {
2538 return TokError(Twine("invalid value in ") +
2539 Twine(PALMD::AssemblerDirective));
2540 }
2541 PALMetadata.push_back(Value);
2542 if (getLexer().isNot(AsmToken::Comma))
2543 break;
2544 Lex();
2545 }
2546 getTargetStreamer().EmitPALMetadata(PALMetadata);
2547 return false;
2548}
2549
2550bool AMDGPUAsmParser::ParseDirective(AsmToken DirectiveID) {
2551 StringRef IDVal = DirectiveID.getString();
2552
2553 if (IDVal == ".hsa_code_object_version")
2554 return ParseDirectiveHSACodeObjectVersion();
2555
2556 if (IDVal == ".hsa_code_object_isa")
2557 return ParseDirectiveHSACodeObjectISA();
2558
2559 if (IDVal == ".amd_kernel_code_t")
2560 return ParseDirectiveAMDKernelCodeT();
2561
2562 if (IDVal == ".amdgpu_hsa_kernel")
2563 return ParseDirectiveAMDGPUHsaKernel();
2564
2565 if (IDVal == ".amd_amdgpu_isa")
2566 return ParseDirectiveISAVersion();
2567
2568 if (IDVal == AMDGPU::HSAMD::AssemblerDirectiveBegin)
2569 return ParseDirectiveHSAMetadata();
2570
2571 if (IDVal == PALMD::AssemblerDirective)
2572 return ParseDirectivePALMetadata();
2573
2574 return true;
2575}
2576
2577bool AMDGPUAsmParser::subtargetHasRegister(const MCRegisterInfo &MRI,
2578 unsigned RegNo) const {
2579 if (isCI())
2580 return true;
2581
2582 if (isSI()) {
2583 // No flat_scr
2584 switch (RegNo) {
2585 case AMDGPU::FLAT_SCR:
2586 case AMDGPU::FLAT_SCR_LO:
2587 case AMDGPU::FLAT_SCR_HI:
2588 return false;
2589 default:
2590 return true;
2591 }
2592 }
2593
2594 // VI only has 102 SGPRs, so make sure we aren't trying to use the 2 more that
2595 // SI/CI have.
2596 for (MCRegAliasIterator R(AMDGPU::SGPR102_SGPR103, &MRI, true);
2597 R.isValid(); ++R) {
2598 if (*R == RegNo)
2599 return false;
2600 }
2601
2602 return true;
2603}
2604
2605OperandMatchResultTy
2606AMDGPUAsmParser::parseOperand(OperandVector &Operands, StringRef Mnemonic) {
2607 // Try to parse with a custom parser
2608 OperandMatchResultTy ResTy = MatchOperandParserImpl(Operands, Mnemonic);
2609
2610  // If we successfully parsed the operand or if there was an error parsing,
2611 // we are done.
2612 //
2613 // If we are parsing after we reach EndOfStatement then this means we
2614 // are appending default values to the Operands list. This is only done
2615  // by a custom parser, so we shouldn't continue on to the generic parsing.
2616 if (ResTy == MatchOperand_Success || ResTy == MatchOperand_ParseFail ||
2617 getLexer().is(AsmToken::EndOfStatement))
2618 return ResTy;
2619
2620 ResTy = parseRegOrImm(Operands);
2621
2622 if (ResTy == MatchOperand_Success)
2623 return ResTy;
2624
2625 const auto &Tok = Parser.getTok();
2626 SMLoc S = Tok.getLoc();
2627
2628 const MCExpr *Expr = nullptr;
2629 if (!Parser.parseExpression(Expr)) {
2630 Operands.push_back(AMDGPUOperand::CreateExpr(this, Expr, S));
2631 return MatchOperand_Success;
2632 }
2633
2634 // Possibly this is an instruction flag like 'gds'.
2635 if (Tok.getKind() == AsmToken::Identifier) {
2636 Operands.push_back(AMDGPUOperand::CreateToken(this, Tok.getString(), S));
2637 Parser.Lex();
2638 return MatchOperand_Success;
2639 }
2640
2641 return MatchOperand_NoMatch;
2642}
2643
2644StringRef AMDGPUAsmParser::parseMnemonicSuffix(StringRef Name) {
2645 // Clear any forced encodings from the previous instruction.
2646 setForcedEncodingSize(0);
2647 setForcedDPP(false);
2648 setForcedSDWA(false);
2649
2650 if (Name.endswith("_e64")) {
2651 setForcedEncodingSize(64);
2652 return Name.substr(0, Name.size() - 4);
2653 } else if (Name.endswith("_e32")) {
2654 setForcedEncodingSize(32);
2655 return Name.substr(0, Name.size() - 4);
2656 } else if (Name.endswith("_dpp")) {
2657 setForcedDPP(true);
2658 return Name.substr(0, Name.size() - 4);
2659 } else if (Name.endswith("_sdwa")) {
2660 setForcedSDWA(true);
2661 return Name.substr(0, Name.size() - 5);
2662 }
2663 return Name;
2664}
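
For illustration, the suffix stripping above behaves as follows (mnemonics chosen as examples):

  // "v_add_f32_e32"  -> "v_add_f32",  ForcedEncodingSize = 32
  // "v_add_f32_e64"  -> "v_add_f32",  ForcedEncodingSize = 64
  // "v_mov_b32_dpp"  -> "v_mov_b32",  ForcedDPP  = true
  // "v_mov_b32_sdwa" -> "v_mov_b32",  ForcedSDWA = true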
2665
2666bool AMDGPUAsmParser::ParseInstruction(ParseInstructionInfo &Info,
2667 StringRef Name,
2668 SMLoc NameLoc, OperandVector &Operands) {
2669 // Add the instruction mnemonic
2670 Name = parseMnemonicSuffix(Name);
2671 Operands.push_back(AMDGPUOperand::CreateToken(this, Name, NameLoc));
2672
2673 while (!getLexer().is(AsmToken::EndOfStatement)) {
2674 OperandMatchResultTy Res = parseOperand(Operands, Name);
2675
2676 // Eat the comma or space if there is one.
2677 if (getLexer().is(AsmToken::Comma))
2678 Parser.Lex();
2679
2680 switch (Res) {
2681 case MatchOperand_Success: break;
2682 case MatchOperand_ParseFail:
2683 Error(getLexer().getLoc(), "failed parsing operand.");
2684 while (!getLexer().is(AsmToken::EndOfStatement)) {
2685 Parser.Lex();
2686 }
2687 return true;
2688 case MatchOperand_NoMatch:
2689 Error(getLexer().getLoc(), "not a valid operand.");
2690 while (!getLexer().is(AsmToken::EndOfStatement)) {
2691 Parser.Lex();
2692 }
2693 return true;
2694 }
2695 }
2696
2697 return false;
2698}
2699
2700//===----------------------------------------------------------------------===//
2701// Utility functions
2702//===----------------------------------------------------------------------===//
2703
2704OperandMatchResultTy
2705AMDGPUAsmParser::parseIntWithPrefix(const char *Prefix, int64_t &Int) {
2706 switch(getLexer().getKind()) {
2707 default: return MatchOperand_NoMatch;
2708 case AsmToken::Identifier: {
2709 StringRef Name = Parser.getTok().getString();
2710 if (!Name.equals(Prefix)) {
2711 return MatchOperand_NoMatch;
2712 }
2713
2714 Parser.Lex();
2715 if (getLexer().isNot(AsmToken::Colon))
2716 return MatchOperand_ParseFail;
2717
2718 Parser.Lex();
2719
2720 bool IsMinus = false;
2721 if (getLexer().getKind() == AsmToken::Minus) {
2722 Parser.Lex();
2723 IsMinus = true;
2724 }
2725
2726 if (getLexer().isNot(AsmToken::Integer))
2727 return MatchOperand_ParseFail;
2728
2729 if (getParser().parseAbsoluteExpression(Int))
2730 return MatchOperand_ParseFail;
2731
2732 if (IsMinus)
2733 Int = -Int;
2734 break;
2735 }
2736 }
2737 return MatchOperand_Success;
2738}
2739
2740OperandMatchResultTy
2741AMDGPUAsmParser::parseIntWithPrefix(const char *Prefix, OperandVector &Operands,
2742 AMDGPUOperand::ImmTy ImmTy,
2743 bool (*ConvertResult)(int64_t&)) {
2744 SMLoc S = Parser.getTok().getLoc();
2745 int64_t Value = 0;
2746
2747 OperandMatchResultTy Res = parseIntWithPrefix(Prefix, Value);
2748 if (Res != MatchOperand_Success)
2749 return Res;
2750
2751 if (ConvertResult && !ConvertResult(Value)) {
2752 return MatchOperand_ParseFail;
2753 }
2754
2755 Operands.push_back(AMDGPUOperand::CreateImm(this, Value, S, ImmTy));
2756 return MatchOperand_Success;
2757}
2758
2759OperandMatchResultTy AMDGPUAsmParser::parseOperandArrayWithPrefix(
2760 const char *Prefix,
2761 OperandVector &Operands,
2762 AMDGPUOperand::ImmTy ImmTy,
2763 bool (*ConvertResult)(int64_t&)) {
2764 StringRef Name = Parser.getTok().getString();
2765 if (!Name.equals(Prefix))
2766 return MatchOperand_NoMatch;
2767
2768 Parser.Lex();
2769 if (getLexer().isNot(AsmToken::Colon))
2770 return MatchOperand_ParseFail;
2771
2772 Parser.Lex();
2773 if (getLexer().isNot(AsmToken::LBrac))
2774 return MatchOperand_ParseFail;
2775 Parser.Lex();
2776
2777 unsigned Val = 0;
2778 SMLoc S = Parser.getTok().getLoc();
2779
2780 // FIXME: How to verify the number of elements matches the number of src
2781 // operands?
2782 for (int I = 0; I < 4; ++I) {
2783 if (I != 0) {
2784 if (getLexer().is(AsmToken::RBrac))
2785 break;
2786
2787 if (getLexer().isNot(AsmToken::Comma))
2788 return MatchOperand_ParseFail;
2789 Parser.Lex();
2790 }
2791
2792 if (getLexer().isNot(AsmToken::Integer))
2793 return MatchOperand_ParseFail;
2794
2795 int64_t Op;
2796 if (getParser().parseAbsoluteExpression(Op))
2797 return MatchOperand_ParseFail;
2798
2799 if (Op != 0 && Op != 1)
2800 return MatchOperand_ParseFail;
2801 Val |= (Op << I);
2802 }
2803
2804 Parser.Lex();
2805 Operands.push_back(AMDGPUOperand::CreateImm(this, Val, S, ImmTy));
2806 return MatchOperand_Success;
2807}
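
The loop above packs up to four 0/1 elements into a bitmask, least significant element first. For example, with a hypothetical op_sel-style prefix:

  // op_sel:[1,0,0,1] -> Val = (1 << 0) | (0 << 1) | (0 << 2) | (1 << 3) = 0b1001 = 9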
2808
2809OperandMatchResultTy
2810AMDGPUAsmParser::parseNamedBit(const char *Name, OperandVector &Operands,
2811 AMDGPUOperand::ImmTy ImmTy) {
2812 int64_t Bit = 0;
2813 SMLoc S = Parser.getTok().getLoc();
2814
2815 // We are at the end of the statement, and this is a default argument, so
2816 // use a default value.
2817 if (getLexer().isNot(AsmToken::EndOfStatement)) {
2818 switch(getLexer().getKind()) {
2819 case AsmToken::Identifier: {
2820 StringRef Tok = Parser.getTok().getString();
2821 if (Tok == Name) {
2822 Bit = 1;
2823 Parser.Lex();
2824 } else if (Tok.startswith("no") && Tok.endswith(Name)) {
2825 Bit = 0;
2826 Parser.Lex();
2827 } else {
2828 return MatchOperand_NoMatch;
2829 }
2830 break;
2831 }
2832 default:
2833 return MatchOperand_NoMatch;
2834 }
2835 }
2836
2837 Operands.push_back(AMDGPUOperand::CreateImm(this, Bit, S, ImmTy));
2838 return MatchOperand_Success;
2839}
2840
2841static void addOptionalImmOperand(
2842 MCInst& Inst, const OperandVector& Operands,
2843 AMDGPUAsmParser::OptionalImmIndexMap& OptionalIdx,
2844 AMDGPUOperand::ImmTy ImmT,
2845 int64_t Default = 0) {
2846 auto i = OptionalIdx.find(ImmT);
2847 if (i != OptionalIdx.end()) {
2848 unsigned Idx = i->second;
2849 ((AMDGPUOperand &)*Operands[Idx]).addImmOperands(Inst, 1);
2850 } else {
2851 Inst.addOperand(MCOperand::createImm(Default));
2852 }
2853}
2854
2855OperandMatchResultTy
2856AMDGPUAsmParser::parseStringWithPrefix(StringRef Prefix, StringRef &Value) {
2857 if (getLexer().isNot(AsmToken::Identifier)) {
2858 return MatchOperand_NoMatch;
2859 }
2860 StringRef Tok = Parser.getTok().getString();
2861 if (Tok != Prefix) {
2862 return MatchOperand_NoMatch;
2863 }
2864
2865 Parser.Lex();
2866 if (getLexer().isNot(AsmToken::Colon)) {
2867 return MatchOperand_ParseFail;
2868 }
2869
2870 Parser.Lex();
2871 if (getLexer().isNot(AsmToken::Identifier)) {
2872 return MatchOperand_ParseFail;
2873 }
2874
2875 Value = Parser.getTok().getString();
2876 return MatchOperand_Success;
2877}
2878
2879//===----------------------------------------------------------------------===//
2880// ds
2881//===----------------------------------------------------------------------===//
2882
2883void AMDGPUAsmParser::cvtDSOffset01(MCInst &Inst,
2884 const OperandVector &Operands) {
2885 OptionalImmIndexMap OptionalIdx;
2886
2887 for (unsigned i = 1, e = Operands.size(); i != e; ++i) {
2888 AMDGPUOperand &Op = ((AMDGPUOperand &)*Operands[i]);
2889
2890 // Add the register arguments
2891 if (Op.isReg()) {
2892 Op.addRegOperands(Inst, 1);
2893 continue;
2894 }
2895
2896 // Handle optional arguments
2897 OptionalIdx[Op.getImmTy()] = i;
2898 }
2899
2900 addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyOffset0);
2901 addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyOffset1);
2902 addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyGDS);
2903
2904 Inst.addOperand(MCOperand::createReg(AMDGPU::M0)); // m0
2905}
2906
2907void AMDGPUAsmParser::cvtDSImpl(MCInst &Inst, const OperandVector &Operands,
2908 bool IsGdsHardcoded) {
2909 OptionalImmIndexMap OptionalIdx;
2910
2911 for (unsigned i = 1, e = Operands.size(); i != e; ++i) {
2912 AMDGPUOperand &Op = ((AMDGPUOperand &)*Operands[i]);
2913
2914 // Add the register arguments
2915 if (Op.isReg()) {
2916 Op.addRegOperands(Inst, 1);
2917 continue;
2918 }
2919
2920 if (Op.isToken() && Op.getToken() == "gds") {
2921 IsGdsHardcoded = true;
2922 continue;
2923 }
2924
2925 // Handle optional arguments
2926 OptionalIdx[Op.getImmTy()] = i;
2927 }
2928
2929 AMDGPUOperand::ImmTy OffsetType =
2930 (Inst.getOpcode() == AMDGPU::DS_SWIZZLE_B32_si ||
2931 Inst.getOpcode() == AMDGPU::DS_SWIZZLE_B32_vi) ? AMDGPUOperand::ImmTySwizzle :
2932 AMDGPUOperand::ImmTyOffset;
2933
2934 addOptionalImmOperand(Inst, Operands, OptionalIdx, OffsetType);
2935
2936 if (!IsGdsHardcoded) {
2937 addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyGDS);
2938 }
2939 Inst.addOperand(MCOperand::createReg(AMDGPU::M0)); // m0
2940}
2941
2942void AMDGPUAsmParser::cvtExp(MCInst &Inst, const OperandVector &Operands) {
2943 OptionalImmIndexMap OptionalIdx;
2944
2945 unsigned OperandIdx[4];
2946 unsigned EnMask = 0;
2947 int SrcIdx = 0;
2948
2949 for (unsigned i = 1, e = Operands.size(); i != e; ++i) {
2950 AMDGPUOperand &Op = ((AMDGPUOperand &)*Operands[i]);
2951
2952 // Add the register arguments
2953 if (Op.isReg()) {
2954      assert(SrcIdx < 4);
2955 OperandIdx[SrcIdx] = Inst.size();
2956 Op.addRegOperands(Inst, 1);
2957 ++SrcIdx;
2958 continue;
2959 }
2960
2961 if (Op.isOff()) {
2962      assert(SrcIdx < 4);
2963 OperandIdx[SrcIdx] = Inst.size();
2964 Inst.addOperand(MCOperand::createReg(AMDGPU::NoRegister));
2965 ++SrcIdx;
2966 continue;
2967 }
2968
2969 if (Op.isImm() && Op.getImmTy() == AMDGPUOperand::ImmTyExpTgt) {
2970 Op.addImmOperands(Inst, 1);
2971 continue;
2972 }
2973
2974 if (Op.isToken() && Op.getToken() == "done")
2975 continue;
2976
2977 // Handle optional arguments
2978 OptionalIdx[Op.getImmTy()] = i;
2979 }
2980
2981  assert(SrcIdx == 4);
2982
2983 bool Compr = false;
2984 if (OptionalIdx.find(AMDGPUOperand::ImmTyExpCompr) != OptionalIdx.end()) {
2985 Compr = true;
2986 Inst.getOperand(OperandIdx[1]) = Inst.getOperand(OperandIdx[2]);
2987 Inst.getOperand(OperandIdx[2]).setReg(AMDGPU::NoRegister);
2988 Inst.getOperand(OperandIdx[3]).setReg(AMDGPU::NoRegister);
2989 }
2990
2991 for (auto i = 0; i < SrcIdx; ++i) {
2992 if (Inst.getOperand(OperandIdx[i]).getReg() != AMDGPU::NoRegister) {
2993 EnMask |= Compr? (0x3 << i * 2) : (0x1 << i);
2994 }
2995 }
2996
2997 addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyExpVM);
2998 addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyExpCompr);
2999
3000 Inst.addOperand(MCOperand::createImm(EnMask));
3001}
3002
3003//===----------------------------------------------------------------------===//
3004// s_waitcnt
3005//===----------------------------------------------------------------------===//
3006
3007static bool
3008encodeCnt(
3009 const AMDGPU::IsaInfo::IsaVersion ISA,
3010 int64_t &IntVal,
3011 int64_t CntVal,
3012 bool Saturate,
3013 unsigned (*encode)(const IsaInfo::IsaVersion &Version, unsigned, unsigned),
3014 unsigned (*decode)(const IsaInfo::IsaVersion &Version, unsigned))
3015{
3016 bool Failed = false;
3017
3018 IntVal = encode(ISA, IntVal, CntVal);
3019 if (CntVal != decode(ISA, IntVal)) {
3020 if (Saturate) {
3021 IntVal = encode(ISA, IntVal, -1);
3022 } else {
3023 Failed = true;
3024 }
3025 }
3026 return Failed;
3027}
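
The round-trip check above detects a counter value that does not fit its bitfield. A standalone sketch with a hypothetical 4-bit field (the real field widths come from the IsaVersion):

  #include <cassert>
  #include <cstdint>

  // Hypothetical 4-bit counter field in the low bits of a waitcnt word.
  static uint32_t encodeField(uint32_t Wait, uint32_t Val) {
    return (Wait & ~0xFu) | (Val & 0xFu);
  }
  static uint32_t decodeField(uint32_t Wait) { return Wait & 0xFu; }

  int main() {
    uint32_t Wait = 0;
    uint32_t Cnt = 70;                // does not fit in 4 bits
    Wait = encodeField(Wait, Cnt);
    if (decodeField(Wait) != Cnt)     // round trip failed: value was truncated
      Wait = encodeField(Wait, ~0u);  // a *_sat request clamps to the maximum
    assert(decodeField(Wait) == 15);  // saturated rather than silently wrong
    return 0;
  }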
3028
3029bool AMDGPUAsmParser::parseCnt(int64_t &IntVal) {
3030 StringRef CntName = Parser.getTok().getString();
3031 int64_t CntVal;
3032
3033 Parser.Lex();
3034 if (getLexer().isNot(AsmToken::LParen))
3035 return true;
3036
3037 Parser.Lex();
3038 if (getLexer().isNot(AsmToken::Integer))
3039 return true;
3040
3041 SMLoc ValLoc = Parser.getTok().getLoc();
3042 if (getParser().parseAbsoluteExpression(CntVal))
3043 return true;
3044
3045 AMDGPU::IsaInfo::IsaVersion ISA =
3046 AMDGPU::IsaInfo::getIsaVersion(getFeatureBits());
3047
3048 bool Failed = true;
3049 bool Sat = CntName.endswith("_sat");
3050
3051 if (CntName == "vmcnt" || CntName == "vmcnt_sat") {
3052 Failed = encodeCnt(ISA, IntVal, CntVal, Sat, encodeVmcnt, decodeVmcnt);
3053 } else if (CntName == "expcnt" || CntName == "expcnt_sat") {
3054 Failed = encodeCnt(ISA, IntVal, CntVal, Sat, encodeExpcnt, decodeExpcnt);
3055 } else if (CntName == "lgkmcnt" || CntName == "lgkmcnt_sat") {
3056 Failed = encodeCnt(ISA, IntVal, CntVal, Sat, encodeLgkmcnt, decodeLgkmcnt);
3057 }
3058
3059 if (Failed) {
3060 Error(ValLoc, "too large value for " + CntName);
3061 return true;
3062 }
3063
3064 if (getLexer().isNot(AsmToken::RParen)) {
3065 return true;
3066 }
3067
3068 Parser.Lex();
3069 if (getLexer().is(AsmToken::Amp) || getLexer().is(AsmToken::Comma)) {
3070 const AsmToken NextToken = getLexer().peekTok();
3071 if (NextToken.is(AsmToken::Identifier)) {
3072 Parser.Lex();
3073 }
3074 }
3075
3076 return false;
3077}
3078
3079OperandMatchResultTy
3080AMDGPUAsmParser::parseSWaitCntOps(OperandVector &Operands) {
3081 AMDGPU::IsaInfo::IsaVersion ISA =
3082 AMDGPU::IsaInfo::getIsaVersion(getFeatureBits());
3083 int64_t Waitcnt = getWaitcntBitMask(ISA);
3084 SMLoc S = Parser.getTok().getLoc();
3085
3086 switch(getLexer().getKind()) {
3087 default: return MatchOperand_ParseFail;
3088 case AsmToken::Integer:
3089 // The operand can be an integer value.
3090 if (getParser().parseAbsoluteExpression(Waitcnt))
3091 return MatchOperand_ParseFail;
3092 break;
3093
3094 case AsmToken::Identifier:
3095 do {
3096 if (parseCnt(Waitcnt))
3097 return MatchOperand_ParseFail;
3098 } while(getLexer().isNot(AsmToken::EndOfStatement));
3099 break;
3100 }
3101 Operands.push_back(AMDGPUOperand::CreateImm(this, Waitcnt, S));
3102 return MatchOperand_Success;
3103}
3104
3105bool AMDGPUAsmParser::parseHwregConstruct(OperandInfoTy &HwReg, int64_t &Offset,
3106 int64_t &Width) {
3107 using namespace llvm::AMDGPU::Hwreg;
3108
3109 if (Parser.getTok().getString() != "hwreg")
3110 return true;
3111 Parser.Lex();
3112
3113 if (getLexer().isNot(AsmToken::LParen))
3114 return true;
3115 Parser.Lex();
3116
3117 if (getLexer().is(AsmToken::Identifier)) {
3118 HwReg.IsSymbolic = true;
3119 HwReg.Id = ID_UNKNOWN_;
3120 const StringRef tok = Parser.getTok().getString();
3121 for (int i = ID_SYMBOLIC_FIRST_; i < ID_SYMBOLIC_LAST_; ++i) {
3122 if (tok == IdSymbolic[i]) {
3123 HwReg.Id = i;
3124 break;
3125 }
3126 }
3127 Parser.Lex();
3128 } else {
3129 HwReg.IsSymbolic = false;
3130 if (getLexer().isNot(AsmToken::Integer))
3131 return true;
3132 if (getParser().parseAbsoluteExpression(HwReg.Id))
3133 return true;
3134 }
3135
3136 if (getLexer().is(AsmToken::RParen)) {
3137 Parser.Lex();
3138 return false;
3139 }
3140
3141 // optional params
3142 if (getLexer().isNot(AsmToken::Comma))
3143 return true;
3144 Parser.Lex();
3145
3146 if (getLexer().isNot(AsmToken::Integer))
3147 return true;
3148 if (getParser().parseAbsoluteExpression(Offset))
3149 return true;
3150
3151 if (getLexer().isNot(AsmToken::Comma))
3152 return true;
3153 Parser.Lex();
3154
3155 if (getLexer().isNot(AsmToken::Integer))
3156 return true;
3157 if (getParser().parseAbsoluteExpression(Width))
3158 return true;
3159
3160 if (getLexer().isNot(AsmToken::RParen))
3161 return true;
3162 Parser.Lex();
3163
3164 return false;
3165}
3166
3167OperandMatchResultTy AMDGPUAsmParser::parseHwreg(OperandVector &Operands) {
3168 using namespace llvm::AMDGPU::Hwreg;
3169
3170 int64_t Imm16Val = 0;
3171 SMLoc S = Parser.getTok().getLoc();
3172
3173 switch(getLexer().getKind()) {
3174 default: return MatchOperand_NoMatch;
3175 case AsmToken::Integer:
3176 // The operand can be an integer value.
3177 if (getParser().parseAbsoluteExpression(Imm16Val))
3178 return MatchOperand_NoMatch;
3179 if (Imm16Val < 0 || !isUInt<16>(Imm16Val)) {
3180 Error(S, "invalid immediate: only 16-bit values are legal");
3181      // Do not return an error code, but create an imm operand anyway and proceed
3182      // to the next operand, if any. That avoids unnecessary error messages.
3183 }
3184 break;
3185
3186 case AsmToken::Identifier: {
3187 OperandInfoTy HwReg(ID_UNKNOWN_);
3188 int64_t Offset = OFFSET_DEFAULT_;
3189 int64_t Width = WIDTH_M1_DEFAULT_ + 1;
3190 if (parseHwregConstruct(HwReg, Offset, Width))
3191 return MatchOperand_ParseFail;
3192 if (HwReg.Id < 0 || !isUInt<ID_WIDTH_>(HwReg.Id)) {
3193 if (HwReg.IsSymbolic)
3194 Error(S, "invalid symbolic name of hardware register");
3195 else
3196 Error(S, "invalid code of hardware register: only 6-bit values are legal");
3197 }
3198 if (Offset < 0 || !isUInt<OFFSET_WIDTH_>(Offset))
3199 Error(S, "invalid bit offset: only 5-bit values are legal");
3200 if ((Width-1) < 0 || !isUInt<WIDTH_M1_WIDTH_>(Width-1))
3201 Error(S, "invalid bitfield width: only values from 1 to 32 are legal");
3202 Imm16Val = (HwReg.Id << ID_SHIFT_) | (Offset << OFFSET_SHIFT_) | ((Width-1) << WIDTH_M1_SHIFT_);
3203 }
3204 break;
3205 }
3206 Operands.push_back(AMDGPUOperand::CreateImm(this, Imm16Val, S, AMDGPUOperand::ImmTyHwreg));
3207 return MatchOperand_Success;
3208}
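
Assuming the shift constants from SIDefines.h are ID_SHIFT_ = 0, OFFSET_SHIFT_ = 6 and WIDTH_M1_SHIFT_ = 11, the packing above gives, for example:

  // hwreg(1, 0, 32) -> Imm16Val = (1 << 0) | (0 << 6) | ((32 - 1) << 11) = 0xF801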
3209
3210bool AMDGPUOperand::isSWaitCnt() const {
3211 return isImm();
3212}
3213
3214bool AMDGPUOperand::isHwreg() const {
3215 return isImmTy(ImmTyHwreg);
3216}
3217
3218bool AMDGPUAsmParser::parseSendMsgConstruct(OperandInfoTy &Msg, OperandInfoTy &Operation, int64_t &StreamId) {
3219 using namespace llvm::AMDGPU::SendMsg;
3220
3221 if (Parser.getTok().getString() != "sendmsg")
3222 return true;
3223 Parser.Lex();
3224
3225 if (getLexer().isNot(AsmToken::LParen))
3226 return true;
3227 Parser.Lex();
3228
3229 if (getLexer().is(AsmToken::Identifier)) {
3230 Msg.IsSymbolic = true;
3231 Msg.Id = ID_UNKNOWN_;
3232 const std::string tok = Parser.getTok().getString();
3233 for (int i = ID_GAPS_FIRST_; i < ID_GAPS_LAST_; ++i) {
3234 switch(i) {
3235 default: continue; // Omit gaps.
3236 case ID_INTERRUPT: case ID_GS: case ID_GS_DONE: case ID_SYSMSG: break;
3237 }
3238 if (tok == IdSymbolic[i]) {
3239 Msg.Id = i;
3240 break;
3241 }
3242 }
3243 Parser.Lex();
3244 } else {
3245 Msg.IsSymbolic = false;
3246 if (getLexer().isNot(AsmToken::Integer))
3247 return true;
3248 if (getParser().parseAbsoluteExpression(Msg.Id))
3249 return true;
3250 if (getLexer().is(AsmToken::Integer))
3251 if (getParser().parseAbsoluteExpression(Msg.Id))
3252 Msg.Id = ID_UNKNOWN_;
3253 }
3254 if (Msg.Id == ID_UNKNOWN_) // Don't know how to parse the rest.
3255 return false;
3256
3257 if (!(Msg.Id == ID_GS || Msg.Id == ID_GS_DONE || Msg.Id == ID_SYSMSG)) {
3258 if (getLexer().isNot(AsmToken::RParen))
3259 return true;
3260 Parser.Lex();
3261 return false;
3262 }
3263
3264 if (getLexer().isNot(AsmToken::Comma))
3265 return true;
3266 Parser.Lex();
3267
3268  assert(Msg.Id == ID_GS || Msg.Id == ID_GS_DONE || Msg.Id == ID_SYSMSG);
3269 Operation.Id = ID_UNKNOWN_;
3270 if (getLexer().is(AsmToken::Identifier)) {
3271 Operation.IsSymbolic = true;
3272 const char* const *S = (Msg.Id == ID_SYSMSG) ? OpSysSymbolic : OpGsSymbolic;
3273 const int F = (Msg.Id == ID_SYSMSG) ? OP_SYS_FIRST_ : OP_GS_FIRST_;
3274 const int L = (Msg.Id == ID_SYSMSG) ? OP_SYS_LAST_ : OP_GS_LAST_;
3275 const StringRef Tok = Parser.getTok().getString();
3276 for (int i = F; i < L; ++i) {
3277 if (Tok == S[i]) {
3278 Operation.Id = i;
3279 break;
3280 }
3281 }
3282 Parser.Lex();
3283 } else {
3284 Operation.IsSymbolic = false;
3285 if (getLexer().isNot(AsmToken::Integer))
3286 return true;
3287 if (getParser().parseAbsoluteExpression(Operation.Id))
3288 return true;
3289 }
3290
3291 if ((Msg.Id == ID_GS || Msg.Id == ID_GS_DONE) && Operation.Id != OP_GS_NOP) {
3292 // Stream id is optional.
3293 if (getLexer().is(AsmToken::RParen)) {
3294 Parser.Lex();
3295 return false;
3296 }
3297
3298 if (getLexer().isNot(AsmToken::Comma))
3299 return true;
3300 Parser.Lex();
3301
3302 if (getLexer().isNot(AsmToken::Integer))
3303 return true;
3304 if (getParser().parseAbsoluteExpression(StreamId))
3305 return true;
3306 }
3307
3308 if (getLexer().isNot(AsmToken::RParen))
3309 return true;
3310 Parser.Lex();
3311 return false;
3312}
3313
3314OperandMatchResultTy AMDGPUAsmParser::parseInterpSlot(OperandVector &Operands) {
3315 if (getLexer().getKind() != AsmToken::Identifier)
3316 return MatchOperand_NoMatch;
3317
3318 StringRef Str = Parser.getTok().getString();
3319 int Slot = StringSwitch<int>(Str)
3320 .Case("p10", 0)
3321 .Case("p20", 1)
3322 .Case("p0", 2)
3323 .Default(-1);
3324
3325 SMLoc S = Parser.getTok().getLoc();
3326 if (Slot == -1)
3327 return MatchOperand_ParseFail;
3328
3329 Parser.Lex();
3330 Operands.push_back(AMDGPUOperand::CreateImm(this, Slot, S,
3331 AMDGPUOperand::ImmTyInterpSlot));
3332 return MatchOperand_Success;
3333}
3334
3335OperandMatchResultTy AMDGPUAsmParser::parseInterpAttr(OperandVector &Operands) {
3336 if (getLexer().getKind() != AsmToken::Identifier)
3337 return MatchOperand_NoMatch;
3338
3339 StringRef Str = Parser.getTok().getString();
3340 if (!Str.startswith("attr"))
3341 return MatchOperand_NoMatch;
3342
3343 StringRef Chan = Str.take_back(2);
3344 int AttrChan = StringSwitch<int>(Chan)
3345 .Case(".x", 0)
3346 .Case(".y", 1)
3347 .Case(".z", 2)
3348 .Case(".w", 3)
3349 .Default(-1);
3350 if (AttrChan == -1)
3351 return MatchOperand_ParseFail;
3352
3353 Str = Str.drop_back(2).drop_front(4);
3354
3355 uint8_t Attr;
3356 if (Str.getAsInteger(10, Attr))
3357 return MatchOperand_ParseFail;
3358
3359 SMLoc S = Parser.getTok().getLoc();
3360 Parser.Lex();
3361 if (Attr > 63) {
3362 Error(S, "out of bounds attr");
3363 return MatchOperand_Success;
3364 }
3365
3366 SMLoc SChan = SMLoc::getFromPointer(Chan.data());
3367
3368 Operands.push_back(AMDGPUOperand::CreateImm(this, Attr, S,
3369 AMDGPUOperand::ImmTyInterpAttr));
3370 Operands.push_back(AMDGPUOperand::CreateImm(this, AttrChan, SChan,
3371 AMDGPUOperand::ImmTyAttrChan));
3372 return MatchOperand_Success;
3373}
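
For reference, here is a minimal standalone sketch (using std::string in place of StringRef; not part of the analyzed file) of how parseInterpAttr splits a token such as "attr12.x": the last two characters select the channel, the digits between "attr" and the channel suffix give the attribute number, and values above 63 are flagged as out of bounds. The helper name is hypothetical.

#include <cassert>
#include <string>

// Hypothetical helper mirroring the take_back(2) / drop_back(2).drop_front(4)
// slicing above; the real code uses StringRef::getAsInteger, which fails
// gracefully on non-digit input, whereas std::stoi would throw.
static bool splitInterpAttr(const std::string &Tok, int &Attr, int &Chan) {
  if (Tok.size() < 7 || Tok.compare(0, 4, "attr") != 0)
    return false;
  const std::string Suffix = Tok.substr(Tok.size() - 2); // Str.take_back(2)
  Chan = Suffix == ".x" ? 0 : Suffix == ".y" ? 1
       : Suffix == ".z" ? 2 : Suffix == ".w" ? 3 : -1;
  if (Chan == -1)
    return false;
  Attr = std::stoi(Tok.substr(4, Tok.size() - 6)); // digits between the two
  return Attr <= 63; // the parser reports "out of bounds attr" above 63
}

int main() {
  int Attr, Chan;
  assert(splitInterpAttr("attr12.x", Attr, Chan) && Attr == 12 && Chan == 0);
  assert(!splitInterpAttr("attr64.w", Attr, Chan)); // out of bounds
  return 0;
}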
3374
3375void AMDGPUAsmParser::errorExpTgt() {
3376 Error(Parser.getTok().getLoc(), "invalid exp target");
3377}
3378
3379OperandMatchResultTy AMDGPUAsmParser::parseExpTgtImpl(StringRef Str,
3380 uint8_t &Val) {
3381 if (Str == "null") {
3382 Val = 9;
3383 return MatchOperand_Success;
3384 }
3385
3386 if (Str.startswith("mrt")) {
3387 Str = Str.drop_front(3);
3388 if (Str == "z") { // == mrtz
3389 Val = 8;
3390 return MatchOperand_Success;
3391 }
3392
3393 if (Str.getAsInteger(10, Val))
3394 return MatchOperand_ParseFail;
3395
3396 if (Val > 7)
3397 errorExpTgt();
3398
3399 return MatchOperand_Success;
3400 }
3401
3402 if (Str.startswith("pos")) {
3403 Str = Str.drop_front(3);
3404 if (Str.getAsInteger(10, Val))
3405 return MatchOperand_ParseFail;
3406
3407 if (Val > 3)
3408 errorExpTgt();
3409
3410 Val += 12;
3411 return MatchOperand_Success;
3412 }
3413
3414 if (Str.startswith("param")) {
3415 Str = Str.drop_front(5);
3416 if (Str.getAsInteger(10, Val))
3417 return MatchOperand_ParseFail;
3418
3419 if (Val >= 32)
3420 errorExpTgt();
3421
3422 Val += 32;
3423 return MatchOperand_Success;
3424 }
3425
3426 if (Str.startswith("invalid_target_")) {
3427 Str = Str.drop_front(15);
3428 if (Str.getAsInteger(10, Val))
3429 return MatchOperand_ParseFail;
3430
3431 errorExpTgt();
3432 return MatchOperand_Success;
3433 }
3434
3435 return MatchOperand_NoMatch;
3436}
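
The mapping implemented by parseExpTgtImpl can be summarized in a small standalone sketch (illustration only, derived directly from the branches above): null encodes to 9, mrtz to 8, mrt0..7 to 0..7, pos0..3 to 12..15, and param0..31 to 32..63.

#include <cassert>

static int expTgtNull()       { return 9; }
static int expTgtMrtZ()       { return 8; }
static int expTgtMrt(int N)   { assert(N <= 7); return N; }       // mrt0..7
static int expTgtPos(int N)   { assert(N <= 3); return N + 12; }  // pos0..3
static int expTgtParam(int N) { assert(N < 32); return N + 32; }  // param0..31

int main() {
  assert(expTgtNull() == 9 && expTgtMrtZ() == 8);
  assert(expTgtMrt(0) == 0 && expTgtPos(3) == 15 && expTgtParam(0) == 32);
  return 0;
}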
3437
3438OperandMatchResultTy AMDGPUAsmParser::parseExpTgt(OperandVector &Operands) {
3439 uint8_t Val;
3440 StringRef Str = Parser.getTok().getString();
3441
3442 auto Res = parseExpTgtImpl(Str, Val);
3443 if (Res != MatchOperand_Success)
3444 return Res;
3445
3446 SMLoc S = Parser.getTok().getLoc();
3447 Parser.Lex();
3448
3449 Operands.push_back(AMDGPUOperand::CreateImm(this, Val, S,
3450 AMDGPUOperand::ImmTyExpTgt));
3451 return MatchOperand_Success;
3452}
3453
3454OperandMatchResultTy
3455AMDGPUAsmParser::parseSendMsgOp(OperandVector &Operands) {
3456 using namespace llvm::AMDGPU::SendMsg;
3457
3458 int64_t Imm16Val = 0;
3459 SMLoc S = Parser.getTok().getLoc();
3460
3461 switch(getLexer().getKind()) {
3462 default:
3463 return MatchOperand_NoMatch;
3464 case AsmToken::Integer:
3465 // The operand can be an integer value.
3466 if (getParser().parseAbsoluteExpression(Imm16Val))
3467 return MatchOperand_NoMatch;
3468 if (Imm16Val < 0 || !isUInt<16>(Imm16Val)) {
3469 Error(S, "invalid immediate: only 16-bit values are legal");
3470 // Do not return an error code; create an imm operand anyway and proceed
3471 // to the next operand, if any. That avoids unnecessary error messages.
3472 }
3473 break;
3474 case AsmToken::Identifier: {
3475 OperandInfoTy Msg(ID_UNKNOWN_);
3476 OperandInfoTy Operation(OP_UNKNOWN_);
3477 int64_t StreamId = STREAM_ID_DEFAULT_;
3478 if (parseSendMsgConstruct(Msg, Operation, StreamId))
3479 return MatchOperand_ParseFail;
3480 do {
3481 // Validate and encode message ID.
3482 if (! ((ID_INTERRUPT <= Msg.Id && Msg.Id <= ID_GS_DONE)
3483 || Msg.Id == ID_SYSMSG)) {
3484 if (Msg.IsSymbolic)
3485 Error(S, "invalid/unsupported symbolic name of message");
3486 else
3487 Error(S, "invalid/unsupported code of message");
3488 break;
3489 }
3490 Imm16Val = (Msg.Id << ID_SHIFT_);
3491 // Validate and encode operation ID.
3492 if (Msg.Id == ID_GS || Msg.Id == ID_GS_DONE) {
3493 if (! (OP_GS_FIRST_ <= Operation.Id && Operation.Id < OP_GS_LAST_)) {
3494 if (Operation.IsSymbolic)
3495 Error(S, "invalid symbolic name of GS_OP");
3496 else
3497 Error(S, "invalid code of GS_OP: only 2-bit values are legal");
3498 break;
3499 }
3500 if (Operation.Id == OP_GS_NOP
3501 && Msg.Id != ID_GS_DONE) {
3502 Error(S, "invalid GS_OP: NOP is for GS_DONE only");
3503 break;
3504 }
3505 Imm16Val |= (Operation.Id << OP_SHIFT_);
3506 }
3507 if (Msg.Id == ID_SYSMSG) {
3508 if (! (OP_SYS_FIRST_ <= Operation.Id && Operation.Id < OP_SYS_LAST_)) {
3509 if (Operation.IsSymbolic)
3510 Error(S, "invalid/unsupported symbolic name of SYSMSG_OP");
3511 else
3512 Error(S, "invalid/unsupported code of SYSMSG_OP");
3513 break;
3514 }
3515 Imm16Val |= (Operation.Id << OP_SHIFT_);
3516 }
3517 // Validate and encode stream ID.
3518 if ((Msg.Id == ID_GS || Msg.Id == ID_GS_DONE) && Operation.Id != OP_GS_NOP) {
3519 if (! (STREAM_ID_FIRST_ <= StreamId && StreamId < STREAM_ID_LAST_)) {
3520 Error(S, "invalid stream id: only 2-bit values are legal");
3521 break;
3522 }
3523 Imm16Val |= (StreamId << STREAM_ID_SHIFT_);
3524 }
3525 } while (false);
3526 }
3527 break;
3528 }
3529 Operands.push_back(AMDGPUOperand::CreateImm(this, Imm16Val, S, AMDGPUOperand::ImmTySendMsg));
3530 return MatchOperand_Success;
3531}
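
A standalone sketch of how the 16-bit sendmsg immediate is assembled above. The shift values and message/operation IDs below are assumptions made for illustration (the real constants live in llvm::AMDGPU::SendMsg); only the composition pattern mirrors the parser.

#include <cassert>
#include <cstdint>

static constexpr unsigned ID_SHIFT = 0;        // assumed
static constexpr unsigned OP_SHIFT = 4;        // assumed
static constexpr unsigned STREAM_ID_SHIFT = 8; // assumed

static int64_t encodeSendMsg(int64_t MsgId, int64_t OpId, int64_t StreamId) {
  int64_t Imm = MsgId << ID_SHIFT; // message ID field
  Imm |= OpId << OP_SHIFT;         // GS_OP / SYSMSG_OP field
  Imm |= StreamId << STREAM_ID_SHIFT; // stream ID, GS messages only
  return Imm;
}

int main() {
  // e.g. s_sendmsg sendmsg(MSG_GS, GS_OP_EMIT, 1), assuming MsgId = 2 and
  // OpId = 2 (both values are assumptions for this sketch):
  assert(encodeSendMsg(2, 2, 1) == (2 | (2 << 4) | (1 << 8)));
  return 0;
}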
3532
3533bool AMDGPUOperand::isSendMsg() const {
3534 return isImmTy(ImmTySendMsg);
3535}
3536
3537//===----------------------------------------------------------------------===//
3538// parser helpers
3539//===----------------------------------------------------------------------===//
3540
3541bool
3542AMDGPUAsmParser::trySkipId(const StringRef Id) {
3543 if (getLexer().getKind() == AsmToken::Identifier &&
3544 Parser.getTok().getString() == Id) {
3545 Parser.Lex();
3546 return true;
3547 }
3548 return false;
3549}
3550
3551bool
3552AMDGPUAsmParser::trySkipToken(const AsmToken::TokenKind Kind) {
3553 if (getLexer().getKind() == Kind) {
3554 Parser.Lex();
3555 return true;
3556 }
3557 return false;
3558}
3559
3560bool
3561AMDGPUAsmParser::skipToken(const AsmToken::TokenKind Kind,
3562 const StringRef ErrMsg) {
3563 if (!trySkipToken(Kind)) {
3564 Error(Parser.getTok().getLoc(), ErrMsg);
3565 return false;
3566 }
3567 return true;
3568}
3569
3570bool
3571AMDGPUAsmParser::parseExpr(int64_t &Imm) {
3572 return !getParser().parseAbsoluteExpression(Imm);
3573}
3574
3575bool
3576AMDGPUAsmParser::parseString(StringRef &Val, const StringRef ErrMsg) {
3577 SMLoc S = Parser.getTok().getLoc();
3578 if (getLexer().getKind() == AsmToken::String) {
3579 Val = Parser.getTok().getStringContents();
3580 Parser.Lex();
3581 return true;
3582 } else {
3583 Error(S, ErrMsg);
3584 return false;
3585 }
3586}
3587
3588//===----------------------------------------------------------------------===//
3589// swizzle
3590//===----------------------------------------------------------------------===//
3591
3592 LLVM_READNONE
3593static unsigned
3594encodeBitmaskPerm(const unsigned AndMask,
3595 const unsigned OrMask,
3596 const unsigned XorMask) {
3597 using namespace llvm::AMDGPU::Swizzle;
3598
3599 return BITMASK_PERM_ENC |
3600 (AndMask << BITMASK_AND_SHIFT) |
3601 (OrMask << BITMASK_OR_SHIFT) |
3602 (XorMask << BITMASK_XOR_SHIFT);
3603}
3604
3605bool
3606AMDGPUAsmParser::parseSwizzleOperands(const unsigned OpNum, int64_t* Op,
3607 const unsigned MinVal,
3608 const unsigned MaxVal,
3609 const StringRef ErrMsg) {
3610 for (unsigned i = 0; i < OpNum; ++i) {
3611 if (!skipToken(AsmToken::Comma, "expected a comma")){
3612 return false;
3613 }
3614 SMLoc ExprLoc = Parser.getTok().getLoc();
3615 if (!parseExpr(Op[i])) {
3616 return false;
3617 }
3618 if (Op[i] < MinVal || Op[i] > MaxVal) {
3619 Error(ExprLoc, ErrMsg);
3620 return false;
3621 }
3622 }
3623
3624 return true;
3625}
3626
3627bool
3628AMDGPUAsmParser::parseSwizzleQuadPerm(int64_t &Imm) {
3629 using namespace llvm::AMDGPU::Swizzle;
3630
3631 int64_t Lane[LANE_NUM];
3632 if (parseSwizzleOperands(LANE_NUM, Lane, 0, LANE_MAX,
3633 "expected a 2-bit lane id")) {
3634 Imm = QUAD_PERM_ENC;
3635 for (auto i = 0; i < LANE_NUM; ++i) {
3636 Imm |= Lane[i] << (LANE_SHIFT * i);
3637 }
3638 return true;
3639 }
3640 return false;
3641}
3642
3643bool
3644AMDGPUAsmParser::parseSwizzleBroadcast(int64_t &Imm) {
3645 using namespace llvm::AMDGPU::Swizzle;
3646
3647 SMLoc S = Parser.getTok().getLoc();
3648 int64_t GroupSize;
3649 int64_t LaneIdx;
3650
3651 if (!parseSwizzleOperands(1, &GroupSize,
3652 2, 32,
3653 "group size must be in the interval [2,32]")) {
3654 return false;
3655 }
3656 if (!isPowerOf2_64(GroupSize)) {
3657 Error(S, "group size must be a power of two");
3658 return false;
3659 }
3660 if (parseSwizzleOperands(1, &LaneIdx,
3661 0, GroupSize - 1,
3662 "lane id must be in the interval [0,group size - 1]")) {
3663 Imm = encodeBitmaskPerm(BITMASK_MAX - GroupSize + 1, LaneIdx, 0);
3664 return true;
3665 }
3666 return false;
3667}
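
The AndMask expression above deserves a worked example. Assuming BITMASK_MAX == 0x1f (a 5-bit lane id; an assumption for this sketch), BITMASK_MAX - GroupSize + 1 for a power-of-two GroupSize keeps the high bits of the lane id (the group base) and zeroes the low log2(GroupSize) bits; OrMask = LaneIdx then selects the same lane within every group.

#include <cassert>

int main() {
  const unsigned BITMASK_MAX = 0x1f; // assumed lane-id field width (5 bits)
  const unsigned GroupSize = 8;
  const unsigned AndMask = BITMASK_MAX - GroupSize + 1; // 0x18: clears bits 0..2
  assert(AndMask == 0x18);
  const unsigned LaneIdx = 5; // broadcast lane 5 of each 8-lane group
  for (unsigned Lane = 0; Lane < 32; ++Lane)
    assert(((Lane & AndMask) | LaneIdx) == (Lane / 8) * 8 + 5);
  return 0;
}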
3668
3669bool
3670AMDGPUAsmParser::parseSwizzleReverse(int64_t &Imm) {
3671 using namespace llvm::AMDGPU::Swizzle;
3672
3673 SMLoc S = Parser.getTok().getLoc();
3674 int64_t GroupSize;
3675
3676 if (!parseSwizzleOperands(1, &GroupSize,
3677 2, 32, "group size must be in the interval [2,32]")) {
3678 return false;
3679 }
3680 if (!isPowerOf2_64(GroupSize)) {
3681 Error(S, "group size must be a power of two");
3682 return false;
3683 }
3684
3685 Imm = encodeBitmaskPerm(BITMASK_MAX, 0, GroupSize - 1);
3686 return true;
3687}
3688
3689bool
3690AMDGPUAsmParser::parseSwizzleSwap(int64_t &Imm) {
3691 using namespace llvm::AMDGPU::Swizzle;
3692
3693 SMLoc S = Parser.getTok().getLoc();
3694 int64_t GroupSize;
3695
3696 if (!parseSwizzleOperands(1, &GroupSize,
3697 1, 16, "group size must be in the interval [1,16]")) {
3698 return false;
3699 }
3700 if (!isPowerOf2_64(GroupSize)) {
3701 Error(S, "group size must be a power of two");
3702 return false;
3703 }
3704
3705 Imm = encodeBitmaskPerm(BITMASK_MAX, 0, GroupSize);
3706 return true;
3707}
3708
3709bool
3710AMDGPUAsmParser::parseSwizzleBitmaskPerm(int64_t &Imm) {
3711 using namespace llvm::AMDGPU::Swizzle;
3712
3713 if (!skipToken(AsmToken::Comma, "expected a comma")) {
3714 return false;
3715 }
3716
3717 StringRef Ctl;
3718 SMLoc StrLoc = Parser.getTok().getLoc();
3719 if (!parseString(Ctl)) {
3720 return false;
3721 }
3722 if (Ctl.size() != BITMASK_WIDTH) {
3723 Error(StrLoc, "expected a 5-character mask");
3724 return false;
3725 }
3726
3727 unsigned AndMask = 0;
3728 unsigned OrMask = 0;
3729 unsigned XorMask = 0;
3730
3731 for (size_t i = 0; i < Ctl.size(); ++i) {
3732 unsigned Mask = 1 << (BITMASK_WIDTH - 1 - i);
3733 switch(Ctl[i]) {
3734 default:
3735 Error(StrLoc, "invalid mask");
3736 return false;
3737 case '0':
3738 break;
3739 case '1':
3740 OrMask |= Mask;
3741 break;
3742 case 'p':
3743 AndMask |= Mask;
3744 break;
3745 case 'i':
3746 AndMask |= Mask;
3747 XorMask |= Mask;
3748 break;
3749 }
3750 }
3751
3752 Imm = encodeBitmaskPerm(AndMask, OrMask, XorMask);
3753 return true;
3754}
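
A worked example of the 5-character control string decoded above: '0' forces a bit to 0, '1' forces it to 1 (OrMask), 'p' passes it through (AndMask), and 'i' inverts it (AndMask plus XorMask); the leftmost character maps to the highest bit. Standalone illustration only.

#include <cassert>
#include <string>

int main() {
  const std::string Ctl = "p1i00";
  unsigned AndMask = 0, OrMask = 0, XorMask = 0;
  for (size_t i = 0; i < Ctl.size(); ++i) {
    unsigned Mask = 1u << (Ctl.size() - 1 - i);
    switch (Ctl[i]) {
    case '1': OrMask |= Mask; break;
    case 'p': AndMask |= Mask; break;
    case 'i': AndMask |= Mask; XorMask |= Mask; break;
    default: break; // '0': bit forced to zero
    }
  }
  assert(AndMask == 0x14 && OrMask == 0x08 && XorMask == 0x04);
  return 0;
}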
3755
3756bool
3757AMDGPUAsmParser::parseSwizzleOffset(int64_t &Imm) {
3758
3759 SMLoc OffsetLoc = Parser.getTok().getLoc();
3760
3761 if (!parseExpr(Imm)) {
3762 return false;
3763 }
3764 if (!isUInt<16>(Imm)) {
3765 Error(OffsetLoc, "expected a 16-bit offset");
3766 return false;
3767 }
3768 return true;
3769}
3770
3771bool
3772AMDGPUAsmParser::parseSwizzleMacro(int64_t &Imm) {
3773 using namespace llvm::AMDGPU::Swizzle;
3774
3775 if (skipToken(AsmToken::LParen, "expected a left parenthesis")) {
3776
3777 SMLoc ModeLoc = Parser.getTok().getLoc();
3778 bool Ok = false;
3779
3780 if (trySkipId(IdSymbolic[ID_QUAD_PERM])) {
3781 Ok = parseSwizzleQuadPerm(Imm);
3782 } else if (trySkipId(IdSymbolic[ID_BITMASK_PERM])) {
3783 Ok = parseSwizzleBitmaskPerm(Imm);
3784 } else if (trySkipId(IdSymbolic[ID_BROADCAST])) {
3785 Ok = parseSwizzleBroadcast(Imm);
3786 } else if (trySkipId(IdSymbolic[ID_SWAP])) {
3787 Ok = parseSwizzleSwap(Imm);
3788 } else if (trySkipId(IdSymbolic[ID_REVERSE])) {
3789 Ok = parseSwizzleReverse(Imm);
3790 } else {
3791 Error(ModeLoc, "expected a swizzle mode");
3792 }
3793
3794 return Ok && skipToken(AsmToken::RParen, "expected a closing parenthesis");
3795 }
3796
3797 return false;
3798}
3799
3800OperandMatchResultTy
3801AMDGPUAsmParser::parseSwizzleOp(OperandVector &Operands) {
3802 SMLoc S = Parser.getTok().getLoc();
3803 int64_t Imm = 0;
3804
3805 if (trySkipId("offset")) {
3806
3807 bool Ok = false;
3808 if (skipToken(AsmToken::Colon, "expected a colon")) {
3809 if (trySkipId("swizzle")) {
3810 Ok = parseSwizzleMacro(Imm);
3811 } else {
3812 Ok = parseSwizzleOffset(Imm);
3813 }
3814 }
3815
3816 Operands.push_back(AMDGPUOperand::CreateImm(this, Imm, S, AMDGPUOperand::ImmTySwizzle));
3817
3818 return Ok? MatchOperand_Success : MatchOperand_ParseFail;
3819 } else {
3820 return MatchOperand_NoMatch;
3821 }
3822}
3823
3824bool
3825AMDGPUOperand::isSwizzle() const {
3826 return isImmTy(ImmTySwizzle);
3827}
3828
3829//===----------------------------------------------------------------------===//
3830// sopp branch targets
3831//===----------------------------------------------------------------------===//
3832
3833OperandMatchResultTy
3834AMDGPUAsmParser::parseSOppBrTarget(OperandVector &Operands) {
3835 SMLoc S = Parser.getTok().getLoc();
3836
3837 switch (getLexer().getKind()) {
3838 default: return MatchOperand_ParseFail;
3839 case AsmToken::Integer: {
3840 int64_t Imm;
3841 if (getParser().parseAbsoluteExpression(Imm))
3842 return MatchOperand_ParseFail;
3843 Operands.push_back(AMDGPUOperand::CreateImm(this, Imm, S));
3844 return MatchOperand_Success;
3845 }
3846
3847 case AsmToken::Identifier:
3848 Operands.push_back(AMDGPUOperand::CreateExpr(this,
3849 MCSymbolRefExpr::create(getContext().getOrCreateSymbol(
3850 Parser.getTok().getString()), getContext()), S));
3851 Parser.Lex();
3852 return MatchOperand_Success;
3853 }
3854}
3855
3856//===----------------------------------------------------------------------===//
3857// mubuf
3858//===----------------------------------------------------------------------===//
3859
3860AMDGPUOperand::Ptr AMDGPUAsmParser::defaultGLC() const {
3861 return AMDGPUOperand::CreateImm(this, 0, SMLoc(), AMDGPUOperand::ImmTyGLC);
3862}
3863
3864AMDGPUOperand::Ptr AMDGPUAsmParser::defaultSLC() const {
3865 return AMDGPUOperand::CreateImm(this, 0, SMLoc(), AMDGPUOperand::ImmTySLC);
3866}
3867
3868AMDGPUOperand::Ptr AMDGPUAsmParser::defaultTFE() const {
3869 return AMDGPUOperand::CreateImm(this, 0, SMLoc(), AMDGPUOperand::ImmTyTFE);
3870}
3871
3872void AMDGPUAsmParser::cvtMubufImpl(MCInst &Inst,
3873 const OperandVector &Operands,
3874 bool IsAtomic, bool IsAtomicReturn) {
3875 OptionalImmIndexMap OptionalIdx;
3876 assert(IsAtomicReturn ? IsAtomic : true);
3877
3878 for (unsigned i = 1, e = Operands.size(); i != e; ++i) {
3879 AMDGPUOperand &Op = ((AMDGPUOperand &)*Operands[i]);
3880
3881 // Add the register arguments
3882 if (Op.isReg()) {
3883 Op.addRegOperands(Inst, 1);
3884 continue;
3885 }
3886
3887 // Handle the case where soffset is an immediate
3888 if (Op.isImm() && Op.getImmTy() == AMDGPUOperand::ImmTyNone) {
3889 Op.addImmOperands(Inst, 1);
3890 continue;
3891 }
3892
3893 // Handle tokens like 'offen' which are sometimes hard-coded into the
3894 // asm string. There are no MCInst operands for these.
3895 if (Op.isToken()) {
3896 continue;
3897 }
3898 assert(Op.isImm());
3899
3900 // Handle optional arguments
3901 OptionalIdx[Op.getImmTy()] = i;
3902 }
3903
3904 // Copy $vdata_in operand and insert as $vdata for MUBUF_Atomic RTN insns.
3905 if (IsAtomicReturn) {
3906 MCInst::iterator I = Inst.begin(); // $vdata_in is always at the beginning.
3907 Inst.insert(I, *I);
3908 }
3909
3910 addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyOffset);
3911 if (!IsAtomic) { // glc is hard-coded.
3912 addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyGLC);
3913 }
3914 addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTySLC);
3915 addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyTFE);
3916}
3917
3918void AMDGPUAsmParser::cvtMtbuf(MCInst &Inst, const OperandVector &Operands) {
3919 OptionalImmIndexMap OptionalIdx;
3920
3921 for (unsigned i = 1, e = Operands.size(); i != e; ++i) {
3922 AMDGPUOperand &Op = ((AMDGPUOperand &)*Operands[i]);
3923
3924 // Add the register arguments
3925 if (Op.isReg()) {
3926 Op.addRegOperands(Inst, 1);
3927 continue;
3928 }
3929
3930 // Handle the case where soffset is an immediate
3931 if (Op.isImm() && Op.getImmTy() == AMDGPUOperand::ImmTyNone) {
3932 Op.addImmOperands(Inst, 1);
3933 continue;
3934 }
3935
3936 // Handle tokens like 'offen' which are sometimes hard-coded into the
3937 // asm string. There are no MCInst operands for these.
3938 if (Op.isToken()) {
3939 continue;
3940 }
3941 assert(Op.isImm());
3942
3943 // Handle optional arguments
3944 OptionalIdx[Op.getImmTy()] = i;
3945 }
3946
3947 addOptionalImmOperand(Inst, Operands, OptionalIdx,
3948 AMDGPUOperand::ImmTyOffset);
3949 addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyDFMT);
3950 addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyNFMT);
3951 addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyGLC);
3952 addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTySLC);
3953 addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyTFE);
3954}
3955
3956//===----------------------------------------------------------------------===//
3957// mimg
3958//===----------------------------------------------------------------------===//
3959
3960void AMDGPUAsmParser::cvtMIMG(MCInst &Inst, const OperandVector &Operands,
3961 bool IsAtomic) {
3962 unsigned I = 1;
3963 const MCInstrDesc &Desc = MII.get(Inst.getOpcode());
3964 for (unsigned J = 0; J < Desc.getNumDefs(); ++J) {
3965 ((AMDGPUOperand &)*Operands[I++]).addRegOperands(Inst, 1);
3966 }
3967
3968 if (IsAtomic) {
3969 // Add src, same as dst
3970 ((AMDGPUOperand &)*Operands[I]).addRegOperands(Inst, 1);
3971 }
3972
3973 OptionalImmIndexMap OptionalIdx;
3974
3975 for (unsigned E = Operands.size(); I != E; ++I) {
3976 AMDGPUOperand &Op = ((AMDGPUOperand &)*Operands[I]);
3977
3978 // Add the register arguments
3979 if (Op.isRegOrImm()) {
3980 Op.addRegOrImmOperands(Inst, 1);
3981 continue;
3982 } else if (Op.isImmModifier()) {
3983 OptionalIdx[Op.getImmTy()] = I;
3984 } else {
3985 llvm_unreachable("unexpected operand type");
3986 }
3987 }
3988
3989 addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyDMask);
3990 addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyUNorm);
3991 addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyGLC);
3992 addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyDA);
3993 addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyR128);
3994 addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyTFE);
3995 addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyLWE);
3996 addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTySLC);
3997}
3998
3999void AMDGPUAsmParser::cvtMIMGAtomic(MCInst &Inst, const OperandVector &Operands) {
4000 cvtMIMG(Inst, Operands, true);
4001}
4002
4003AMDGPUOperand::Ptr AMDGPUAsmParser::defaultDMask() const {
4004 return AMDGPUOperand::CreateImm(this, 0, SMLoc(), AMDGPUOperand::ImmTyDMask);
4005}
4006
4007AMDGPUOperand::Ptr AMDGPUAsmParser::defaultUNorm() const {
4008 return AMDGPUOperand::CreateImm(this, 0, SMLoc(), AMDGPUOperand::ImmTyUNorm);
4009}
4010
4011AMDGPUOperand::Ptr AMDGPUAsmParser::defaultDA() const {
4012 return AMDGPUOperand::CreateImm(this, 0, SMLoc(), AMDGPUOperand::ImmTyDA);
4013}
4014
4015AMDGPUOperand::Ptr AMDGPUAsmParser::defaultR128() const {
4016 return AMDGPUOperand::CreateImm(this, 0, SMLoc(), AMDGPUOperand::ImmTyR128);
4017}
4018
4019AMDGPUOperand::Ptr AMDGPUAsmParser::defaultLWE() const {
4020 return AMDGPUOperand::CreateImm(this, 0, SMLoc(), AMDGPUOperand::ImmTyLWE);
4021}
4022
4023//===----------------------------------------------------------------------===//
4024// smrd
4025//===----------------------------------------------------------------------===//
4026
4027bool AMDGPUOperand::isSMRDOffset8() const {
4028 return isImm() && isUInt<8>(getImm());
4029}
4030
4031bool AMDGPUOperand::isSMRDOffset20() const {
4032 return isImm() && isUInt<20>(getImm());
4033}
4034
4035bool AMDGPUOperand::isSMRDLiteralOffset() const {
4036 // 32-bit literals are only supported on CI and we only want to use them
4037 // when the offset is > 8-bits.
4038 return isImm() && !isUInt<8>(getImm()) && isUInt<32>(getImm());
4039}
4040
4041AMDGPUOperand::Ptr AMDGPUAsmParser::defaultSMRDOffset8() const {
4042 return AMDGPUOperand::CreateImm(this, 0, SMLoc(), AMDGPUOperand::ImmTyOffset);
4043}
4044
4045AMDGPUOperand::Ptr AMDGPUAsmParser::defaultSMRDOffset20() const {
4046 return AMDGPUOperand::CreateImm(this, 0, SMLoc(), AMDGPUOperand::ImmTyOffset);
4047}
4048
4049AMDGPUOperand::Ptr AMDGPUAsmParser::defaultSMRDLiteralOffset() const {
4050 return AMDGPUOperand::CreateImm(this, 0, SMLoc(), AMDGPUOperand::ImmTyOffset);
4051}
4052
4053AMDGPUOperand::Ptr AMDGPUAsmParser::defaultOffsetU12() const {
4054 return AMDGPUOperand::CreateImm(this, 0, SMLoc(), AMDGPUOperand::ImmTyOffset);
4055}
4056
4057AMDGPUOperand::Ptr AMDGPUAsmParser::defaultOffsetS13() const {
4058 return AMDGPUOperand::CreateImm(this, 0, SMLoc(), AMDGPUOperand::ImmTyOffset);
4059}
4060
4061//===----------------------------------------------------------------------===//
4062// vop3
4063//===----------------------------------------------------------------------===//
4064
4065static bool ConvertOmodMul(int64_t &Mul) {
4066 if (Mul != 1 && Mul != 2 && Mul != 4)
4067 return false;
4068
4069 Mul >>= 1;
4070 return true;
4071}
4072
4073static bool ConvertOmodDiv(int64_t &Div) {
4074 if (Div == 1) {
4075 Div = 0;
4076 return true;
4077 }
4078
4079 if (Div == 2) {
4080 Div = 3;
4081 return true;
4082 }
4083
4084 return false;
4085}
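
As a quick reference for the two conversions above: mul:1/mul:2/mul:4 encode to 0/1/2 (the Mul >>= 1), and div:2 encodes to 3; div:1, like mul:1, means "no output modifier" and encodes to 0. A standalone mirror of the logic:

#include <cassert>
#include <cstdint>

static bool omodMul(int64_t &Mul) { // mirrors ConvertOmodMul
  if (Mul != 1 && Mul != 2 && Mul != 4)
    return false;
  Mul >>= 1;
  return true;
}

static bool omodDiv(int64_t &Div) { // mirrors ConvertOmodDiv
  if (Div == 1) { Div = 0; return true; }
  if (Div == 2) { Div = 3; return true; }
  return false;
}

int main() {
  int64_t V = 4;
  assert(omodMul(V) && V == 2); // mul:4 -> encoding 2
  V = 2;
  assert(omodDiv(V) && V == 3); // div:2 -> encoding 3
  V = 3;
  assert(!omodMul(V));          // mul:3 is rejected
  return 0;
}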
4086
4087static bool ConvertBoundCtrl(int64_t &BoundCtrl) {
4088 if (BoundCtrl == 0) {
4089 BoundCtrl = 1;
4090 return true;
4091 }
4092
4093 if (BoundCtrl == -1) {
4094 BoundCtrl = 0;
4095 return true;
4096 }
4097
4098 return false;
4099}
4100
4101// Note: the order in this table matches the order of operands in AsmString.
4102static const OptionalOperand AMDGPUOptionalOperandTable[] = {
4103 {"offen", AMDGPUOperand::ImmTyOffen, true, nullptr},
4104 {"idxen", AMDGPUOperand::ImmTyIdxen, true, nullptr},
4105 {"addr64", AMDGPUOperand::ImmTyAddr64, true, nullptr},
4106 {"offset0", AMDGPUOperand::ImmTyOffset0, false, nullptr},
4107 {"offset1", AMDGPUOperand::ImmTyOffset1, false, nullptr},
4108 {"gds", AMDGPUOperand::ImmTyGDS, true, nullptr},
4109 {"offset", AMDGPUOperand::ImmTyOffset, false, nullptr},
4110 {"dfmt", AMDGPUOperand::ImmTyDFMT, false, nullptr},
4111 {"nfmt", AMDGPUOperand::ImmTyNFMT, false, nullptr},
4112 {"glc", AMDGPUOperand::ImmTyGLC, true, nullptr},
4113 {"slc", AMDGPUOperand::ImmTySLC, true, nullptr},
4114 {"tfe", AMDGPUOperand::ImmTyTFE, true, nullptr},
4115 {"high", AMDGPUOperand::ImmTyHigh, true, nullptr},
4116 {"clamp", AMDGPUOperand::ImmTyClampSI, true, nullptr},
4117 {"omod", AMDGPUOperand::ImmTyOModSI, false, ConvertOmodMul},
4118 {"unorm", AMDGPUOperand::ImmTyUNorm, true, nullptr},
4119 {"da", AMDGPUOperand::ImmTyDA, true, nullptr},
4120 {"r128", AMDGPUOperand::ImmTyR128, true, nullptr},
4121 {"lwe", AMDGPUOperand::ImmTyLWE, true, nullptr},
4122 {"dmask", AMDGPUOperand::ImmTyDMask, false, nullptr},
4123 {"row_mask", AMDGPUOperand::ImmTyDppRowMask, false, nullptr},
4124 {"bank_mask", AMDGPUOperand::ImmTyDppBankMask, false, nullptr},
4125 {"bound_ctrl", AMDGPUOperand::ImmTyDppBoundCtrl, false, ConvertBoundCtrl},
4126 {"dst_sel", AMDGPUOperand::ImmTySdwaDstSel, false, nullptr},
4127 {"src0_sel", AMDGPUOperand::ImmTySdwaSrc0Sel, false, nullptr},
4128 {"src1_sel", AMDGPUOperand::ImmTySdwaSrc1Sel, false, nullptr},
4129 {"dst_unused", AMDGPUOperand::ImmTySdwaDstUnused, false, nullptr},
4130 {"compr", AMDGPUOperand::ImmTyExpCompr, true, nullptr },
4131 {"vm", AMDGPUOperand::ImmTyExpVM, true, nullptr},
4132 {"op_sel", AMDGPUOperand::ImmTyOpSel, false, nullptr},
4133 {"op_sel_hi", AMDGPUOperand::ImmTyOpSelHi, false, nullptr},
4134 {"neg_lo", AMDGPUOperand::ImmTyNegLo, false, nullptr},
4135 {"neg_hi", AMDGPUOperand::ImmTyNegHi, false, nullptr}
4136};
4137
4138OperandMatchResultTy AMDGPUAsmParser::parseOptionalOperand(OperandVector &Operands) {
4139 OperandMatchResultTy res;
4140 for (const OptionalOperand &Op : AMDGPUOptionalOperandTable) {
4141 // try to parse any optional operand here
4142 if (Op.IsBit) {
4143 res = parseNamedBit(Op.Name, Operands, Op.Type);
4144 } else if (Op.Type == AMDGPUOperand::ImmTyOModSI) {
4145 res = parseOModOperand(Operands);
4146 } else if (Op.Type == AMDGPUOperand::ImmTySdwaDstSel ||
4147 Op.Type == AMDGPUOperand::ImmTySdwaSrc0Sel ||
4148 Op.Type == AMDGPUOperand::ImmTySdwaSrc1Sel) {
4149 res = parseSDWASel(Operands, Op.Name, Op.Type);
4150 } else if (Op.Type == AMDGPUOperand::ImmTySdwaDstUnused) {
4151 res = parseSDWADstUnused(Operands);
4152 } else if (Op.Type == AMDGPUOperand::ImmTyOpSel ||
4153 Op.Type == AMDGPUOperand::ImmTyOpSelHi ||
4154 Op.Type == AMDGPUOperand::ImmTyNegLo ||
4155 Op.Type == AMDGPUOperand::ImmTyNegHi) {
4156 res = parseOperandArrayWithPrefix(Op.Name, Operands, Op.Type,
4157 Op.ConvertResult);
4158 } else {
4159 res = parseIntWithPrefix(Op.Name, Operands, Op.Type, Op.ConvertResult);
4160 }
4161 if (res != MatchOperand_NoMatch) {
4162 return res;
4163 }
4164 }
4165 return MatchOperand_NoMatch;
4166}
4167
4168OperandMatchResultTy AMDGPUAsmParser::parseOModOperand(OperandVector &Operands) {
4169 StringRef Name = Parser.getTok().getString();
4170 if (Name == "mul") {
4171 return parseIntWithPrefix("mul", Operands,
4172 AMDGPUOperand::ImmTyOModSI, ConvertOmodMul);
4173 }
4174
4175 if (Name == "div") {
4176 return parseIntWithPrefix("div", Operands,
4177 AMDGPUOperand::ImmTyOModSI, ConvertOmodDiv);
4178 }
4179
4180 return MatchOperand_NoMatch;
4181}
4182
4183void AMDGPUAsmParser::cvtVOP3OpSel(MCInst &Inst, const OperandVector &Operands) {
4184 cvtVOP3P(Inst, Operands);
4185
4186 int Opc = Inst.getOpcode();
4187
4188 int SrcNum;
4189 const int Ops[] = { AMDGPU::OpName::src0,
4190 AMDGPU::OpName::src1,
4191 AMDGPU::OpName::src2 };
4192 for (SrcNum = 0;
4193 SrcNum < 3 && AMDGPU::getNamedOperandIdx(Opc, Ops[SrcNum]) != -1;
4194 ++SrcNum);
4195 assert(SrcNum > 0);
4196
4197 int OpSelIdx = AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::op_sel);
4198 unsigned OpSel = Inst.getOperand(OpSelIdx).getImm();
4199
4200 if ((OpSel & (1 << SrcNum)) != 0) {
4201 int ModIdx = AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::src0_modifiers);
4202 uint32_t ModVal = Inst.getOperand(ModIdx).getImm();
4203 Inst.getOperand(ModIdx).setImm(ModVal | SISrcMods::DST_OP_SEL);
4204 }
4205}
4206
4207static bool isRegOrImmWithInputMods(const MCInstrDesc &Desc, unsigned OpNum) {
4208 // 1. This operand is input modifiers
4209 return Desc.OpInfo[OpNum].OperandType == AMDGPU::OPERAND_INPUT_MODS
4210 // 2. This is not last operand
4211 && Desc.NumOperands > (OpNum + 1)
4212 // 3. Next operand is register class
4213 && Desc.OpInfo[OpNum + 1].RegClass != -1
4214 // 4. Next register is not tied to any other operand
4215 && Desc.getOperandConstraint(OpNum + 1, MCOI::OperandConstraint::TIED_TO) == -1;
4216}
4217
4218void AMDGPUAsmParser::cvtVOP3Interp(MCInst &Inst, const OperandVector &Operands)
4219{
4220 OptionalImmIndexMap OptionalIdx;
4221 unsigned Opc = Inst.getOpcode();
4222
4223 unsigned I = 1;
4224 const MCInstrDesc &Desc = MII.get(Inst.getOpcode());
4225 for (unsigned J = 0; J < Desc.getNumDefs(); ++J) {
4226 ((AMDGPUOperand &)*Operands[I++]).addRegOperands(Inst, 1);
4227 }
4228
4229 for (unsigned E = Operands.size(); I != E; ++I) {
4230 AMDGPUOperand &Op = ((AMDGPUOperand &)*Operands[I]);
4231 if (isRegOrImmWithInputMods(Desc, Inst.getNumOperands())) {
4232 Op.addRegOrImmWithFPInputModsOperands(Inst, 2);
4233 } else if (Op.isInterpSlot() ||
4234 Op.isInterpAttr() ||
4235 Op.isAttrChan()) {
4236 Inst.addOperand(MCOperand::createImm(Op.Imm.Val));
4237 } else if (Op.isImmModifier()) {
4238 OptionalIdx[Op.getImmTy()] = I;
4239 } else {
4240 llvm_unreachable("unhandled operand type");
4241 }
4242 }
4243
4244 if (AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::high) != -1) {
4245 addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyHigh);
4246 }
4247
4248 if (AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::clamp) != -1) {
4249 addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyClampSI);
4250 }
4251
4252 if (AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::omod) != -1) {
4253 addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyOModSI);
4254 }
4255}
4256
4257void AMDGPUAsmParser::cvtVOP3(MCInst &Inst, const OperandVector &Operands,
4258 OptionalImmIndexMap &OptionalIdx) {
4259 unsigned Opc = Inst.getOpcode();
4260
4261 unsigned I = 1;
4262 const MCInstrDesc &Desc = MII.get(Inst.getOpcode());
4263 for (unsigned J = 0; J < Desc.getNumDefs(); ++J) {
4264 ((AMDGPUOperand &)*Operands[I++]).addRegOperands(Inst, 1);
4265 }
4266
4267 if (AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::src0_modifiers) != -1) {
4268 // This instruction has src modifiers
4269 for (unsigned E = Operands.size(); I != E; ++I) {
4270 AMDGPUOperand &Op = ((AMDGPUOperand &)*Operands[I]);
4271 if (isRegOrImmWithInputMods(Desc, Inst.getNumOperands())) {
4272 Op.addRegOrImmWithFPInputModsOperands(Inst, 2);
4273 } else if (Op.isImmModifier()) {
4274 OptionalIdx[Op.getImmTy()] = I;
4275 } else if (Op.isRegOrImm()) {
4276 Op.addRegOrImmOperands(Inst, 1);
4277 } else {
4278 llvm_unreachable("unhandled operand type");
4279 }
4280 }
4281 } else {
4282 // No src modifiers
4283 for (unsigned E = Operands.size(); I != E; ++I) {
4284 AMDGPUOperand &Op = ((AMDGPUOperand &)*Operands[I]);
4285 if (Op.isMod()) {
4286 OptionalIdx[Op.getImmTy()] = I;
4287 } else {
4288 Op.addRegOrImmOperands(Inst, 1);
4289 }
4290 }
4291 }
4292
4293 if (AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::clamp) != -1) {
4294 addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyClampSI);
4295 }
4296
4297 if (AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::omod) != -1) {
4298 addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyOModSI);
4299 }
4300
4301 // special case v_mac_{f16, f32}:
4302 // it has src2 register operand that is tied to dst operand
4303 // we don't allow modifiers for this operand in assembler so src2_modifiers
4304 // should be 0
4305 if (Opc == AMDGPU::V_MAC_F32_e64_si || Opc == AMDGPU::V_MAC_F32_e64_vi ||
4306 Opc == AMDGPU::V_MAC_F16_e64_vi) {
4307 auto it = Inst.begin();
4308 std::advance(it, AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::src2_modifiers));
4309 it = Inst.insert(it, MCOperand::createImm(0)); // no modifiers for src2
4310 ++it;
4311 Inst.insert(it, Inst.getOperand(0)); // src2 = dst
4312 }
4313}
4314
4315void AMDGPUAsmParser::cvtVOP3(MCInst &Inst, const OperandVector &Operands) {
4316 OptionalImmIndexMap OptionalIdx;
4317 cvtVOP3(Inst, Operands, OptionalIdx);
4318}
4319
4320void AMDGPUAsmParser::cvtVOP3P(MCInst &Inst,
4321 const OperandVector &Operands) {
4322 OptionalImmIndexMap OptIdx;
4323 const int Opc = Inst.getOpcode();
4324 const MCInstrDesc &Desc = MII.get(Opc);
4325
4326 const bool IsPacked = (Desc.TSFlags & SIInstrFlags::IsPacked) != 0;
4327
4328 cvtVOP3(Inst, Operands, OptIdx);
4329
4330 if (AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::vdst_in) != -1) {
4331 assert(!IsPacked);
4332 Inst.addOperand(Inst.getOperand(0));
4333 }
4334
4335 // FIXME: This is messy. Parse the modifiers as if it was a normal VOP3
4336 // instruction, and then figure out where to actually put the modifiers
4337
4338 addOptionalImmOperand(Inst, Operands, OptIdx, AMDGPUOperand::ImmTyOpSel);
4339
4340 int OpSelHiIdx = AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::op_sel_hi);
4341 if (OpSelHiIdx != -1) {
4342 int DefaultVal = IsPacked ? -1 : 0;
4343 addOptionalImmOperand(Inst, Operands, OptIdx, AMDGPUOperand::ImmTyOpSelHi,
4344 DefaultVal);
4345 }
4346
4347 int NegLoIdx = AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::neg_lo);
4348 if (NegLoIdx != -1) {
4349 assert(IsPacked);
4350 addOptionalImmOperand(Inst, Operands, OptIdx, AMDGPUOperand::ImmTyNegLo);
4351 addOptionalImmOperand(Inst, Operands, OptIdx, AMDGPUOperand::ImmTyNegHi);
4352 }
4353
4354 const int Ops[] = { AMDGPU::OpName::src0,
4355 AMDGPU::OpName::src1,
4356 AMDGPU::OpName::src2 };
4357 const int ModOps[] = { AMDGPU::OpName::src0_modifiers,
4358 AMDGPU::OpName::src1_modifiers,
4359 AMDGPU::OpName::src2_modifiers };
4360
4361 int OpSelIdx = AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::op_sel);
4362
4363 unsigned OpSel = Inst.getOperand(OpSelIdx).getImm();
4364 unsigned OpSelHi = 0;
4365 unsigned NegLo = 0;
4366 unsigned NegHi = 0;
4367
4368 if (OpSelHiIdx != -1) {
4369 OpSelHi = Inst.getOperand(OpSelHiIdx).getImm();
4370 }
4371
4372 if (NegLoIdx != -1) {
4373 int NegHiIdx = AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::neg_hi);
4374 NegLo = Inst.getOperand(NegLoIdx).getImm();
4375 NegHi = Inst.getOperand(NegHiIdx).getImm();
4376 }
4377
4378 for (int J = 0; J < 3; ++J) {
4379 int OpIdx = AMDGPU::getNamedOperandIdx(Opc, Ops[J]);
4380 if (OpIdx == -1)
4381 break;
4382
4383 uint32_t ModVal = 0;
4384
4385 if ((OpSel & (1 << J)) != 0)
4386 ModVal |= SISrcMods::OP_SEL_0;
4387
4388 if ((OpSelHi & (1 << J)) != 0)
4389 ModVal |= SISrcMods::OP_SEL_1;
4390
4391 if ((NegLo & (1 << J)) != 0)
4392 ModVal |= SISrcMods::NEG;
4393
4394 if ((NegHi & (1 << J)) != 0)
4395 ModVal |= SISrcMods::NEG_HI;
4396
4397 int ModIdx = AMDGPU::getNamedOperandIdx(Opc, ModOps[J]);
4398
4399 Inst.getOperand(ModIdx).setImm(Inst.getOperand(ModIdx).getImm() | ModVal);
4400 }
4401}
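
The loop above fans the packed op_sel/op_sel_hi/neg_lo/neg_hi bit vectors out into the per-source modifier operands: bit J of each field contributes one flag to srcJ_modifiers. A standalone sketch of that per-source computation; the OP_SEL_0/OP_SEL_1/NEG/NEG_HI bit positions below are assumptions for illustration (the real values are in SISrcMods).

#include <cassert>
#include <cstdint>

enum : uint32_t { NEG = 1, NEG_HI = 0x100, OP_SEL_0 = 4, OP_SEL_1 = 8 }; // assumed

static uint32_t srcModsFor(int J, unsigned OpSel, unsigned OpSelHi,
                           unsigned NegLo, unsigned NegHi) {
  uint32_t ModVal = 0;
  if (OpSel   & (1u << J)) ModVal |= OP_SEL_0;
  if (OpSelHi & (1u << J)) ModVal |= OP_SEL_1;
  if (NegLo   & (1u << J)) ModVal |= NEG;
  if (NegHi   & (1u << J)) ModVal |= NEG_HI;
  return ModVal;
}

int main() {
  // op_sel:[0,1,0] op_sel_hi:[1,1,1] neg_lo:[1,0,0]: src1 gets both op_sel bits.
  assert(srcModsFor(1, /*OpSel=*/0b010, /*OpSelHi=*/0b111,
                    /*NegLo=*/0b001, /*NegHi=*/0) == (OP_SEL_0 | OP_SEL_1));
  return 0;
}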
4402
4403//===----------------------------------------------------------------------===//
4404// dpp
4405//===----------------------------------------------------------------------===//
4406
4407bool AMDGPUOperand::isDPPCtrl() const {
4408 bool result = isImm() && getImmTy() == ImmTyDppCtrl && isUInt<9>(getImm());
4409 if (result) {
4410 int64_t Imm = getImm();
4411 return ((Imm >= 0x000) && (Imm <= 0x0ff)) ||
4412 ((Imm >= 0x101) && (Imm <= 0x10f)) ||
4413 ((Imm >= 0x111) && (Imm <= 0x11f)) ||
4414 ((Imm >= 0x121) && (Imm <= 0x12f)) ||
4415 (Imm == 0x130) ||
4416 (Imm == 0x134) ||
4417 (Imm == 0x138) ||
4418 (Imm == 0x13c) ||
4419 (Imm == 0x140) ||
4420 (Imm == 0x141) ||
4421 (Imm == 0x142) ||
4422 (Imm == 0x143);
4423 }
4424 return false;
4425}
4426
4427bool AMDGPUOperand::isGPRIdxMode() const {
4428 return isImm() && isUInt<4>(getImm());
4429}
4430
4431bool AMDGPUOperand::isS16Imm() const {
4432 return isImm() && (isInt<16>(getImm()) || isUInt<16>(getImm()));
4433}
4434
4435bool AMDGPUOperand::isU16Imm() const {
4436 return isImm() && isUInt<16>(getImm());
4437}
4438
4439OperandMatchResultTy
4440AMDGPUAsmParser::parseDPPCtrl(OperandVector &Operands) {
4441 SMLoc S = Parser.getTok().getLoc();
4442 StringRef Prefix;
4443 int64_t Int;
4444
4445 if (getLexer().getKind() == AsmToken::Identifier) {
4446 Prefix = Parser.getTok().getString();
4447 } else {
4448 return MatchOperand_NoMatch;
4449 }
4450
4451 if (Prefix == "row_mirror") {
4452 Int = 0x140;
4453 Parser.Lex();
4454 } else if (Prefix == "row_half_mirror") {
4455 Int = 0x141;
4456 Parser.Lex();
4457 } else {
4458 // Check to prevent parseDPPCtrlOps from eating invalid tokens
4459 if (Prefix != "quad_perm"
4460 && Prefix != "row_shl"
4461 && Prefix != "row_shr"
4462 && Prefix != "row_ror"
4463 && Prefix != "wave_shl"
4464 && Prefix != "wave_rol"
4465 && Prefix != "wave_shr"
4466 && Prefix != "wave_ror"
4467 && Prefix != "row_bcast") {
4468 return MatchOperand_NoMatch;
4469 }
4470
4471 Parser.Lex();
4472 if (getLexer().isNot(AsmToken::Colon))
4473 return MatchOperand_ParseFail;
4474
4475 if (Prefix == "quad_perm") {
4476 // quad_perm:[%d,%d,%d,%d]
4477 Parser.Lex();
4478 if (getLexer().isNot(AsmToken::LBrac))
4479 return MatchOperand_ParseFail;
4480 Parser.Lex();
4481
4482 if (getParser().parseAbsoluteExpression(Int) || !(0 <= Int && Int <=3))
4483 return MatchOperand_ParseFail;
4484
4485 for (int i = 0; i < 3; ++i) {
4486 if (getLexer().isNot(AsmToken::Comma))
4487 return MatchOperand_ParseFail;
4488 Parser.Lex();
4489
4490 int64_t Temp;
4491 if (getParser().parseAbsoluteExpression(Temp) || !(0 <= Temp && Temp <=3))
4492 return MatchOperand_ParseFail;
4493 const int shift = i*2 + 2;
4494 Int += (Temp << shift);
4495 }
4496
4497 if (getLexer().isNot(AsmToken::RBrac))
4498 return MatchOperand_ParseFail;
4499 Parser.Lex();
4500 } else {
4501 // sel:%d
4502 Parser.Lex();
4503 if (getParser().parseAbsoluteExpression(Int))
4504 return MatchOperand_ParseFail;
4505
4506 if (Prefix == "row_shl" && 1 <= Int && Int <= 15) {
4507 Int |= 0x100;
4508 } else if (Prefix == "row_shr" && 1 <= Int && Int <= 15) {
4509 Int |= 0x110;
4510 } else if (Prefix == "row_ror" && 1 <= Int && Int <= 15) {
4511 Int |= 0x120;
4512 } else if (Prefix == "wave_shl" && 1 == Int) {
4513 Int = 0x130;
4514 } else if (Prefix == "wave_rol" && 1 == Int) {
4515 Int = 0x134;
4516 } else if (Prefix == "wave_shr" && 1 == Int) {
4517 Int = 0x138;
4518 } else if (Prefix == "wave_ror" && 1 == Int) {
4519 Int = 0x13C;
4520 } else if (Prefix == "row_bcast") {
4521 if (Int == 15) {
4522 Int = 0x142;
4523 } else if (Int == 31) {
4524 Int = 0x143;
4525 } else {
4526 return MatchOperand_ParseFail;
4527 }
4528 } else {
4529 return MatchOperand_ParseFail;
4530 }
4531 }
4532 }
4533
4534 Operands.push_back(AMDGPUOperand::CreateImm(this, Int, S, AMDGPUOperand::ImmTyDppCtrl));
4535 return MatchOperand_Success;
4536}
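
For reference, the dpp_ctrl encodings produced above (derived from the branches of parseDPPCtrl; illustration only), with a small check of the quad_perm packing:

// quad_perm:[a,b,c,d] -> 0x000..0x0ff (a | b<<2 | c<<4 | d<<6)
// row_shl:1..15  -> 0x101..0x10f      row_shr:1..15 -> 0x111..0x11f
// row_ror:1..15  -> 0x121..0x12f      wave_shl:1    -> 0x130
// wave_rol:1     -> 0x134             wave_shr:1    -> 0x138
// wave_ror:1     -> 0x13c             row_mirror    -> 0x140
// row_half_mirror -> 0x141            row_bcast:15  -> 0x142
// row_bcast:31   -> 0x143
#include <cassert>

static int quadPerm(int a, int b, int c, int d) {
  // Mirrors the loop above: first lane in bits [1:0], then shift = i*2 + 2.
  return a | (b << 2) | (c << 4) | (d << 6);
}

int main() {
  assert(quadPerm(0, 1, 2, 3) == 0xe4); // the identity permutation
  return 0;
}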
4537
4538AMDGPUOperand::Ptr AMDGPUAsmParser::defaultRowMask() const {
4539 return AMDGPUOperand::CreateImm(this, 0xf, SMLoc(), AMDGPUOperand::ImmTyDppRowMask);
4540}
4541
4542AMDGPUOperand::Ptr AMDGPUAsmParser::defaultBankMask() const {
4543 return AMDGPUOperand::CreateImm(this, 0xf, SMLoc(), AMDGPUOperand::ImmTyDppBankMask);
4544}
4545
4546AMDGPUOperand::Ptr AMDGPUAsmParser::defaultBoundCtrl() const {
4547 return AMDGPUOperand::CreateImm(this, 0, SMLoc(), AMDGPUOperand::ImmTyDppBoundCtrl);
4548}
4549
4550void AMDGPUAsmParser::cvtDPP(MCInst &Inst, const OperandVector &Operands) {
4551 OptionalImmIndexMap OptionalIdx;
4552
4553 unsigned I = 1;
4554 const MCInstrDesc &Desc = MII.get(Inst.getOpcode());
4555 for (unsigned J = 0; J < Desc.getNumDefs(); ++J) {
4556 ((AMDGPUOperand &)*Operands[I++]).addRegOperands(Inst, 1);
4557 }
4558
4559 // All DPP instructions with at least one source operand have a fake "old"
4560 // source at the beginning that's tied to the dst operand. Handle it here.
4561 if (Desc.getNumOperands() >= 2)
4562 Inst.addOperand(Inst.getOperand(0));
4563
4564 for (unsigned E = Operands.size(); I != E; ++I) {
4565 AMDGPUOperand &Op = ((AMDGPUOperand &)*Operands[I]);
4566 // Add the register arguments
4567 if (Op.isReg() && Op.Reg.RegNo == AMDGPU::VCC) {
4568 // VOP2b (v_add_u32, v_sub_u32 ...) dpp use "vcc" token.
4569 // Skip it.
4570 continue;
4571 } else if (isRegOrImmWithInputMods(Desc, Inst.getNumOperands())) {
4572 Op.addRegWithFPInputModsOperands(Inst, 2);
4573 } else if (Op.isDPPCtrl()) {
4574 Op.addImmOperands(Inst, 1);
4575 } else if (Op.isImm()) {
4576 // Handle optional arguments
4577 OptionalIdx[Op.getImmTy()] = I;
4578 } else {
4579 llvm_unreachable("Invalid operand type");
4580 }
4581 }
4582
4583 addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyDppRowMask, 0xf);
4584 addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyDppBankMask, 0xf);
4585 addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyDppBoundCtrl);
4586}
4587
4588//===----------------------------------------------------------------------===//
4589// sdwa
4590//===----------------------------------------------------------------------===//
4591
4592OperandMatchResultTy
4593AMDGPUAsmParser::parseSDWASel(OperandVector &Operands, StringRef Prefix,
4594 AMDGPUOperand::ImmTy Type) {
4595 using namespace llvm::AMDGPU::SDWA;
4596
4597 SMLoc S = Parser.getTok().getLoc();
4598 StringRef Value;
4599 OperandMatchResultTy res;
4600
4601 res = parseStringWithPrefix(Prefix, Value);
4602 if (res != MatchOperand_Success) {
4603 return res;
4604 }
4605
4606 int64_t Int;
4607 Int = StringSwitch<int64_t>(Value)
4608 .Case("BYTE_0", SdwaSel::BYTE_0)
4609 .Case("BYTE_1", SdwaSel::BYTE_1)
4610 .Case("BYTE_2", SdwaSel::BYTE_2)
4611 .Case("BYTE_3", SdwaSel::BYTE_3)
4612 .Case("WORD_0", SdwaSel::WORD_0)
4613 .Case("WORD_1", SdwaSel::WORD_1)
4614 .Case("DWORD", SdwaSel::DWORD)
4615 .Default(0xffffffff);
4616 Parser.Lex(); // eat last token
4617
4618 if (Int == 0xffffffff) {
4619 return MatchOperand_ParseFail;
4620 }
4621
4622 Operands.push_back(AMDGPUOperand::CreateImm(this, Int, S, Type));
4623 return MatchOperand_Success;
4624}
4625
4626OperandMatchResultTy
4627AMDGPUAsmParser::parseSDWADstUnused(OperandVector &Operands) {
4628 using namespace llvm::AMDGPU::SDWA;
4629
4630 SMLoc S = Parser.getTok().getLoc();
4631 StringRef Value;
4632 OperandMatchResultTy res;
4633
4634 res = parseStringWithPrefix("dst_unused", Value);
4635 if (res != MatchOperand_Success) {
4636 return res;
4637 }
4638
4639 int64_t Int;
4640 Int = StringSwitch<int64_t>(Value)
4641 .Case("UNUSED_PAD", DstUnused::UNUSED_PAD)
4642 .Case("UNUSED_SEXT", DstUnused::UNUSED_SEXT)
4643 .Case("UNUSED_PRESERVE", DstUnused::UNUSED_PRESERVE)
4644 .Default(0xffffffff);
4645 Parser.Lex(); // eat last token
4646
4647 if (Int == 0xffffffff) {
4648 return MatchOperand_ParseFail;
4649 }
4650
4651 Operands.push_back(AMDGPUOperand::CreateImm(this, Int, S, AMDGPUOperand::ImmTySdwaDstUnused));
4652 return MatchOperand_Success;
4653}
4654
4655void AMDGPUAsmParser::cvtSdwaVOP1(MCInst &Inst, const OperandVector &Operands) {
4656 cvtSDWA(Inst, Operands, SIInstrFlags::VOP1);
4657}
4658
4659void AMDGPUAsmParser::cvtSdwaVOP2(MCInst &Inst, const OperandVector &Operands) {
4660 cvtSDWA(Inst, Operands, SIInstrFlags::VOP2);
4661}
4662
4663void AMDGPUAsmParser::cvtSdwaVOP2b(MCInst &Inst, const OperandVector &Operands) {
4664 cvtSDWA(Inst, Operands, SIInstrFlags::VOP2, true);
4665}
4666
4667void AMDGPUAsmParser::cvtSdwaVOPC(MCInst &Inst, const OperandVector &Operands) {
4668 cvtSDWA(Inst, Operands, SIInstrFlags::VOPC, isVI());
4669}
4670
4671void AMDGPUAsmParser::cvtSDWA(MCInst &Inst, const OperandVector &Operands,
4672 uint64_t BasicInstType, bool skipVcc) {
4673 using namespace llvm::AMDGPU::SDWA;
4674
4675 OptionalImmIndexMap OptionalIdx;
4676 bool skippedVcc = false;
4677
4678 unsigned I = 1;
4679 const MCInstrDesc &Desc = MII.get(Inst.getOpcode());
4680 for (unsigned J = 0; J < Desc.getNumDefs(); ++J) {
4681 ((AMDGPUOperand &)*Operands[I++]).addRegOperands(Inst, 1);
4682 }
4683
4684 for (unsigned E = Operands.size(); I != E; ++I) {
4685 AMDGPUOperand &Op = ((AMDGPUOperand &)*Operands[I]);
4686 if (skipVcc && !skippedVcc && Op.isReg() && Op.Reg.RegNo == AMDGPU::VCC) {
4687 // VOP2b (v_add_u32, v_sub_u32 ...) sdwa use "vcc" token as dst.
4688 // Skip it if it's 2nd (e.g. v_add_i32_sdwa v1, vcc, v2, v3)
4689 // or 4th (v_addc_u32_sdwa v1, vcc, v2, v3, vcc) operand.
4690 // Skip VCC only if we didn't skip it on previous iteration.
4691 if (BasicInstType == SIInstrFlags::VOP2 &&
4692 (Inst.getNumOperands() == 1 || Inst.getNumOperands() == 5)) {
4693 skippedVcc = true;
4694 continue;
4695 } else if (BasicInstType == SIInstrFlags::VOPC &&
4696 Inst.getNumOperands() == 0) {
4697 skippedVcc = true;
4698 continue;
4699 }
4700 }
4701 if (isRegOrImmWithInputMods(Desc, Inst.getNumOperands())) {
4702 Op.addRegWithInputModsOperands(Inst, 2);
4703 } else if (Op.isImm()) {
4704 // Handle optional arguments
4705 OptionalIdx[Op.getImmTy()] = I;
4706 } else {
4707 llvm_unreachable("Invalid operand type");
4708 }
4709 skippedVcc = false;
4710 }
4711
4712 if (Inst.getOpcode() != AMDGPU::V_NOP_sdwa_gfx9 &&
4713 Inst.getOpcode() != AMDGPU::V_NOP_sdwa_vi) {
4714 // V_NOP_sdwa_vi/V_NOP_sdwa_gfx9 have no optional sdwa arguments
4715 switch (BasicInstType) {
4716 case SIInstrFlags::VOP1:
4717 addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyClampSI, 0);
4718 if (AMDGPU::getNamedOperandIdx(Inst.getOpcode(), AMDGPU::OpName::omod) != -1) {
4719 addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyOModSI, 0);
4720 }
4721 addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTySdwaDstSel, SdwaSel::DWORD);
4722 addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTySdwaDstUnused, DstUnused::UNUSED_PRESERVE);
4723 addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTySdwaSrc0Sel, SdwaSel::DWORD);
4724 break;
4725
4726 case SIInstrFlags::VOP2:
4727 addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyClampSI, 0);
4728 if (AMDGPU::getNamedOperandIdx(Inst.getOpcode(), AMDGPU::OpName::omod) != -1) {
4729 addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyOModSI, 0);
4730 }
4731 addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTySdwaDstSel, SdwaSel::DWORD);
4732 addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTySdwaDstUnused, DstUnused::UNUSED_PRESERVE);
4733 addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTySdwaSrc0Sel, SdwaSel::DWORD);
4734 addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTySdwaSrc1Sel, SdwaSel::DWORD);
4735 break;
4736
4737 case SIInstrFlags::VOPC:
4738 addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyClampSI, 0);
4739 addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTySdwaSrc0Sel, SdwaSel::DWORD);
4740 addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTySdwaSrc1Sel, SdwaSel::DWORD);
4741 break;
4742
4743 default:
4744 llvm_unreachable("Invalid instruction type. Only VOP1, VOP2 and VOPC allowed");
4745 }
4746 }
4747
4748 // special case v_mac_{f16, f32}:
4749 // it has src2 register operand that is tied to dst operand
4750 if (Inst.getOpcode() == AMDGPU::V_MAC_F32_sdwa_vi ||
4751 Inst.getOpcode() == AMDGPU::V_MAC_F16_sdwa_vi) {
4752 auto it = Inst.begin();
4753 std::advance(
4754 it, AMDGPU::getNamedOperandIdx(Inst.getOpcode(), AMDGPU::OpName::src2));
4755 Inst.insert(it, Inst.getOperand(0)); // src2 = dst
4756 }
4757}
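
The vcc-skipping rule above can be distilled into a small sketch of just the condition (illustration only, hypothetical helper name): for VOP2b sdwa forms the "vcc" token is dropped when 1 or 5 MCInst operands have been built so far, for VOPC only when it is the destination (0 built so far), and never twice in a row.

#include <cassert>

static bool skipVccHere(bool IsVOP2, bool IsVOPC, unsigned NumBuilt,
                        bool SkippedLast) {
  if (SkippedLast)
    return false; // never skip vcc on two consecutive iterations
  if (IsVOP2)
    return NumBuilt == 1 || NumBuilt == 5;
  return IsVOPC && NumBuilt == 0;
}

int main() {
  assert(skipVccHere(true, false, 1, false));  // v_add_i32_sdwa v1, vcc, v2, v3
  assert(!skipVccHere(true, false, 1, true));  // previous iteration skipped vcc
  assert(skipVccHere(false, true, 0, false));  // VOPC: vcc as destination
  return 0;
}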
4758
4759/// Force static initialization.
4760extern "C" void LLVMInitializeAMDGPUAsmParser() {
4761 RegisterMCAsmParser<AMDGPUAsmParser> A(getTheAMDGPUTarget());
4762 RegisterMCAsmParser<AMDGPUAsmParser> B(getTheGCNTarget());
4763}
4764
4765#define GET_REGISTER_MATCHER
4766#define GET_MATCHER_IMPLEMENTATION
4767#include "AMDGPUGenAsmMatcher.inc"
4768
4769 // This function should be defined after the auto-generated include so that
4770 // the MatchClassKind enum is defined.
4771unsigned AMDGPUAsmParser::validateTargetOperandClass(MCParsedAsmOperand &Op,
4772 unsigned Kind) {
4773 // Tokens like "glc" would be parsed as immediate operands in ParseOperand().
4774 // But MatchInstructionImpl() expects to see a token and fails to validate
4775 // the operand. This method checks whether we were given an immediate operand
4776 // but expected the corresponding token.
4777 AMDGPUOperand &Operand = (AMDGPUOperand&)Op;
4778 switch (Kind) {
4779 case MCK_addr64:
4780 return Operand.isAddr64() ? Match_Success : Match_InvalidOperand;
4781 case MCK_gds:
4782 return Operand.isGDS() ? Match_Success : Match_InvalidOperand;
4783 case MCK_glc:
4784 return Operand.isGLC() ? Match_Success : Match_InvalidOperand;
4785 case MCK_idxen:
4786 return Operand.isIdxen() ? Match_Success : Match_InvalidOperand;
4787 case MCK_offen:
4788 return Operand.isOffen() ? Match_Success : Match_InvalidOperand;
4789 case MCK_SSrcB32:
4790 // When operands have expression values, they will return true for isToken,
4791 // because it is not possible to distinguish between a token and an
4792 // expression at parse time. MatchInstructionImpl() will always try to
4793 // match an operand as a token, when isToken returns true, and when the
4794 // name of the expression is not a valid token, the match will fail,
4795 // so we need to handle it here.
4796 return Operand.isSSrcB32() ? Match_Success : Match_InvalidOperand;
4797 case MCK_SSrcF32:
4798 return Operand.isSSrcF32() ? Match_Success : Match_InvalidOperand;
4799 case MCK_SoppBrTarget:
4800 return Operand.isSoppBrTarget() ? Match_Success : Match_InvalidOperand;
4801 case MCK_VReg32OrOff:
4802 return Operand.isVReg32OrOff() ? Match_Success : Match_InvalidOperand;
4803 case MCK_InterpSlot:
4804 return Operand.isInterpSlot() ? Match_Success : Match_InvalidOperand;
4805 case MCK_Attr:
4806 return Operand.isInterpAttr() ? Match_Success : Match_InvalidOperand;
4807 case MCK_AttrChan:
4808 return Operand.isAttrChan() ? Match_Success : Match_InvalidOperand;
4809 default:
4810 return Match_InvalidOperand;
4811 }
4812}

/build/llvm-toolchain-snapshot-6.0~svn318882/include/llvm/MC/MCParser/MCAsmParserExtension.h

1//===- llvm/MC/MCAsmParserExtension.h - Asm Parser Hooks --------*- C++ -*-===//
2//
3// The LLVM Compiler Infrastructure
4//
5// This file is distributed under the University of Illinois Open Source
6// License. See LICENSE.TXT for details.
7//
8//===----------------------------------------------------------------------===//
9
10#ifndef LLVM_MC_MCPARSER_MCASMPARSEREXTENSION_H
11#define LLVM_MC_MCPARSER_MCASMPARSEREXTENSION_H
12
13#include "llvm/ADT/STLExtras.h"
14#include "llvm/ADT/StringRef.h"
15#include "llvm/MC/MCParser/MCAsmLexer.h"
16#include "llvm/MC/MCParser/MCAsmParser.h"
17#include "llvm/Support/SMLoc.h"
18
19namespace llvm {
20
21class Twine;
22
23/// \brief Generic interface for extending the MCAsmParser,
24/// which is implemented by target and object file assembly parser
25/// implementations.
26class MCAsmParserExtension {
27 MCAsmParser *Parser;
28
29protected:
30 MCAsmParserExtension();
31
32 // Helper template for implementing static dispatch functions.
33 template<typename T, bool (T::*Handler)(StringRef, SMLoc)>
34 static bool HandleDirective(MCAsmParserExtension *Target,
35 StringRef Directive,
36 SMLoc DirectiveLoc) {
37 T *Obj = static_cast<T*>(Target);
38 return (Obj->*Handler)(Directive, DirectiveLoc);
39 }
40
41 bool BracketExpressionsSupported = false;
42
43public:
44 MCAsmParserExtension(const MCAsmParserExtension &) = delete;
45 MCAsmParserExtension &operator=(const MCAsmParserExtension &) = delete;
46 virtual ~MCAsmParserExtension();
47
48 /// \brief Initialize the extension for parsing using the given \p Parser.
49 /// The extension should use the AsmParser interfaces to register its
50 /// parsing routines.
51 virtual void Initialize(MCAsmParser &Parser);
52
53 /// \name MCAsmParser Proxy Interfaces
54 /// @{
55
56 MCContext &getContext() { return getParser().getContext(); }
57
58 MCAsmLexer &getLexer() { return getParser().getLexer(); }
5. Calling 'MCAsmParserExtension::getParser'
6. Returning from 'MCAsmParserExtension::getParser'
59 const MCAsmLexer &getLexer() const {
60 return const_cast<MCAsmParserExtension *>(this)->getLexer();
61 }
62
63 MCAsmParser &getParser() { return *Parser; }
64 const MCAsmParser &getParser() const {
65 return const_cast<MCAsmParserExtension*>(this)->getParser();
66 }
67
68 SourceMgr &getSourceManager() { return getParser().getSourceManager(); }
69 MCStreamer &getStreamer() { return getParser().getStreamer(); }
70
71 bool Warning(SMLoc L, const Twine &Msg) {
72 return getParser().Warning(L, Msg);
73 }
74
75 bool Error(SMLoc L, const Twine &Msg, SMRange Range = SMRange()) {
76 return getParser().Error(L, Msg, Range);
77 }
78
79 void Note(SMLoc L, const Twine &Msg) {
80 getParser().Note(L, Msg);
81 }
82
83 bool TokError(const Twine &Msg) {
84 return getParser().TokError(Msg);
30. Calling 'MCAsmParserExtension::getParser'
31. Returning from 'MCAsmParserExtension::getParser'
85 }
86
87 const AsmToken &Lex() { return getParser().Lex(); }
88 const AsmToken &getTok() { return getParser().getTok(); }
89 bool parseToken(AsmToken::TokenKind T,
90 const Twine &Msg = "unexpected token") {
91 return getParser().parseToken(T, Msg);
92 }
93
94 bool parseMany(function_ref<bool()> parseOne, bool hasComma = true) {
95 return getParser().parseMany(parseOne, hasComma);
96 }
97
98 bool parseOptionalToken(AsmToken::TokenKind T) {
99 return getParser().parseOptionalToken(T);
100 }
101
102 bool check(bool P, const Twine &Msg) {
103 return getParser().check(P, Msg);
104 }
105
106 bool check(bool P, SMLoc Loc, const Twine &Msg) {
107 return getParser().check(P, Loc, Msg);
108 }
109
110 bool addErrorSuffix(const Twine &Suffix) {
111 return getParser().addErrorSuffix(Suffix);
112 }
113
114 bool HasBracketExpressions() const { return BracketExpressionsSupported; }
115
116 /// @}
117};
118
119} // end namespace llvm
120
121#endif // LLVM_MC_MCPARSER_MCASMPARSEREXTENSION_H
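
The HandleDirective template above is the key mechanism of this header: directive handlers are stored as plain function pointers, and each template instantiation acts as a static-dispatch trampoline that recovers the concrete extension type and invokes the member handler without any virtual call. A minimal self-contained sketch of the same pattern follows; MyExtension, onFoo, and the directive table are illustrative stand-ins, not the real LLVM registration API.

#include <cstdio>
#include <map>
#include <string>
#include <utility>

struct Extension;                       // base class, analogous to MCAsmParserExtension
using Handler = bool (*)(Extension *, const std::string &);

struct Extension {
  // Static trampoline: recovers the concrete extension type T and
  // forwards to the member function Fn, mirroring HandleDirective.
  template <typename T, bool (T::*Fn)(const std::string &)>
  static bool dispatch(Extension *Target, const std::string &Directive) {
    return (static_cast<T *>(Target)->*Fn)(Directive);
  }
};

struct MyExtension : Extension {
  bool onFoo(const std::string &D) {
    std::printf("handling %s\n", D.c_str());
    return false;                       // false == no error, as in LLVM
  }
};

int main() {
  MyExtension Ext;
  std::map<std::string, std::pair<Extension *, Handler>> Table;
  // Register one generic function-pointer entry per directive; the
  // template arguments bake the concrete type into the trampoline.
  Table[".foo"] = {&Ext, &Extension::dispatch<MyExtension, &MyExtension::onFoo>};
  auto &E = Table[".foo"];
  return E.second(E.first, ".foo");     // calls MyExtension::onFoo
}

The payoff of this design is that the directive table can hold a single, uniform function-pointer type while each extension keeps its handlers as ordinary member functions.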

/build/llvm-toolchain-snapshot-6.0~svn318882/include/llvm/MC/MCParser/MCAsmLexer.h

1//===- llvm/MC/MCAsmLexer.h - Abstract Asm Lexer Interface ------*- C++ -*-===//
2//
3// The LLVM Compiler Infrastructure
4//
5// This file is distributed under the University of Illinois Open Source
6// License. See LICENSE.TXT for details.
7//
8//===----------------------------------------------------------------------===//
9
10#ifndef LLVM_MC_MCPARSER_MCASMLEXER_H
11#define LLVM_MC_MCPARSER_MCASMLEXER_H
12
13#include "llvm/ADT/APInt.h"
14#include "llvm/ADT/ArrayRef.h"
15#include "llvm/ADT/SmallVector.h"
16#include "llvm/ADT/StringRef.h"
17#include "llvm/Support/SMLoc.h"
18#include <algorithm>
19#include <cassert>
20#include <cstddef>
21#include <cstdint>
22#include <string>
23
24namespace llvm {
25
26/// Target independent representation for an assembler token.
27class AsmToken {
28public:
29 enum TokenKind {
30 // Markers
31 Eof, Error,
32
33 // String values.
34 Identifier,
35 String,
36
37 // Integer values.
38 Integer,
39 BigNum, // larger than 64 bits
40
41 // Real values.
42 Real,
43
44 // Comments
45 Comment,
46 HashDirective,
47 // No-value.
48 EndOfStatement,
49 Colon,
50 Space,
51 Plus, Minus, Tilde,
52 Slash, // '/'
53 BackSlash, // '\'
54 LParen, RParen, LBrac, RBrac, LCurly, RCurly,
55 Star, Dot, Comma, Dollar, Equal, EqualEqual,
56
57 Pipe, PipePipe, Caret,
58 Amp, AmpAmp, Exclaim, ExclaimEqual, Percent, Hash,
59 Less, LessEqual, LessLess, LessGreater,
60 Greater, GreaterEqual, GreaterGreater, At,
61
62 // MIPS unary expression operators such as %neg.
63 PercentCall16, PercentCall_Hi, PercentCall_Lo, PercentDtprel_Hi,
64 PercentDtprel_Lo, PercentGot, PercentGot_Disp, PercentGot_Hi, PercentGot_Lo,
65 PercentGot_Ofst, PercentGot_Page, PercentGottprel, PercentGp_Rel, PercentHi,
66 PercentHigher, PercentHighest, PercentLo, PercentNeg, PercentPcrel_Hi,
67 PercentPcrel_Lo, PercentTlsgd, PercentTlsldm, PercentTprel_Hi,
68 PercentTprel_Lo
69 };
70
71private:
72 TokenKind Kind;
73
74 /// A reference to the entire token contents; this is always a pointer into
75 /// a memory buffer owned by the source manager.
76 StringRef Str;
77
78 APInt IntVal;
79
80public:
81 AsmToken() = default;
82 AsmToken(TokenKind Kind, StringRef Str, APInt IntVal)
83 : Kind(Kind), Str(Str), IntVal(std::move(IntVal)) {}
84 AsmToken(TokenKind Kind, StringRef Str, int64_t IntVal = 0)
85 : Kind(Kind), Str(Str), IntVal(64, IntVal, true) {}
86
87 TokenKind getKind() const { return Kind; }
88 bool is(TokenKind K) const { return Kind == K; }
89 bool isNot(TokenKind K) const { return Kind != K; }
12. Assuming the condition is false
90
91 SMLoc getLoc() const;
92 SMLoc getEndLoc() const;
93 SMRange getLocRange() const;
94
95 /// Get the contents of a string token (without quotes).
96 StringRef getStringContents() const {
 97 assert(Kind == String && "This token isn't a string!");
98 return Str.slice(1, Str.size() - 1);
99 }
100
101 /// Get the identifier string for the current token, which should be an
102 /// identifier or a string. This gets the portion of the string which should
103 /// be used as the identifier, e.g., it does not include the quotes on
104 /// strings.
105 StringRef getIdentifier() const {
106 if (Kind == Identifier)
107 return getString();
108 return getStringContents();
109 }
110
111 /// Get the string for the current token, this includes all characters (for
112 /// example, the quotes on strings) in the token.
113 ///
114 /// The returned StringRef points into the source manager's memory buffer, and
115 /// is safe to store across calls to Lex().
116 StringRef getString() const { return Str; }
117
118 // FIXME: Don't compute this in advance, it makes every token larger, and is
119 // also not generally what we want (it is nicer for recovery etc. to lex 123br
120 // as a single token, then diagnose as an invalid number).
121 int64_t getIntVal() const {
122 assert(Kind == Integer && "This token isn't an integer!");
123 return IntVal.getZExtValue();
124 }
125
126 APInt getAPIntVal() const {
127 assert((Kind == Integer || Kind == BigNum) &&
128 "This token isn't an integer!");
129 return IntVal;
130 }
131};
132
133/// A callback class which is notified of each comment in an assembly file as
134/// it is lexed.
135class AsmCommentConsumer {
136public:
137 virtual ~AsmCommentConsumer() = default;
138
139 /// Callback function for when a comment is lexed. Loc is the start of the
140 /// comment text (excluding the comment-start marker). CommentText is the text
141 /// of the comment, excluding the comment start and end markers, and the
142 /// newline for single-line comments.
143 virtual void HandleComment(SMLoc Loc, StringRef CommentText) = 0;
144};
145
146
147/// Generic assembler lexer interface, for use by target specific assembly
148/// lexers.
149class MCAsmLexer {
150 /// The current token, stored in the base class for faster access.
151 SmallVector<AsmToken, 1> CurTok;
152
153 /// The location and description of the current error
154 SMLoc ErrLoc;
155 std::string Err;
156
157protected: // Can only create subclasses.
158 const char *TokStart = nullptr;
159 bool SkipSpace = true;
160 bool AllowAtInIdentifier;
161 bool IsAtStartOfStatement = true;
162 AsmCommentConsumer *CommentConsumer = nullptr;
163
164 bool AltMacroMode;
165 MCAsmLexer();
166
167 virtual AsmToken LexToken() = 0;
168
169 void SetError(SMLoc errLoc, const std::string &err) {
170 ErrLoc = errLoc;
171 Err = err;
172 }
173
174public:
175 MCAsmLexer(const MCAsmLexer &) = delete;
176 MCAsmLexer &operator=(const MCAsmLexer &) = delete;
177 virtual ~MCAsmLexer();
178
179 bool IsaAltMacroMode() {
180 return AltMacroMode;
181 }
182
183 void SetAltMacroMode(bool AltMacroSet) {
184 AltMacroMode = AltMacroSet;
185 }
186
187 /// Consume the next token from the input stream and return it.
188 ///
189 /// The lexer will continuously return the end-of-file token once the end of
190 /// the main input file has been reached.
191 const AsmToken &Lex() {
192 assert(!CurTok.empty());
193 // Mark if we are parsing out an EndOfStatement.
194 IsAtStartOfStatement = CurTok.front().getKind() == AsmToken::EndOfStatement;
195 CurTok.erase(CurTok.begin());
196 // LexToken may generate multiple tokens via UnLex but will always return
197 // the first one. Place returned value at head of CurTok vector.
198 if (CurTok.empty()) {
199 AsmToken T = LexToken();
200 CurTok.insert(CurTok.begin(), T);
201 }
202 return CurTok.front();
203 }
204
205 void UnLex(AsmToken const &Token) {
206 IsAtStartOfStatement = false;
207 CurTok.insert(CurTok.begin(), Token);
208 }
209
210 bool isAtStartOfStatement() { return IsAtStartOfStatement; }
211
212 virtual StringRef LexUntilEndOfStatement() = 0;
213
214 /// Get the current source location.
215 SMLoc getLoc() const;
216
217 /// Get the current (last) lexed token.
218 const AsmToken &getTok() const {
219 return CurTok[0];
220 }
221
222 /// Look ahead at the next token to be lexed.
223 const AsmToken peekTok(bool ShouldSkipSpace = true) {
224 AsmToken Tok;
225
226 MutableArrayRef<AsmToken> Buf(Tok);
227 size_t ReadCount = peekTokens(Buf, ShouldSkipSpace);
228
229 assert(ReadCount == 1);
230 (void)ReadCount;
231
232 return Tok;
233 }
234
235 /// Look ahead an arbitrary number of tokens.
236 virtual size_t peekTokens(MutableArrayRef<AsmToken> Buf,
237 bool ShouldSkipSpace = true) = 0;
238
239 /// Get the current error location
240 SMLoc getErrLoc() {
241 return ErrLoc;
242 }
243
244 /// Get the current error string
245 const std::string &getErr() {
246 return Err;
247 }
248
249 /// Get the kind of current token.
250 AsmToken::TokenKind getKind() const { return getTok().getKind(); }
251
252 /// Check if the current token has kind \p K.
253 bool is(AsmToken::TokenKind K) const { return getTok().is(K); }
254
255 /// Check if the current token has kind \p K.
256 bool isNot(AsmToken::TokenKind K) const { return getTok().isNot(K); }
9. Calling 'MCAsmLexer::getTok'
10. Returning from 'MCAsmLexer::getTok'
11. Calling 'AsmToken::isNot'
13. Returning from 'AsmToken::isNot'
257
258 /// Set whether spaces should be ignored by the lexer
259 void setSkipSpace(bool val) { SkipSpace = val; }
260
261 bool getAllowAtInIdentifier() { return AllowAtInIdentifier; }
262 void setAllowAtInIdentifier(bool v) { AllowAtInIdentifier = v; }
263
264 void setCommentConsumer(AsmCommentConsumer *CommentConsumer) {
265 this->CommentConsumer = CommentConsumer;
266 }
267};
268
269} // end namespace llvm
270
271#endif // LLVM_MC_MCPARSER_MCASMLEXER_H
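
The Lex/UnLex pair above implements single-slot-plus-pushback buffering: the head of CurTok is always the current token, Lex pops it and refills from the virtual LexToken once the buffer drains, and UnLex pushes a token back so it becomes current again; peekTok builds on the same machinery via peekTokens. A stand-alone sketch of that buffering discipline follows; TinyLexer and its character-valued tokens are illustrative stand-ins, not the LLVM classes.

#include <cassert>
#include <string>
#include <vector>

class TinyLexer {
  std::string Input;
  size_t Pos = 0;
  std::vector<char> CurTok;             // CurTok.front() is the current token

  char LexToken() {                     // stand-in for the virtual LexToken()
    return Pos < Input.size() ? Input[Pos++] : '\0';   // '\0' plays the Eof role
  }

public:
  explicit TinyLexer(std::string S) : Input(std::move(S)) {
    CurTok.push_back(LexToken());       // prime the buffer with the first token
  }

  // Consume the current token and return the next one; Eof repeats forever.
  char Lex() {
    assert(!CurTok.empty());
    CurTok.erase(CurTok.begin());
    if (CurTok.empty())
      CurTok.insert(CurTok.begin(), LexToken());
    return CurTok.front();
  }

  // Push a token back; it becomes the current token again.
  void UnLex(char Tok) { CurTok.insert(CurTok.begin(), Tok); }

  char getTok() const { return CurTok[0]; }
};

int main() {
  TinyLexer L("ab");
  assert(L.getTok() == 'a');
  assert(L.Lex() == 'b');               // consume 'a', now at 'b'
  L.UnLex('a');                         // push 'a' back in front of 'b'
  assert(L.getTok() == 'a');
  assert(L.Lex() == 'b');
  assert(L.Lex() == '\0');              // Eof, and it stays Eof
  assert(L.Lex() == '\0');
  return 0;
}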

/build/llvm-toolchain-snapshot-6.0~svn318882/include/llvm/ADT/Twine.h

1//===- Twine.h - Fast Temporary String Concatenation ------------*- C++ -*-===//
2//
3// The LLVM Compiler Infrastructure
4//
5// This file is distributed under the University of Illinois Open Source
6// License. See LICENSE.TXT for details.
7//
8//===----------------------------------------------------------------------===//
9
10#ifndef LLVM_ADT_TWINE_H
11#define LLVM_ADT_TWINE_H
12
13#include "llvm/ADT/SmallVector.h"
14#include "llvm/ADT/StringRef.h"
15#include "llvm/Support/ErrorHandling.h"
16#include <cassert>
17#include <cstdint>
18#include <string>
19
20namespace llvm {
21
22 class formatv_object_base;
23 class raw_ostream;
24
25 /// Twine - A lightweight data structure for efficiently representing the
26 /// concatenation of temporary values as strings.
27 ///
28 /// A Twine is a kind of rope, it represents a concatenated string using a
29 /// binary-tree, where the string is the preorder of the nodes. Since the
30 /// Twine can be efficiently rendered into a buffer when its result is used,
31 /// it avoids the cost of generating temporary values for intermediate string
32 /// results -- particularly in cases when the Twine result is never
33 /// required. By explicitly tracking the type of leaf nodes, we can also avoid
34 /// the creation of temporary strings for conversion operations (such as
35 /// appending an integer to a string).
36 ///
37 /// A Twine is not intended for use directly and should not be stored, its
38 /// implementation relies on the ability to store pointers to temporary stack
39 /// objects which may be deallocated at the end of a statement. Twines should
40 /// only be used as const references in arguments, when an API wishes
41 /// to accept possibly-concatenated strings.
42 ///
43 /// Twines support a special 'null' value, which always concatenates to form
44 /// itself, and renders as an empty string. This can be returned from APIs to
45 /// effectively nullify any concatenations performed on the result.
46 ///
47 /// \b Implementation
48 ///
49 /// Given the nature of a Twine, it is not possible for the Twine's
50 /// concatenation method to construct interior nodes; the result must be
51 /// represented inside the returned value. For this reason a Twine object
52 /// actually holds two values, the left- and right-hand sides of a
53 /// concatenation. We also have nullary Twine objects, which are effectively
54 /// sentinel values that represent empty strings.
55 ///
56 /// Thus, a Twine can effectively have zero, one, or two children. The \see
57 /// isNullary(), \see isUnary(), and \see isBinary() predicates exist for
58 /// testing the number of children.
59 ///
60 /// We maintain a number of invariants on Twine objects (FIXME: Why):
61 /// - Nullary twines are always represented with their Kind on the left-hand
62 /// side, and the Empty kind on the right-hand side.
63 /// - Unary twines are always represented with the value on the left-hand
64 /// side, and the Empty kind on the right-hand side.
65 /// - If a Twine has another Twine as a child, that child should always be
66 /// binary (otherwise it could have been folded into the parent).
67 ///
68 /// These invariants are checked by \see isValid().
69 ///
70 /// \b Efficiency Considerations
71 ///
72 /// The Twine is designed to yield efficient and small code for common
73 /// situations. For this reason, the concat() method is inlined so that
74 /// concatenations of leaf nodes can be optimized into stores directly into a
75 /// single stack allocated object.
76 ///
77 /// In practice, not all compilers can be trusted to optimize concat() fully,
78 /// so we provide two additional methods (and accompanying operator+
79 /// overloads) to guarantee that particularly important cases (cstring plus
80 /// StringRef) codegen as desired.
81 class Twine {
82 /// NodeKind - Represent the type of an argument.
83 enum NodeKind : unsigned char {
84 /// An empty string; the result of concatenating anything with it is also
85 /// empty.
86 NullKind,
87
88 /// The empty string.
89 EmptyKind,
90
91 /// A pointer to a Twine instance.
92 TwineKind,
93
94 /// A pointer to a C string instance.
95 CStringKind,
96
97 /// A pointer to an std::string instance.
98 StdStringKind,
99
100 /// A pointer to a StringRef instance.
101 StringRefKind,
102
103 /// A pointer to a SmallString instance.
104 SmallStringKind,
105
106 /// A pointer to a formatv_object_base instance.
107 FormatvObjectKind,
108
109 /// A char value, to render as a character.
110 CharKind,
111
112 /// An unsigned int value, to render as an unsigned decimal integer.
113 DecUIKind,
114
115 /// An int value, to render as a signed decimal integer.
116 DecIKind,
117
118 /// A pointer to an unsigned long value, to render as an unsigned decimal
119 /// integer.
120 DecULKind,
121
122 /// A pointer to a long value, to render as a signed decimal integer.
123 DecLKind,
124
125 /// A pointer to an unsigned long long value, to render as an unsigned
126 /// decimal integer.
127 DecULLKind,
128
129 /// A pointer to a long long value, to render as a signed decimal integer.
130 DecLLKind,
131
132 /// A pointer to a uint64_t value, to render as an unsigned hexadecimal
133 /// integer.
134 UHexKind
135 };
136
137 union Child
138 {
139 const Twine *twine;
140 const char *cString;
141 const std::string *stdString;
142 const StringRef *stringRef;
143 const SmallVectorImpl<char> *smallString;
144 const formatv_object_base *formatvObject;
145 char character;
146 unsigned int decUI;
147 int decI;
148 const unsigned long *decUL;
149 const long *decL;
150 const unsigned long long *decULL;
151 const long long *decLL;
152 const uint64_t *uHex;
153 };
154
155 /// LHS - The prefix in the concatenation, which may be uninitialized for
156 /// Null or Empty kinds.
157 Child LHS;
158
159 /// RHS - The suffix in the concatenation, which may be uninitialized for
160 /// Null or Empty kinds.
161 Child RHS;
162
163 /// LHSKind - The NodeKind of the left hand side, \see getLHSKind().
164 NodeKind LHSKind = EmptyKind;
165
166 /// RHSKind - The NodeKind of the right hand side, \see getRHSKind().
167 NodeKind RHSKind = EmptyKind;
168
169 /// Construct a nullary twine; the kind must be NullKind or EmptyKind.
170 explicit Twine(NodeKind Kind) : LHSKind(Kind) {
171 assert(isNullary() && "Invalid kind!");
172 }
173
174 /// Construct a binary twine.
175 explicit Twine(const Twine &LHS, const Twine &RHS)
176 : LHSKind(TwineKind), RHSKind(TwineKind) {
177 this->LHS.twine = &LHS;
178 this->RHS.twine = &RHS;
179 assert(isValid() && "Invalid twine!");
180 }
181
182 /// Construct a twine from explicit values.
183 explicit Twine(Child LHS, NodeKind LHSKind, Child RHS, NodeKind RHSKind)
184 : LHS(LHS), RHS(RHS), LHSKind(LHSKind), RHSKind(RHSKind) {
185 assert(isValid() && "Invalid twine!");
186 }
187
188 /// Check for the null twine.
189 bool isNull() const {
190 return getLHSKind() == NullKind;
191 }
192
193 /// Check for the empty twine.
194 bool isEmpty() const {
195 return getLHSKind() == EmptyKind;
196 }
197
198 /// Check if this is a nullary twine (null or empty).
199 bool isNullary() const {
200 return isNull() || isEmpty();
201 }
202
203 /// Check if this is a unary twine.
204 bool isUnary() const {
205 return getRHSKind() == EmptyKind && !isNullary();
206 }
207
208 /// Check if this is a binary twine.
209 bool isBinary() const {
210 return getLHSKind() != NullKind && getRHSKind() != EmptyKind;
211 }
212
213 /// Check if this is a valid twine (satisfying the invariants on
214 /// order and number of arguments).
215 bool isValid() const {
216 // Nullary twines always have Empty on the RHS.
217 if (isNullary() && getRHSKind() != EmptyKind)
218 return false;
219
220 // Null should never appear on the RHS.
221 if (getRHSKind() == NullKind)
222 return false;
223
224 // The RHS cannot be non-empty if the LHS is empty.
225 if (getRHSKind() != EmptyKind && getLHSKind() == EmptyKind)
226 return false;
227
228 // A twine child should always be binary.
229 if (getLHSKind() == TwineKind &&
230 !LHS.twine->isBinary())
231 return false;
232 if (getRHSKind() == TwineKind &&
233 !RHS.twine->isBinary())
234 return false;
235
236 return true;
237 }
238
239 /// Get the NodeKind of the left-hand side.
240 NodeKind getLHSKind() const { return LHSKind; }
241
242 /// Get the NodeKind of the right-hand side.
243 NodeKind getRHSKind() const { return RHSKind; }
244
245 /// Print one child from a twine.
246 void printOneChild(raw_ostream &OS, Child Ptr, NodeKind Kind) const;
247
248 /// Print the representation of one child from a twine.
249 void printOneChildRepr(raw_ostream &OS, Child Ptr,
250 NodeKind Kind) const;
251
252 public:
253 /// @name Constructors
254 /// @{
255
256 /// Construct from an empty string.
257 /*implicit*/ Twine() {
258 assert(isValid() && "Invalid twine!");
259 }
260
261 Twine(const Twine &) = default;
262
263 /// Construct from a C string.
264 ///
265 /// We take care here to optimize "" into the empty twine -- this will be
266 /// optimized out for string constants. This allows Twine arguments to have
267 /// default "" values, without introducing unnecessary string constants.
268 /*implicit*/ Twine(const char *Str) {
22. Calling implicit default constructor for 'Child'
23. Returning from default constructor for 'Child'
24. Calling implicit default constructor for 'Child'
25. Returning from default constructor for 'Child'
269 if (Str[0] != '\0') {
26. Taking true branch
270 LHS.cString = Str;
271 LHSKind = CStringKind;
272 } else
273 LHSKind = EmptyKind;
274
275 assert(isValid() && "Invalid twine!");
27. Within the expansion of the macro 'assert':
    a. Assuming the condition is true
276 }
277
278 /// Construct from an std::string.
279 /*implicit*/ Twine(const std::string &Str) : LHSKind(StdStringKind) {
280 LHS.stdString = &Str;
281 assert(isValid() && "Invalid twine!");
282 }
283
284 /// Construct from a StringRef.
285 /*implicit*/ Twine(const StringRef &Str) : LHSKind(StringRefKind) {
286 LHS.stringRef = &Str;
287 assert(isValid() && "Invalid twine!");
288 }
289
290 /// Construct from a SmallString.
291 /*implicit*/ Twine(const SmallVectorImpl<char> &Str)
292 : LHSKind(SmallStringKind) {
293 LHS.smallString = &Str;
294 assert(isValid() && "Invalid twine!");
295 }
296
297 /// Construct from a formatv_object_base.
298 /*implicit*/ Twine(const formatv_object_base &Fmt)
299 : LHSKind(FormatvObjectKind) {
300 LHS.formatvObject = &Fmt;
301 assert(isValid() && "Invalid twine!");
302 }
303
304 /// Construct from a char.
305 explicit Twine(char Val) : LHSKind(CharKind) {
306 LHS.character = Val;
307 }
308
309 /// Construct from a signed char.
310 explicit Twine(signed char Val) : LHSKind(CharKind) {
311 LHS.character = static_cast<char>(Val);
312 }
313
314 /// Construct from an unsigned char.
315 explicit Twine(unsigned char Val) : LHSKind(CharKind) {
316 LHS.character = static_cast<char>(Val);
317 }
318
319 /// Construct a twine to print \p Val as an unsigned decimal integer.
320 explicit Twine(unsigned Val) : LHSKind(DecUIKind) {
321 LHS.decUI = Val;
322 }
323
324 /// Construct a twine to print \p Val as a signed decimal integer.
325 explicit Twine(int Val) : LHSKind(DecIKind) {
326 LHS.decI = Val;
327 }
328
329 /// Construct a twine to print \p Val as an unsigned decimal integer.
330 explicit Twine(const unsigned long &Val) : LHSKind(DecULKind) {
331 LHS.decUL = &Val;
332 }
333
334 /// Construct a twine to print \p Val as a signed decimal integer.
335 explicit Twine(const long &Val) : LHSKind(DecLKind) {
336 LHS.decL = &Val;
337 }
338
339 /// Construct a twine to print \p Val as an unsigned decimal integer.
340 explicit Twine(const unsigned long long &Val) : LHSKind(DecULLKind) {
341 LHS.decULL = &Val;
342 }
343
344 /// Construct a twine to print \p Val as a signed decimal integer.
345 explicit Twine(const long long &Val) : LHSKind(DecLLKind) {
346 LHS.decLL = &Val;
347 }
348
349 // FIXME: Unfortunately, to make sure this is as efficient as possible we
350 // need extra binary constructors from particular types. We can't rely on
351 // the compiler to be smart enough to fold operator+()/concat() down to the
352 // right thing. Yet.
353
354 /// Construct as the concatenation of a C string and a StringRef.
355 /*implicit*/ Twine(const char *LHS, const StringRef &RHS)
356 : LHSKind(CStringKind), RHSKind(StringRefKind) {
357 this->LHS.cString = LHS;
358 this->RHS.stringRef = &RHS;
359 assert(isValid() && "Invalid twine!");
360 }
361
362 /// Construct as the concatenation of a StringRef and a C string.
363 /*implicit*/ Twine(const StringRef &LHS, const char *RHS)
364 : LHSKind(StringRefKind), RHSKind(CStringKind) {
365 this->LHS.stringRef = &LHS;
366 this->RHS.cString = RHS;
367 assert(isValid() && "Invalid twine!");
368 }
369
370 /// Since the intended use of twines is as temporary objects, assignments
371 /// when concatenating might cause undefined behavior or stack corruption.
372 Twine &operator=(const Twine &) = delete;
373
374 /// Create a 'null' string, which is an empty string that always
375 /// concatenates to form another empty string.
376 static Twine createNull() {
377 return Twine(NullKind);
378 }
379
380 /// @}
381 /// @name Numeric Conversions
382 /// @{
383
384 /// Construct a twine to print \p Val as an unsigned hexadecimal integer.
385 static Twine utohexstr(const uint64_t &Val) {
386 Child LHS, RHS;
387 LHS.uHex = &Val;
388 RHS.twine = nullptr;
389 return Twine(LHS, UHexKind, RHS, EmptyKind);
390 }
391
392 /// @}
393 /// @name Predicate Operations
394 /// @{
395
396 /// Check if this twine is trivially empty; a false return value does not
397 /// necessarily mean the twine is empty.
398 bool isTriviallyEmpty() const {
399 return isNullary();
400 }
401
402 /// Return true if this twine can be dynamically accessed as a single
403 /// StringRef value with getSingleStringRef().
404 bool isSingleStringRef() const {
405 if (getRHSKind() != EmptyKind) return false;
406
407 switch (getLHSKind()) {
408 case EmptyKind:
409 case CStringKind:
410 case StdStringKind:
411 case StringRefKind:
412 case SmallStringKind:
413 return true;
414 default:
415 return false;
416 }
417 }
418
419 /// @}
420 /// @name String Operations
421 /// @{
422
423 Twine concat(const Twine &Suffix) const;
424
425 /// @}
426 /// @name Output & Conversion.
427 /// @{
428
429 /// Return the twine contents as a std::string.
430 std::string str() const;
431
432 /// Append the concatenated string into the given SmallString or SmallVector.
433 void toVector(SmallVectorImpl<char> &Out) const;
434
435 /// This returns the twine as a single StringRef. This method is only valid
436 /// if isSingleStringRef() is true.
437 StringRef getSingleStringRef() const {
438 assert(isSingleStringRef() && "This cannot be had as a single stringref!");
439 switch (getLHSKind()) {
440 default: llvm_unreachable("Out of sync with isSingleStringRef");
441 case EmptyKind: return StringRef();
442 case CStringKind: return StringRef(LHS.cString);
443 case StdStringKind: return StringRef(*LHS.stdString);
444 case StringRefKind: return *LHS.stringRef;
445 case SmallStringKind:
446 return StringRef(LHS.smallString->data(), LHS.smallString->size());
447 }
448 }
449
450 /// This returns the twine as a single StringRef if it can be
451 /// represented as such. Otherwise the twine is written into the given
452 /// SmallVector and a StringRef to the SmallVector's data is returned.
453 StringRef toStringRef(SmallVectorImpl<char> &Out) const {
454 if (isSingleStringRef())
455 return getSingleStringRef();
456 toVector(Out);
457 return StringRef(Out.data(), Out.size());
458 }
459
460 /// This returns the twine as a single null terminated StringRef if it
461 /// can be represented as such. Otherwise the twine is written into the
462 /// given SmallVector and a StringRef to the SmallVector's data is returned.
463 ///
464 /// The returned StringRef's size does not include the null terminator.
465 StringRef toNullTerminatedStringRef(SmallVectorImpl<char> &Out) const;
466
467 /// Write the concatenated string represented by this twine to the
468 /// stream \p OS.
469 void print(raw_ostream &OS) const;
470
471 /// Dump the concatenated string represented by this twine to stderr.
472 void dump() const;
473
474 /// Write the representation of this twine to the stream \p OS.
475 void printRepr(raw_ostream &OS) const;
476
477 /// Dump the representation of this twine to stderr.
478 void dumpRepr() const;
479
480 /// @}
481 };
482
483 /// @name Twine Inline Implementations
484 /// @{
485
486 inline Twine Twine::concat(const Twine &Suffix) const {
487 // Concatenation with null is null.
488 if (isNull() || Suffix.isNull())
489 return Twine(NullKind);
490
491 // Concatenation with empty yields the other side.
492 if (isEmpty())
493 return Suffix;
494 if (Suffix.isEmpty())
495 return *this;
496
497 // Otherwise we need to create a new node, taking care to fold in unary
498 // twines.
499 Child NewLHS, NewRHS;
500 NewLHS.twine = this;
501 NewRHS.twine = &Suffix;
502 NodeKind NewLHSKind = TwineKind, NewRHSKind = TwineKind;
503 if (isUnary()) {
504 NewLHS = LHS;
505 NewLHSKind = getLHSKind();
506 }
507 if (Suffix.isUnary()) {
508 NewRHS = Suffix.LHS;
509 NewRHSKind = Suffix.getLHSKind();
510 }
511
512 return Twine(NewLHS, NewLHSKind, NewRHS, NewRHSKind);
513 }
514
515 inline Twine operator+(const Twine &LHS, const Twine &RHS) {
516 return LHS.concat(RHS);
517 }
518
519 /// Additional overload to guarantee simplified codegen; this is equivalent to
520 /// concat().
521
522 inline Twine operator+(const char *LHS, const StringRef &RHS) {
523 return Twine(LHS, RHS);
524 }
525
526 /// Additional overload to guarantee simplified codegen; this is equivalent to
527 /// concat().
528
529 inline Twine operator+(const StringRef &LHS, const char *RHS) {
530 return Twine(LHS, RHS);
531 }
532
533 inline raw_ostream &operator<<(raw_ostream &OS, const Twine &RHS) {
534 RHS.print(OS);
535 return OS;
536 }
537
538 /// @}
539
540} // end namespace llvm
541
542#endif // LLVM_ADT_TWINE_H
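
The class comment above stresses that a Twine is a view over temporaries: each operator+ builds a two-child node pointing at its operands, and nothing is copied until the result is rendered. A short usage sketch against this header follows, assuming it is compiled against an LLVM tree of roughly this vintage; every Twine in it lives and dies inside one full expression, which is what keeps the borrowed stack temporaries alive.

#include "llvm/ADT/SmallString.h"
#include "llvm/ADT/Twine.h"
#include "llvm/Support/raw_ostream.h"

using namespace llvm;

int main() {
  unsigned Reg = 17;

  // OK: the whole rope is built and rendered within one statement, so the
  // stack temporaries it points into are still alive when str() walks them.
  std::string S = ("v" + Twine(Reg) + " is out of range").str();

  // Rendering into a SmallString avoids a heap allocation for short results;
  // the returned StringRef points either at a single leaf or into Buf.
  SmallString<32> Buf;
  StringRef Msg = ("register v" + Twine(Reg)).toStringRef(Buf);

  outs() << S << "\n" << Msg << "\n";

  // NOT OK (dangling): storing a concatenated Twine keeps pointers to the
  // destroyed temporaries, which is exactly what the class comment forbids.
  //   Twine Bad = "v" + Twine(Reg);   // don't do this
  return 0;
}

Note how concat() folds unary operands into the new node rather than pointing at them, so a chain of operator+ calls stays a shallow tree of leaves instead of a linked list of single-child twines.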