Bug Summary

File: lib/Target/AMDGPU/AsmParser/AMDGPUAsmParser.cpp
Warning: line 2422, column 3
1st function call argument is an uninitialized value
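
This warning class fires when the analyzer finds a feasible path on which a variable is passed as a function argument before anything has assigned to it. The flagged line (2422) falls beyond the excerpt reproduced below, but the defect shape is easy to see in isolation. A minimal, hypothetical C++ sketch of the same pattern (illustration only, not code from AMDGPUAsmParser.cpp):

#include <cstdint>
#include <cstdio>

static void consume(int64_t Value) {
  std::printf("%lld\n", static_cast<long long>(Value));
}

static void example(bool HaveOperand) {
  int64_t Operand;   // declared without an initializer
  if (HaveOperand)
    Operand = 42;    // assigned on one branch only
  consume(Operand);  // analyzer: "1st function call argument is an
                     // uninitialized value" when HaveOperand is false
}

int main() { example(false); }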

Annotated Source Code


/build/llvm-toolchain-snapshot-6.0~svn321639/lib/Target/AMDGPU/AsmParser/AMDGPUAsmParser.cpp

1//===- AMDGPUAsmParser.cpp - Parse SI asm to MCInst instructions ----------===//
2//
3// The LLVM Compiler Infrastructure
4//
5// This file is distributed under the University of Illinois Open Source
6// License. See LICENSE.TXT for details.
7//
8//===----------------------------------------------------------------------===//
9
10#include "AMDGPU.h"
11#include "AMDKernelCodeT.h"
12#include "MCTargetDesc/AMDGPUMCTargetDesc.h"
13#include "MCTargetDesc/AMDGPUTargetStreamer.h"
14#include "SIDefines.h"
15#include "Utils/AMDGPUAsmUtils.h"
16#include "Utils/AMDGPUBaseInfo.h"
17#include "Utils/AMDKernelCodeTUtils.h"
18#include "llvm/ADT/APFloat.h"
19#include "llvm/ADT/APInt.h"
20#include "llvm/ADT/ArrayRef.h"
21#include "llvm/ADT/STLExtras.h"
22#include "llvm/ADT/SmallBitVector.h"
23#include "llvm/ADT/SmallString.h"
24#include "llvm/ADT/StringRef.h"
25#include "llvm/ADT/StringSwitch.h"
26#include "llvm/ADT/Twine.h"
27#include "llvm/BinaryFormat/ELF.h"
28#include "llvm/CodeGen/MachineValueType.h"
29#include "llvm/MC/MCAsmInfo.h"
30#include "llvm/MC/MCContext.h"
31#include "llvm/MC/MCExpr.h"
32#include "llvm/MC/MCInst.h"
33#include "llvm/MC/MCInstrDesc.h"
34#include "llvm/MC/MCInstrInfo.h"
35#include "llvm/MC/MCParser/MCAsmLexer.h"
36#include "llvm/MC/MCParser/MCAsmParser.h"
37#include "llvm/MC/MCParser/MCAsmParserExtension.h"
38#include "llvm/MC/MCParser/MCParsedAsmOperand.h"
39#include "llvm/MC/MCParser/MCTargetAsmParser.h"
40#include "llvm/MC/MCRegisterInfo.h"
41#include "llvm/MC/MCStreamer.h"
42#include "llvm/MC/MCSubtargetInfo.h"
43#include "llvm/MC/MCSymbol.h"
44#include "llvm/Support/AMDGPUMetadata.h"
45#include "llvm/Support/Casting.h"
46#include "llvm/Support/Compiler.h"
47#include "llvm/Support/ErrorHandling.h"
48#include "llvm/Support/MathExtras.h"
49#include "llvm/Support/SMLoc.h"
50#include "llvm/Support/TargetRegistry.h"
51#include "llvm/Support/raw_ostream.h"
52#include <algorithm>
53#include <cassert>
54#include <cstdint>
55#include <cstring>
56#include <iterator>
57#include <map>
58#include <memory>
59#include <string>
60
61using namespace llvm;
62using namespace llvm::AMDGPU;
63
64namespace {
65
66class AMDGPUAsmParser;
67
68enum RegisterKind { IS_UNKNOWN, IS_VGPR, IS_SGPR, IS_TTMP, IS_SPECIAL };
69
70//===----------------------------------------------------------------------===//
71// Operand
72//===----------------------------------------------------------------------===//
73
74class AMDGPUOperand : public MCParsedAsmOperand {
75 enum KindTy {
76 Token,
77 Immediate,
78 Register,
79 Expression
80 } Kind;
81
82 SMLoc StartLoc, EndLoc;
83 const AMDGPUAsmParser *AsmParser;
84
85public:
86 AMDGPUOperand(KindTy Kind_, const AMDGPUAsmParser *AsmParser_)
87 : MCParsedAsmOperand(), Kind(Kind_), AsmParser(AsmParser_) {}
88
89 using Ptr = std::unique_ptr<AMDGPUOperand>;
90
91 struct Modifiers {
92 bool Abs = false;
93 bool Neg = false;
94 bool Sext = false;
95
96 bool hasFPModifiers() const { return Abs || Neg; }
97 bool hasIntModifiers() const { return Sext; }
98 bool hasModifiers() const { return hasFPModifiers() || hasIntModifiers(); }
99
100 int64_t getFPModifiersOperand() const {
101 int64_t Operand = 0;
102 Operand |= Abs ? SISrcMods::ABS : 0;
103 Operand |= Neg ? SISrcMods::NEG : 0;
104 return Operand;
105 }
106
107 int64_t getIntModifiersOperand() const {
108 int64_t Operand = 0;
109 Operand |= Sext ? SISrcMods::SEXT : 0;
110 return Operand;
111 }
112
113 int64_t getModifiersOperand() const {
114 assert(!(hasFPModifiers() && hasIntModifiers())
115 && "fp and int modifiers should not be used simultaneously");
116 if (hasFPModifiers()) {
117 return getFPModifiersOperand();
118 } else if (hasIntModifiers()) {
119 return getIntModifiersOperand();
120 } else {
121 return 0;
122 }
123 }
124
125 friend raw_ostream &operator <<(raw_ostream &OS, AMDGPUOperand::Modifiers Mods);
126 };
127
128 enum ImmTy {
129 ImmTyNone,
130 ImmTyGDS,
131 ImmTyOffen,
132 ImmTyIdxen,
133 ImmTyAddr64,
134 ImmTyOffset,
135 ImmTyInstOffset,
136 ImmTyOffset0,
137 ImmTyOffset1,
138 ImmTyGLC,
139 ImmTySLC,
140 ImmTyTFE,
141 ImmTyClampSI,
142 ImmTyOModSI,
143 ImmTyDppCtrl,
144 ImmTyDppRowMask,
145 ImmTyDppBankMask,
146 ImmTyDppBoundCtrl,
147 ImmTySdwaDstSel,
148 ImmTySdwaSrc0Sel,
149 ImmTySdwaSrc1Sel,
150 ImmTySdwaDstUnused,
151 ImmTyDMask,
152 ImmTyUNorm,
153 ImmTyDA,
154 ImmTyR128,
155 ImmTyLWE,
156 ImmTyExpTgt,
157 ImmTyExpCompr,
158 ImmTyExpVM,
159 ImmTyDFMT,
160 ImmTyNFMT,
161 ImmTyHwreg,
162 ImmTyOff,
163 ImmTySendMsg,
164 ImmTyInterpSlot,
165 ImmTyInterpAttr,
166 ImmTyAttrChan,
167 ImmTyOpSel,
168 ImmTyOpSelHi,
169 ImmTyNegLo,
170 ImmTyNegHi,
171 ImmTySwizzle,
172 ImmTyHigh
173 };
174
175 struct TokOp {
176 const char *Data;
177 unsigned Length;
178 };
179
180 struct ImmOp {
181 int64_t Val;
182 ImmTy Type;
183 bool IsFPImm;
184 Modifiers Mods;
185 };
186
187 struct RegOp {
188 unsigned RegNo;
189 bool IsForcedVOP3;
190 Modifiers Mods;
191 };
192
193 union {
194 TokOp Tok;
195 ImmOp Imm;
196 RegOp Reg;
197 const MCExpr *Expr;
198 };
199
200 bool isToken() const override {
201 if (Kind == Token)
202 return true;
203
204 if (Kind != Expression || !Expr)
205 return false;
206
207 // When parsing operands, we can't always tell if something was meant to be
208 // a token, like 'gds', or an expression that references a global variable.
209 // In this case, we assume the string is an expression, and if we need to
210 // interpret it as a token, then we treat the symbol name as the token.
211 return isa<MCSymbolRefExpr>(Expr);
212 }
213
214 bool isImm() const override {
215 return Kind == Immediate;
216 }
217
218 bool isInlinableImm(MVT type) const;
219 bool isLiteralImm(MVT type) const;
220
221 bool isRegKind() const {
222 return Kind == Register;
223 }
224
225 bool isReg() const override {
226 return isRegKind() && !hasModifiers();
227 }
228
229 bool isRegOrImmWithInputMods(MVT type) const {
230 return isRegKind() || isInlinableImm(type);
231 }
232
233 bool isRegOrImmWithInt16InputMods() const {
234 return isRegOrImmWithInputMods(MVT::i16);
235 }
236
237 bool isRegOrImmWithInt32InputMods() const {
238 return isRegOrImmWithInputMods(MVT::i32);
239 }
240
241 bool isRegOrImmWithInt64InputMods() const {
242 return isRegOrImmWithInputMods(MVT::i64);
243 }
244
245 bool isRegOrImmWithFP16InputMods() const {
246 return isRegOrImmWithInputMods(MVT::f16);
247 }
248
249 bool isRegOrImmWithFP32InputMods() const {
250 return isRegOrImmWithInputMods(MVT::f32);
251 }
252
253 bool isRegOrImmWithFP64InputMods() const {
254 return isRegOrImmWithInputMods(MVT::f64);
255 }
256
257 bool isVReg() const {
258 return isRegClass(AMDGPU::VGPR_32RegClassID) ||
259 isRegClass(AMDGPU::VReg_64RegClassID) ||
260 isRegClass(AMDGPU::VReg_96RegClassID) ||
261 isRegClass(AMDGPU::VReg_128RegClassID) ||
262 isRegClass(AMDGPU::VReg_256RegClassID) ||
263 isRegClass(AMDGPU::VReg_512RegClassID);
264 }
265
266 bool isVReg32OrOff() const {
267 return isOff() || isRegClass(AMDGPU::VGPR_32RegClassID);
268 }
269
270 bool isSDWARegKind() const;
271
272 bool isImmTy(ImmTy ImmT) const {
273 return isImm() && Imm.Type == ImmT;
274 }
275
276 bool isImmModifier() const {
277 return isImm() && Imm.Type != ImmTyNone;
278 }
279
280 bool isClampSI() const { return isImmTy(ImmTyClampSI); }
281 bool isOModSI() const { return isImmTy(ImmTyOModSI); }
282 bool isDMask() const { return isImmTy(ImmTyDMask); }
283 bool isUNorm() const { return isImmTy(ImmTyUNorm); }
284 bool isDA() const { return isImmTy(ImmTyDA); }
285 bool isR128() const { return isImmTy(ImmTyUNorm); }
286 bool isLWE() const { return isImmTy(ImmTyLWE); }
287 bool isOff() const { return isImmTy(ImmTyOff); }
288 bool isExpTgt() const { return isImmTy(ImmTyExpTgt); }
289 bool isExpVM() const { return isImmTy(ImmTyExpVM); }
290 bool isExpCompr() const { return isImmTy(ImmTyExpCompr); }
291 bool isOffen() const { return isImmTy(ImmTyOffen); }
292 bool isIdxen() const { return isImmTy(ImmTyIdxen); }
293 bool isAddr64() const { return isImmTy(ImmTyAddr64); }
294 bool isOffset() const { return isImmTy(ImmTyOffset) && isUInt<16>(getImm()); }
295 bool isOffset0() const { return isImmTy(ImmTyOffset0) && isUInt<16>(getImm()); }
296 bool isOffset1() const { return isImmTy(ImmTyOffset1) && isUInt<8>(getImm()); }
297
298 bool isOffsetU12() const { return (isImmTy(ImmTyOffset) || isImmTy(ImmTyInstOffset)) && isUInt<12>(getImm()); }
299 bool isOffsetS13() const { return (isImmTy(ImmTyOffset) || isImmTy(ImmTyInstOffset)) && isInt<13>(getImm()); }
300 bool isGDS() const { return isImmTy(ImmTyGDS); }
301 bool isGLC() const { return isImmTy(ImmTyGLC); }
302 bool isSLC() const { return isImmTy(ImmTySLC); }
303 bool isTFE() const { return isImmTy(ImmTyTFE); }
304 bool isDFMT() const { return isImmTy(ImmTyDFMT) && isUInt<8>(getImm()); }
305 bool isNFMT() const { return isImmTy(ImmTyNFMT) && isUInt<8>(getImm()); }
306 bool isBankMask() const { return isImmTy(ImmTyDppBankMask); }
307 bool isRowMask() const { return isImmTy(ImmTyDppRowMask); }
308 bool isBoundCtrl() const { return isImmTy(ImmTyDppBoundCtrl); }
309 bool isSDWADstSel() const { return isImmTy(ImmTySdwaDstSel); }
310 bool isSDWASrc0Sel() const { return isImmTy(ImmTySdwaSrc0Sel); }
311 bool isSDWASrc1Sel() const { return isImmTy(ImmTySdwaSrc1Sel); }
312 bool isSDWADstUnused() const { return isImmTy(ImmTySdwaDstUnused); }
313 bool isInterpSlot() const { return isImmTy(ImmTyInterpSlot); }
314 bool isInterpAttr() const { return isImmTy(ImmTyInterpAttr); }
315 bool isAttrChan() const { return isImmTy(ImmTyAttrChan); }
316 bool isOpSel() const { return isImmTy(ImmTyOpSel); }
317 bool isOpSelHi() const { return isImmTy(ImmTyOpSelHi); }
318 bool isNegLo() const { return isImmTy(ImmTyNegLo); }
319 bool isNegHi() const { return isImmTy(ImmTyNegHi); }
320 bool isHigh() const { return isImmTy(ImmTyHigh); }
321
322 bool isMod() const {
323 return isClampSI() || isOModSI();
324 }
325
326 bool isRegOrImm() const {
327 return isReg() || isImm();
328 }
329
330 bool isRegClass(unsigned RCID) const;
331
332 bool isRegOrInlineNoMods(unsigned RCID, MVT type) const {
333 return (isRegClass(RCID) || isInlinableImm(type)) && !hasModifiers();
334 }
335
336 bool isSCSrcB16() const {
337 return isRegOrInlineNoMods(AMDGPU::SReg_32RegClassID, MVT::i16);
338 }
339
340 bool isSCSrcV2B16() const {
341 return isSCSrcB16();
342 }
343
344 bool isSCSrcB32() const {
345 return isRegOrInlineNoMods(AMDGPU::SReg_32RegClassID, MVT::i32);
346 }
347
348 bool isSCSrcB64() const {
349 return isRegOrInlineNoMods(AMDGPU::SReg_64RegClassID, MVT::i64);
350 }
351
352 bool isSCSrcF16() const {
353 return isRegOrInlineNoMods(AMDGPU::SReg_32RegClassID, MVT::f16);
354 }
355
356 bool isSCSrcV2F16() const {
357 return isSCSrcF16();
358 }
359
360 bool isSCSrcF32() const {
361 return isRegOrInlineNoMods(AMDGPU::SReg_32RegClassID, MVT::f32);
362 }
363
364 bool isSCSrcF64() const {
365 return isRegOrInlineNoMods(AMDGPU::SReg_64RegClassID, MVT::f64);
366 }
367
368 bool isSSrcB32() const {
369 return isSCSrcB32() || isLiteralImm(MVT::i32) || isExpr();
370 }
371
372 bool isSSrcB16() const {
373 return isSCSrcB16() || isLiteralImm(MVT::i16);
374 }
375
376 bool isSSrcV2B16() const {
377 llvm_unreachable("cannot happen")::llvm::llvm_unreachable_internal("cannot happen", "/build/llvm-toolchain-snapshot-6.0~svn321639/lib/Target/AMDGPU/AsmParser/AMDGPUAsmParser.cpp"
, 377)
;
378 return isSSrcB16();
379 }
380
381 bool isSSrcB64() const {
382 // TODO: Find out how SALU supports extension of 32-bit literals to 64 bits.
383 // See isVSrc64().
384 return isSCSrcB64() || isLiteralImm(MVT::i64);
385 }
386
387 bool isSSrcF32() const {
388 return isSCSrcB32() || isLiteralImm(MVT::f32) || isExpr();
389 }
390
391 bool isSSrcF64() const {
392 return isSCSrcB64() || isLiteralImm(MVT::f64);
393 }
394
395 bool isSSrcF16() const {
396 return isSCSrcB16() || isLiteralImm(MVT::f16);
397 }
398
399 bool isSSrcV2F16() const {
400 llvm_unreachable("cannot happen")::llvm::llvm_unreachable_internal("cannot happen", "/build/llvm-toolchain-snapshot-6.0~svn321639/lib/Target/AMDGPU/AsmParser/AMDGPUAsmParser.cpp"
, 400)
;
401 return isSSrcF16();
402 }
403
404 bool isVCSrcB32() const {
405 return isRegOrInlineNoMods(AMDGPU::VS_32RegClassID, MVT::i32);
406 }
407
408 bool isVCSrcB64() const {
409 return isRegOrInlineNoMods(AMDGPU::VS_64RegClassID, MVT::i64);
410 }
411
412 bool isVCSrcB16() const {
413 return isRegOrInlineNoMods(AMDGPU::VS_32RegClassID, MVT::i16);
414 }
415
416 bool isVCSrcV2B16() const {
417 return isVCSrcB16();
418 }
419
420 bool isVCSrcF32() const {
421 return isRegOrInlineNoMods(AMDGPU::VS_32RegClassID, MVT::f32);
422 }
423
424 bool isVCSrcF64() const {
425 return isRegOrInlineNoMods(AMDGPU::VS_64RegClassID, MVT::f64);
426 }
427
428 bool isVCSrcF16() const {
429 return isRegOrInlineNoMods(AMDGPU::VS_32RegClassID, MVT::f16);
430 }
431
432 bool isVCSrcV2F16() const {
433 return isVCSrcF16();
434 }
435
436 bool isVSrcB32() const {
437 return isVCSrcF32() || isLiteralImm(MVT::i32);
438 }
439
440 bool isVSrcB64() const {
441 return isVCSrcF64() || isLiteralImm(MVT::i64);
442 }
443
444 bool isVSrcB16() const {
445 return isVCSrcF16() || isLiteralImm(MVT::i16);
446 }
447
448 bool isVSrcV2B16() const {
449 llvm_unreachable("cannot happen")::llvm::llvm_unreachable_internal("cannot happen", "/build/llvm-toolchain-snapshot-6.0~svn321639/lib/Target/AMDGPU/AsmParser/AMDGPUAsmParser.cpp"
, 449)
;
450 return isVSrcB16();
451 }
452
453 bool isVSrcF32() const {
454 return isVCSrcF32() || isLiteralImm(MVT::f32);
455 }
456
457 bool isVSrcF64() const {
458 return isVCSrcF64() || isLiteralImm(MVT::f64);
459 }
460
461 bool isVSrcF16() const {
462 return isVCSrcF16() || isLiteralImm(MVT::f16);
463 }
464
465 bool isVSrcV2F16() const {
466 llvm_unreachable("cannot happen")::llvm::llvm_unreachable_internal("cannot happen", "/build/llvm-toolchain-snapshot-6.0~svn321639/lib/Target/AMDGPU/AsmParser/AMDGPUAsmParser.cpp"
, 466)
;
467 return isVSrcF16();
468 }
469
470 bool isKImmFP32() const {
471 return isLiteralImm(MVT::f32);
472 }
473
474 bool isKImmFP16() const {
475 return isLiteralImm(MVT::f16);
476 }
477
478 bool isMem() const override {
479 return false;
480 }
481
482 bool isExpr() const {
483 return Kind == Expression;
484 }
485
486 bool isSoppBrTarget() const {
487 return isExpr() || isImm();
488 }
489
490 bool isSWaitCnt() const;
491 bool isHwreg() const;
492 bool isSendMsg() const;
493 bool isSwizzle() const;
494 bool isSMRDOffset8() const;
495 bool isSMRDOffset20() const;
496 bool isSMRDLiteralOffset() const;
497 bool isDPPCtrl() const;
498 bool isGPRIdxMode() const;
499 bool isS16Imm() const;
500 bool isU16Imm() const;
501
502 StringRef getExpressionAsToken() const {
503 assert(isExpr());
504 const MCSymbolRefExpr *S = cast<MCSymbolRefExpr>(Expr);
505 return S->getSymbol().getName();
506 }
507
508 StringRef getToken() const {
509 assert(isToken());
510
511 if (Kind == Expression)
512 return getExpressionAsToken();
513
514 return StringRef(Tok.Data, Tok.Length);
515 }
516
517 int64_t getImm() const {
518 assert(isImm());
519 return Imm.Val;
520 }
521
522 ImmTy getImmTy() const {
523 assert(isImm());
524 return Imm.Type;
525 }
526
527 unsigned getReg() const override {
528 return Reg.RegNo;
529 }
530
531 SMLoc getStartLoc() const override {
532 return StartLoc;
533 }
534
535 SMLoc getEndLoc() const override {
536 return EndLoc;
537 }
538
539 SMRange getLocRange() const {
540 return SMRange(StartLoc, EndLoc);
541 }
542
543 Modifiers getModifiers() const {
544 assert(isRegKind() || isImmTy(ImmTyNone));
545 return isRegKind() ? Reg.Mods : Imm.Mods;
546 }
547
548 void setModifiers(Modifiers Mods) {
549 assert(isRegKind() || isImmTy(ImmTyNone));
550 if (isRegKind())
551 Reg.Mods = Mods;
552 else
553 Imm.Mods = Mods;
554 }
555
556 bool hasModifiers() const {
557 return getModifiers().hasModifiers();
558 }
559
560 bool hasFPModifiers() const {
561 return getModifiers().hasFPModifiers();
562 }
563
564 bool hasIntModifiers() const {
565 return getModifiers().hasIntModifiers();
566 }
567
568 uint64_t applyInputFPModifiers(uint64_t Val, unsigned Size) const;
569
570 void addImmOperands(MCInst &Inst, unsigned N, bool ApplyModifiers = true) const;
571
572 void addLiteralImmOperand(MCInst &Inst, int64_t Val, bool ApplyModifiers) const;
573
574 template <unsigned Bitwidth>
575 void addKImmFPOperands(MCInst &Inst, unsigned N) const;
576
577 void addKImmFP16Operands(MCInst &Inst, unsigned N) const {
578 addKImmFPOperands<16>(Inst, N);
579 }
580
581 void addKImmFP32Operands(MCInst &Inst, unsigned N) const {
582 addKImmFPOperands<32>(Inst, N);
583 }
584
585 void addRegOperands(MCInst &Inst, unsigned N) const;
586
587 void addRegOrImmOperands(MCInst &Inst, unsigned N) const {
588 if (isRegKind())
589 addRegOperands(Inst, N);
590 else if (isExpr())
591 Inst.addOperand(MCOperand::createExpr(Expr));
592 else
593 addImmOperands(Inst, N);
594 }
595
596 void addRegOrImmWithInputModsOperands(MCInst &Inst, unsigned N) const {
597 Modifiers Mods = getModifiers();
598 Inst.addOperand(MCOperand::createImm(Mods.getModifiersOperand()));
599 if (isRegKind()) {
600 addRegOperands(Inst, N);
601 } else {
602 addImmOperands(Inst, N, false);
603 }
604 }
605
606 void addRegOrImmWithFPInputModsOperands(MCInst &Inst, unsigned N) const {
607 assert(!hasIntModifiers());
608 addRegOrImmWithInputModsOperands(Inst, N);
609 }
610
611 void addRegOrImmWithIntInputModsOperands(MCInst &Inst, unsigned N) const {
612 assert(!hasFPModifiers());
613 addRegOrImmWithInputModsOperands(Inst, N);
614 }
615
616 void addRegWithInputModsOperands(MCInst &Inst, unsigned N) const {
617 Modifiers Mods = getModifiers();
618 Inst.addOperand(MCOperand::createImm(Mods.getModifiersOperand()));
619 assert(isRegKind());
620 addRegOperands(Inst, N);
621 }
622
623 void addRegWithFPInputModsOperands(MCInst &Inst, unsigned N) const {
624 assert(!hasIntModifiers());
625 addRegWithInputModsOperands(Inst, N);
626 }
627
628 void addRegWithIntInputModsOperands(MCInst &Inst, unsigned N) const {
629 assert(!hasFPModifiers());
630 addRegWithInputModsOperands(Inst, N);
631 }
632
633 void addSoppBrTargetOperands(MCInst &Inst, unsigned N) const {
634 if (isImm())
635 addImmOperands(Inst, N);
636 else {
637 assert(isExpr());
638 Inst.addOperand(MCOperand::createExpr(Expr));
639 }
640 }
641
642 static void printImmTy(raw_ostream& OS, ImmTy Type) {
643 switch (Type) {
644 case ImmTyNone: OS << "None"; break;
645 case ImmTyGDS: OS << "GDS"; break;
646 case ImmTyOffen: OS << "Offen"; break;
647 case ImmTyIdxen: OS << "Idxen"; break;
648 case ImmTyAddr64: OS << "Addr64"; break;
649 case ImmTyOffset: OS << "Offset"; break;
650 case ImmTyInstOffset: OS << "InstOffset"; break;
651 case ImmTyOffset0: OS << "Offset0"; break;
652 case ImmTyOffset1: OS << "Offset1"; break;
653 case ImmTyGLC: OS << "GLC"; break;
654 case ImmTySLC: OS << "SLC"; break;
655 case ImmTyTFE: OS << "TFE"; break;
656 case ImmTyDFMT: OS << "DFMT"; break;
657 case ImmTyNFMT: OS << "NFMT"; break;
658 case ImmTyClampSI: OS << "ClampSI"; break;
659 case ImmTyOModSI: OS << "OModSI"; break;
660 case ImmTyDppCtrl: OS << "DppCtrl"; break;
661 case ImmTyDppRowMask: OS << "DppRowMask"; break;
662 case ImmTyDppBankMask: OS << "DppBankMask"; break;
663 case ImmTyDppBoundCtrl: OS << "DppBoundCtrl"; break;
664 case ImmTySdwaDstSel: OS << "SdwaDstSel"; break;
665 case ImmTySdwaSrc0Sel: OS << "SdwaSrc0Sel"; break;
666 case ImmTySdwaSrc1Sel: OS << "SdwaSrc1Sel"; break;
667 case ImmTySdwaDstUnused: OS << "SdwaDstUnused"; break;
668 case ImmTyDMask: OS << "DMask"; break;
669 case ImmTyUNorm: OS << "UNorm"; break;
670 case ImmTyDA: OS << "DA"; break;
671 case ImmTyR128: OS << "R128"; break;
672 case ImmTyLWE: OS << "LWE"; break;
673 case ImmTyOff: OS << "Off"; break;
674 case ImmTyExpTgt: OS << "ExpTgt"; break;
675 case ImmTyExpCompr: OS << "ExpCompr"; break;
676 case ImmTyExpVM: OS << "ExpVM"; break;
677 case ImmTyHwreg: OS << "Hwreg"; break;
678 case ImmTySendMsg: OS << "SendMsg"; break;
679 case ImmTyInterpSlot: OS << "InterpSlot"; break;
680 case ImmTyInterpAttr: OS << "InterpAttr"; break;
681 case ImmTyAttrChan: OS << "AttrChan"; break;
682 case ImmTyOpSel: OS << "OpSel"; break;
683 case ImmTyOpSelHi: OS << "OpSelHi"; break;
684 case ImmTyNegLo: OS << "NegLo"; break;
685 case ImmTyNegHi: OS << "NegHi"; break;
686 case ImmTySwizzle: OS << "Swizzle"; break;
687 case ImmTyHigh: OS << "High"; break;
688 }
689 }
690
691 void print(raw_ostream &OS) const override {
692 switch (Kind) {
693 case Register:
694 OS << "<register " << getReg() << " mods: " << Reg.Mods << '>';
695 break;
696 case Immediate:
697 OS << '<' << getImm();
698 if (getImmTy() != ImmTyNone) {
699 OS << " type: "; printImmTy(OS, getImmTy());
700 }
701 OS << " mods: " << Imm.Mods << '>';
702 break;
703 case Token:
704 OS << '\'' << getToken() << '\'';
705 break;
706 case Expression:
707 OS << "<expr " << *Expr << '>';
708 break;
709 }
710 }
711
712 static AMDGPUOperand::Ptr CreateImm(const AMDGPUAsmParser *AsmParser,
713 int64_t Val, SMLoc Loc,
714 ImmTy Type = ImmTyNone,
715 bool IsFPImm = false) {
716 auto Op = llvm::make_unique<AMDGPUOperand>(Immediate, AsmParser);
717 Op->Imm.Val = Val;
718 Op->Imm.IsFPImm = IsFPImm;
719 Op->Imm.Type = Type;
720 Op->Imm.Mods = Modifiers();
721 Op->StartLoc = Loc;
722 Op->EndLoc = Loc;
723 return Op;
724 }
725
726 static AMDGPUOperand::Ptr CreateToken(const AMDGPUAsmParser *AsmParser,
727 StringRef Str, SMLoc Loc,
728 bool HasExplicitEncodingSize = true) {
729 auto Res = llvm::make_unique<AMDGPUOperand>(Token, AsmParser);
730 Res->Tok.Data = Str.data();
731 Res->Tok.Length = Str.size();
732 Res->StartLoc = Loc;
733 Res->EndLoc = Loc;
734 return Res;
735 }
736
737 static AMDGPUOperand::Ptr CreateReg(const AMDGPUAsmParser *AsmParser,
738 unsigned RegNo, SMLoc S,
739 SMLoc E,
740 bool ForceVOP3) {
741 auto Op = llvm::make_unique<AMDGPUOperand>(Register, AsmParser);
742 Op->Reg.RegNo = RegNo;
743 Op->Reg.Mods = Modifiers();
744 Op->Reg.IsForcedVOP3 = ForceVOP3;
745 Op->StartLoc = S;
746 Op->EndLoc = E;
747 return Op;
748 }
749
750 static AMDGPUOperand::Ptr CreateExpr(const AMDGPUAsmParser *AsmParser,
751 const class MCExpr *Expr, SMLoc S) {
752 auto Op = llvm::make_unique<AMDGPUOperand>(Expression, AsmParser);
753 Op->Expr = Expr;
754 Op->StartLoc = S;
755 Op->EndLoc = S;
756 return Op;
757 }
758};
759
760raw_ostream &operator <<(raw_ostream &OS, AMDGPUOperand::Modifiers Mods) {
761 OS << "abs:" << Mods.Abs << " neg: " << Mods.Neg << " sext:" << Mods.Sext;
762 return OS;
763}
764
765//===----------------------------------------------------------------------===//
766// AsmParser
767//===----------------------------------------------------------------------===//
768
769// Holds info related to the current kernel, e.g. count of SGPRs used.
770// Kernel scope begins at .amdgpu_hsa_kernel directive, ends at next
771// .amdgpu_hsa_kernel or at EOF.
772class KernelScopeInfo {
773 int SgprIndexUnusedMin = -1;
774 int VgprIndexUnusedMin = -1;
775 MCContext *Ctx = nullptr;
776
777 void usesSgprAt(int i) {
778 if (i >= SgprIndexUnusedMin) {
779 SgprIndexUnusedMin = ++i;
780 if (Ctx) {
781 MCSymbol * const Sym = Ctx->getOrCreateSymbol(Twine(".kernel.sgpr_count"));
782 Sym->setVariableValue(MCConstantExpr::create(SgprIndexUnusedMin, *Ctx));
783 }
784 }
785 }
786
787 void usesVgprAt(int i) {
788 if (i >= VgprIndexUnusedMin) {
789 VgprIndexUnusedMin = ++i;
790 if (Ctx) {
791 MCSymbol * const Sym = Ctx->getOrCreateSymbol(Twine(".kernel.vgpr_count"));
792 Sym->setVariableValue(MCConstantExpr::create(VgprIndexUnusedMin, *Ctx));
793 }
794 }
795 }
796
797public:
798 KernelScopeInfo() = default;
799
800 void initialize(MCContext &Context) {
801 Ctx = &Context;
802 usesSgprAt(SgprIndexUnusedMin = -1);
803 usesVgprAt(VgprIndexUnusedMin = -1);
804 }
805
806 void usesRegister(RegisterKind RegKind, unsigned DwordRegIndex, unsigned RegWidth) {
807 switch (RegKind) {
808 case IS_SGPR: usesSgprAt(DwordRegIndex + RegWidth - 1); break;
809 case IS_VGPR: usesVgprAt(DwordRegIndex + RegWidth - 1); break;
810 default: break;
811 }
812 }
813};
814
815class AMDGPUAsmParser : public MCTargetAsmParser {
816 MCAsmParser &Parser;
817
818 // Number of extra operands parsed after the first optional operand.
819 // This may be necessary to skip hardcoded mandatory operands.
820 static const unsigned MAX_OPR_LOOKAHEAD = 1;
821
822 unsigned ForcedEncodingSize = 0;
823 bool ForcedDPP = false;
824 bool ForcedSDWA = false;
825 KernelScopeInfo KernelScope;
826
827 /// @name Auto-generated Match Functions
828 /// {
829
830#define GET_ASSEMBLER_HEADER
831#include "AMDGPUGenAsmMatcher.inc"
832
833 /// }
834
835private:
836 bool ParseAsAbsoluteExpression(uint32_t &Ret);
837 bool ParseDirectiveMajorMinor(uint32_t &Major, uint32_t &Minor);
838 bool ParseDirectiveHSACodeObjectVersion();
839 bool ParseDirectiveHSACodeObjectISA();
840 bool ParseAMDKernelCodeTValue(StringRef ID, amd_kernel_code_t &Header);
841 bool ParseDirectiveAMDKernelCodeT();
842 bool subtargetHasRegister(const MCRegisterInfo &MRI, unsigned RegNo) const;
843 bool ParseDirectiveAMDGPUHsaKernel();
844
845 bool ParseDirectiveISAVersion();
846 bool ParseDirectiveHSAMetadata();
847 bool ParseDirectivePALMetadata();
848
849 bool AddNextRegisterToList(unsigned& Reg, unsigned& RegWidth,
850 RegisterKind RegKind, unsigned Reg1,
851 unsigned RegNum);
852 bool ParseAMDGPURegister(RegisterKind& RegKind, unsigned& Reg,
853 unsigned& RegNum, unsigned& RegWidth,
854 unsigned *DwordRegIndex);
855 void cvtMubufImpl(MCInst &Inst, const OperandVector &Operands,
856 bool IsAtomic, bool IsAtomicReturn);
857 void cvtDSImpl(MCInst &Inst, const OperandVector &Operands,
858 bool IsGdsHardcoded);
859
860public:
861 enum AMDGPUMatchResultTy {
862 Match_PreferE32 = FIRST_TARGET_MATCH_RESULT_TY
863 };
864
865 using OptionalImmIndexMap = std::map<AMDGPUOperand::ImmTy, unsigned>;
866
867 AMDGPUAsmParser(const MCSubtargetInfo &STI, MCAsmParser &_Parser,
868 const MCInstrInfo &MII,
869 const MCTargetOptions &Options)
870 : MCTargetAsmParser(Options, STI, MII), Parser(_Parser) {
871 MCAsmParserExtension::Initialize(Parser);
872
873 if (getFeatureBits().none()) {
874 // Set default features.
875 copySTI().ToggleFeature("SOUTHERN_ISLANDS");
876 }
877
878 setAvailableFeatures(ComputeAvailableFeatures(getFeatureBits()));
879
880 {
881 // TODO: make those pre-defined variables read-only.
882 // Currently there is no suitable machinery in the core llvm-mc for this.
883 // MCSymbol::isRedefinable is intended for another purpose, and
884 // AsmParser::parseDirectiveSet() cannot be specialized for a specific target.
885 AMDGPU::IsaInfo::IsaVersion ISA =
886 AMDGPU::IsaInfo::getIsaVersion(getFeatureBits());
887 MCContext &Ctx = getContext();
888 MCSymbol *Sym =
889 Ctx.getOrCreateSymbol(Twine(".option.machine_version_major"));
890 Sym->setVariableValue(MCConstantExpr::create(ISA.Major, Ctx));
891 Sym = Ctx.getOrCreateSymbol(Twine(".option.machine_version_minor"));
892 Sym->setVariableValue(MCConstantExpr::create(ISA.Minor, Ctx));
893 Sym = Ctx.getOrCreateSymbol(Twine(".option.machine_version_stepping"));
894 Sym->setVariableValue(MCConstantExpr::create(ISA.Stepping, Ctx));
895 }
896 KernelScope.initialize(getContext());
897 }
898
899 bool isSI() const {
900 return AMDGPU::isSI(getSTI());
901 }
902
903 bool isCI() const {
904 return AMDGPU::isCI(getSTI());
905 }
906
907 bool isVI() const {
908 return AMDGPU::isVI(getSTI());
909 }
910
911 bool isGFX9() const {
912 return AMDGPU::isGFX9(getSTI());
913 }
914
915 bool hasInv2PiInlineImm() const {
916 return getFeatureBits()[AMDGPU::FeatureInv2PiInlineImm];
917 }
918
919 bool hasFlatOffsets() const {
920 return getFeatureBits()[AMDGPU::FeatureFlatInstOffsets];
921 }
922
923 bool hasSGPR102_SGPR103() const {
924 return !isVI();
925 }
926
927 bool hasIntClamp() const {
928 return getFeatureBits()[AMDGPU::FeatureIntClamp];
929 }
930
931 AMDGPUTargetStreamer &getTargetStreamer() {
932 MCTargetStreamer &TS = *getParser().getStreamer().getTargetStreamer();
933 return static_cast<AMDGPUTargetStreamer &>(TS);
934 }
935
936 const MCRegisterInfo *getMRI() const {
937 // We need this const_cast because for some reason getContext() is not const
938 // in MCAsmParser.
939 return const_cast<AMDGPUAsmParser*>(this)->getContext().getRegisterInfo();
940 }
941
942 const MCInstrInfo *getMII() const {
943 return &MII;
944 }
945
946 const FeatureBitset &getFeatureBits() const {
947 return getSTI().getFeatureBits();
948 }
949
950 void setForcedEncodingSize(unsigned Size) { ForcedEncodingSize = Size; }
951 void setForcedDPP(bool ForceDPP_) { ForcedDPP = ForceDPP_; }
952 void setForcedSDWA(bool ForceSDWA_) { ForcedSDWA = ForceSDWA_; }
953
954 unsigned getForcedEncodingSize() const { return ForcedEncodingSize; }
955 bool isForcedVOP3() const { return ForcedEncodingSize == 64; }
956 bool isForcedDPP() const { return ForcedDPP; }
957 bool isForcedSDWA() const { return ForcedSDWA; }
958 ArrayRef<unsigned> getMatchedVariants() const;
959
960 std::unique_ptr<AMDGPUOperand> parseRegister();
961 bool ParseRegister(unsigned &RegNo, SMLoc &StartLoc, SMLoc &EndLoc) override;
962 unsigned checkTargetMatchPredicate(MCInst &Inst) override;
963 unsigned validateTargetOperandClass(MCParsedAsmOperand &Op,
964 unsigned Kind) override;
965 bool MatchAndEmitInstruction(SMLoc IDLoc, unsigned &Opcode,
966 OperandVector &Operands, MCStreamer &Out,
967 uint64_t &ErrorInfo,
968 bool MatchingInlineAsm) override;
969 bool ParseDirective(AsmToken DirectiveID) override;
970 OperandMatchResultTy parseOperand(OperandVector &Operands, StringRef Mnemonic);
971 StringRef parseMnemonicSuffix(StringRef Name);
972 bool ParseInstruction(ParseInstructionInfo &Info, StringRef Name,
973 SMLoc NameLoc, OperandVector &Operands) override;
974 //bool ProcessInstruction(MCInst &Inst);
975
976 OperandMatchResultTy parseIntWithPrefix(const char *Prefix, int64_t &Int);
977
978 OperandMatchResultTy
979 parseIntWithPrefix(const char *Prefix, OperandVector &Operands,
980 AMDGPUOperand::ImmTy ImmTy = AMDGPUOperand::ImmTyNone,
981 bool (*ConvertResult)(int64_t &) = nullptr);
982
983 OperandMatchResultTy parseOperandArrayWithPrefix(
984 const char *Prefix,
985 OperandVector &Operands,
986 AMDGPUOperand::ImmTy ImmTy = AMDGPUOperand::ImmTyNone,
987 bool (*ConvertResult)(int64_t&) = nullptr);
988
989 OperandMatchResultTy
990 parseNamedBit(const char *Name, OperandVector &Operands,
991 AMDGPUOperand::ImmTy ImmTy = AMDGPUOperand::ImmTyNone);
992 OperandMatchResultTy parseStringWithPrefix(StringRef Prefix,
993 StringRef &Value);
994
995 bool parseAbsoluteExpr(int64_t &Val, bool AbsMod = false);
996 OperandMatchResultTy parseImm(OperandVector &Operands, bool AbsMod = false);
997 OperandMatchResultTy parseReg(OperandVector &Operands);
998 OperandMatchResultTy parseRegOrImm(OperandVector &Operands, bool AbsMod = false);
999 OperandMatchResultTy parseRegOrImmWithFPInputMods(OperandVector &Operands, bool AllowImm = true);
1000 OperandMatchResultTy parseRegOrImmWithIntInputMods(OperandVector &Operands, bool AllowImm = true);
1001 OperandMatchResultTy parseRegWithFPInputMods(OperandVector &Operands);
1002 OperandMatchResultTy parseRegWithIntInputMods(OperandVector &Operands);
1003 OperandMatchResultTy parseVReg32OrOff(OperandVector &Operands);
1004
1005 void cvtDSOffset01(MCInst &Inst, const OperandVector &Operands);
1006 void cvtDS(MCInst &Inst, const OperandVector &Operands) { cvtDSImpl(Inst, Operands, false); }
1007 void cvtDSGds(MCInst &Inst, const OperandVector &Operands) { cvtDSImpl(Inst, Operands, true); }
1008 void cvtExp(MCInst &Inst, const OperandVector &Operands);
1009
1010 bool parseCnt(int64_t &IntVal);
1011 OperandMatchResultTy parseSWaitCntOps(OperandVector &Operands);
1012 OperandMatchResultTy parseHwreg(OperandVector &Operands);
1013
1014private:
1015 struct OperandInfoTy {
1016 int64_t Id;
1017 bool IsSymbolic = false;
1018
1019 OperandInfoTy(int64_t Id_) : Id(Id_) {}
1020 };
1021
1022 bool parseSendMsgConstruct(OperandInfoTy &Msg, OperandInfoTy &Operation, int64_t &StreamId);
1023 bool parseHwregConstruct(OperandInfoTy &HwReg, int64_t &Offset, int64_t &Width);
1024
1025 void errorExpTgt();
1026 OperandMatchResultTy parseExpTgtImpl(StringRef Str, uint8_t &Val);
1027
1028 bool validateInstruction(const MCInst &Inst, const SMLoc &IDLoc);
1029 bool validateConstantBusLimitations(const MCInst &Inst);
1030 bool validateEarlyClobberLimitations(const MCInst &Inst);
1031 bool validateIntClampSupported(const MCInst &Inst);
1032 bool usesConstantBus(const MCInst &Inst, unsigned OpIdx);
1033 bool isInlineConstant(const MCInst &Inst, unsigned OpIdx) const;
1034 unsigned findImplicitSGPRReadInVOP(const MCInst &Inst) const;
1035
1036 bool trySkipId(const StringRef Id);
1037 bool trySkipToken(const AsmToken::TokenKind Kind);
1038 bool skipToken(const AsmToken::TokenKind Kind, const StringRef ErrMsg);
1039 bool parseString(StringRef &Val, const StringRef ErrMsg = "expected a string");
1040 bool parseExpr(int64_t &Imm);
1041
1042public:
1043 OperandMatchResultTy parseOptionalOperand(OperandVector &Operands);
1044 OperandMatchResultTy parseOptionalOpr(OperandVector &Operands);
1045
1046 OperandMatchResultTy parseExpTgt(OperandVector &Operands);
1047 OperandMatchResultTy parseSendMsgOp(OperandVector &Operands);
1048 OperandMatchResultTy parseInterpSlot(OperandVector &Operands);
1049 OperandMatchResultTy parseInterpAttr(OperandVector &Operands);
1050 OperandMatchResultTy parseSOppBrTarget(OperandVector &Operands);
1051
1052 bool parseSwizzleOperands(const unsigned OpNum, int64_t* Op,
1053 const unsigned MinVal,
1054 const unsigned MaxVal,
1055 const StringRef ErrMsg);
1056 OperandMatchResultTy parseSwizzleOp(OperandVector &Operands);
1057 bool parseSwizzleOffset(int64_t &Imm);
1058 bool parseSwizzleMacro(int64_t &Imm);
1059 bool parseSwizzleQuadPerm(int64_t &Imm);
1060 bool parseSwizzleBitmaskPerm(int64_t &Imm);
1061 bool parseSwizzleBroadcast(int64_t &Imm);
1062 bool parseSwizzleSwap(int64_t &Imm);
1063 bool parseSwizzleReverse(int64_t &Imm);
1064
1065 void cvtMubuf(MCInst &Inst, const OperandVector &Operands) { cvtMubufImpl(Inst, Operands, false, false); }
1066 void cvtMubufAtomic(MCInst &Inst, const OperandVector &Operands) { cvtMubufImpl(Inst, Operands, true, false); }
1067 void cvtMubufAtomicReturn(MCInst &Inst, const OperandVector &Operands) { cvtMubufImpl(Inst, Operands, true, true); }
1068 void cvtMtbuf(MCInst &Inst, const OperandVector &Operands);
1069
1070 AMDGPUOperand::Ptr defaultGLC() const;
1071 AMDGPUOperand::Ptr defaultSLC() const;
1072 AMDGPUOperand::Ptr defaultTFE() const;
1073
1074 AMDGPUOperand::Ptr defaultDMask() const;
1075 AMDGPUOperand::Ptr defaultUNorm() const;
1076 AMDGPUOperand::Ptr defaultDA() const;
1077 AMDGPUOperand::Ptr defaultR128() const;
1078 AMDGPUOperand::Ptr defaultLWE() const;
1079 AMDGPUOperand::Ptr defaultSMRDOffset8() const;
1080 AMDGPUOperand::Ptr defaultSMRDOffset20() const;
1081 AMDGPUOperand::Ptr defaultSMRDLiteralOffset() const;
1082 AMDGPUOperand::Ptr defaultOffsetU12() const;
1083 AMDGPUOperand::Ptr defaultOffsetS13() const;
1084
1085 OperandMatchResultTy parseOModOperand(OperandVector &Operands);
1086
1087 void cvtVOP3(MCInst &Inst, const OperandVector &Operands,
1088 OptionalImmIndexMap &OptionalIdx);
1089 void cvtVOP3OpSel(MCInst &Inst, const OperandVector &Operands);
1090 void cvtVOP3(MCInst &Inst, const OperandVector &Operands);
1091 void cvtVOP3P(MCInst &Inst, const OperandVector &Operands);
1092
1093 void cvtVOP3Interp(MCInst &Inst, const OperandVector &Operands);
1094
1095 void cvtMIMG(MCInst &Inst, const OperandVector &Operands,
1096 bool IsAtomic = false);
1097 void cvtMIMGAtomic(MCInst &Inst, const OperandVector &Operands);
1098
1099 OperandMatchResultTy parseDPPCtrl(OperandVector &Operands);
1100 AMDGPUOperand::Ptr defaultRowMask() const;
1101 AMDGPUOperand::Ptr defaultBankMask() const;
1102 AMDGPUOperand::Ptr defaultBoundCtrl() const;
1103 void cvtDPP(MCInst &Inst, const OperandVector &Operands);
1104
1105 OperandMatchResultTy parseSDWASel(OperandVector &Operands, StringRef Prefix,
1106 AMDGPUOperand::ImmTy Type);
1107 OperandMatchResultTy parseSDWADstUnused(OperandVector &Operands);
1108 void cvtSdwaVOP1(MCInst &Inst, const OperandVector &Operands);
1109 void cvtSdwaVOP2(MCInst &Inst, const OperandVector &Operands);
1110 void cvtSdwaVOP2b(MCInst &Inst, const OperandVector &Operands);
1111 void cvtSdwaVOPC(MCInst &Inst, const OperandVector &Operands);
1112 void cvtSDWA(MCInst &Inst, const OperandVector &Operands,
1113 uint64_t BasicInstType, bool skipVcc = false);
1114};
1115
1116struct OptionalOperand {
1117 const char *Name;
1118 AMDGPUOperand::ImmTy Type;
1119 bool IsBit;
1120 bool (*ConvertResult)(int64_t&);
1121};
1122
1123} // end anonymous namespace
1124
1125// May be called with an integer type of equivalent bitwidth.
1126static const fltSemantics *getFltSemantics(unsigned Size) {
1127 switch (Size) {
1128 case 4:
1129 return &APFloat::IEEEsingle();
1130 case 8:
1131 return &APFloat::IEEEdouble();
1132 case 2:
1133 return &APFloat::IEEEhalf();
1134 default:
1135 llvm_unreachable("unsupported fp type")::llvm::llvm_unreachable_internal("unsupported fp type", "/build/llvm-toolchain-snapshot-6.0~svn321639/lib/Target/AMDGPU/AsmParser/AMDGPUAsmParser.cpp"
, 1135)
;
1136 }
1137}
1138
1139static const fltSemantics *getFltSemantics(MVT VT) {
1140 return getFltSemantics(VT.getSizeInBits() / 8);
1141}
1142
1143static const fltSemantics *getOpFltSemantics(uint8_t OperandType) {
1144 switch (OperandType) {
1145 case AMDGPU::OPERAND_REG_IMM_INT32:
1146 case AMDGPU::OPERAND_REG_IMM_FP32:
1147 case AMDGPU::OPERAND_REG_INLINE_C_INT32:
1148 case AMDGPU::OPERAND_REG_INLINE_C_FP32:
1149 return &APFloat::IEEEsingle();
1150 case AMDGPU::OPERAND_REG_IMM_INT64:
1151 case AMDGPU::OPERAND_REG_IMM_FP64:
1152 case AMDGPU::OPERAND_REG_INLINE_C_INT64:
1153 case AMDGPU::OPERAND_REG_INLINE_C_FP64:
1154 return &APFloat::IEEEdouble();
1155 case AMDGPU::OPERAND_REG_IMM_INT16:
1156 case AMDGPU::OPERAND_REG_IMM_FP16:
1157 case AMDGPU::OPERAND_REG_INLINE_C_INT16:
1158 case AMDGPU::OPERAND_REG_INLINE_C_FP16:
1159 case AMDGPU::OPERAND_REG_INLINE_C_V2INT16:
1160 case AMDGPU::OPERAND_REG_INLINE_C_V2FP16:
1161 return &APFloat::IEEEhalf();
1162 default:
1163 llvm_unreachable("unsupported fp type")::llvm::llvm_unreachable_internal("unsupported fp type", "/build/llvm-toolchain-snapshot-6.0~svn321639/lib/Target/AMDGPU/AsmParser/AMDGPUAsmParser.cpp"
, 1163)
;
1164 }
1165}
1166
1167//===----------------------------------------------------------------------===//
1168// Operand
1169//===----------------------------------------------------------------------===//
1170
1171static bool canLosslesslyConvertToFPType(APFloat &FPLiteral, MVT VT) {
1172 bool Lost;
1173
1174 // Convert literal to single precision
1175 APFloat::opStatus Status = FPLiteral.convert(*getFltSemantics(VT),
1176 APFloat::rmNearestTiesToEven,
1177 &Lost);
1178 // We allow precision loss but not overflow or underflow
1179 if (Status != APFloat::opOK &&
1180 Lost &&
1181 ((Status & APFloat::opOverflow) != 0 ||
1182 (Status & APFloat::opUnderflow) != 0)) {
1183 return false;
1184 }
1185
1186 return true;
1187}
1188
1189bool AMDGPUOperand::isInlinableImm(MVT type) const {
1190 if (!isImmTy(ImmTyNone)) {
1191 // Only plain immediates are inlinable (e.g. "clamp" attribute is not)
1192 return false;
1193 }
1194 // TODO: We should avoid using host float here. It would be better to
1195 // check the float bit values which is what a few other places do.
1196 // We've had bot failures before due to weird NaN support on mips hosts.
1197
1198 APInt Literal(64, Imm.Val);
1199
1200 if (Imm.IsFPImm) { // We got fp literal token
1201 if (type == MVT::f64 || type == MVT::i64) { // Expected 64-bit operand
1202 return AMDGPU::isInlinableLiteral64(Imm.Val,
1203 AsmParser->hasInv2PiInlineImm());
1204 }
1205
1206 APFloat FPLiteral(APFloat::IEEEdouble(), APInt(64, Imm.Val));
1207 if (!canLosslesslyConvertToFPType(FPLiteral, type))
1208 return false;
1209
1210 if (type.getScalarSizeInBits() == 16) {
1211 return AMDGPU::isInlinableLiteral16(
1212 static_cast<int16_t>(FPLiteral.bitcastToAPInt().getZExtValue()),
1213 AsmParser->hasInv2PiInlineImm());
1214 }
1215
1216 // Check if single precision literal is inlinable
1217 return AMDGPU::isInlinableLiteral32(
1218 static_cast<int32_t>(FPLiteral.bitcastToAPInt().getZExtValue()),
1219 AsmParser->hasInv2PiInlineImm());
1220 }
1221
1222 // We got int literal token.
1223 if (type == MVT::f64 || type == MVT::i64) { // Expected 64-bit operand
1224 return AMDGPU::isInlinableLiteral64(Imm.Val,
1225 AsmParser->hasInv2PiInlineImm());
1226 }
1227
1228 if (type.getScalarSizeInBits() == 16) {
1229 return AMDGPU::isInlinableLiteral16(
1230 static_cast<int16_t>(Literal.getLoBits(16).getSExtValue()),
1231 AsmParser->hasInv2PiInlineImm());
1232 }
1233
1234 return AMDGPU::isInlinableLiteral32(
1235 static_cast<int32_t>(Literal.getLoBits(32).getZExtValue()),
1236 AsmParser->hasInv2PiInlineImm());
1237}
1238
1239bool AMDGPUOperand::isLiteralImm(MVT type) const {
1240 // Check that this immediate can be added as literal
1241 if (!isImmTy(ImmTyNone)) {
1242 return false;
1243 }
1244
1245 if (!Imm.IsFPImm) {
1246 // We got int literal token.
1247
1248 if (type == MVT::f64 && hasFPModifiers()) {
1249 // Cannot apply fp modifiers to int literals preserving the same semantics
1250 // for VOP1/2/C and VOP3 because of integer truncation. To avoid ambiguity,
1251 // disable these cases.
1252 return false;
1253 }
1254
1255 unsigned Size = type.getSizeInBits();
1256 if (Size == 64)
1257 Size = 32;
1258
1259 // FIXME: 64-bit operands can zero extend, sign extend, or pad zeroes for FP
1260 // types.
1261 return isUIntN(Size, Imm.Val) || isIntN(Size, Imm.Val);
1262 }
1263
1264 // We got fp literal token
1265 if (type == MVT::f64) { // Expected 64-bit fp operand
1266 // We would set the low 64 bits of the literal to zeroes, but we accept such literals
1267 return true;
1268 }
1269
1270 if (type == MVT::i64) { // Expected 64-bit int operand
1271 // We don't allow fp literals in 64-bit integer instructions. It is
1272 // unclear how we should encode them.
1273 return false;
1274 }
1275
1276 APFloat FPLiteral(APFloat::IEEEdouble(), APInt(64, Imm.Val));
1277 return canLosslesslyConvertToFPType(FPLiteral, type);
1278}
1279
1280bool AMDGPUOperand::isRegClass(unsigned RCID) const {
1281 return isRegKind() && AsmParser->getMRI()->getRegClass(RCID).contains(getReg());
1282}
1283
1284bool AMDGPUOperand::isSDWARegKind() const {
1285 if (AsmParser->isVI())
1286 return isVReg();
1287 else if (AsmParser->isGFX9())
1288 return isRegKind();
1289 else
1290 return false;
1291}
1292
1293uint64_t AMDGPUOperand::applyInputFPModifiers(uint64_t Val, unsigned Size) const
1294{
1295 assert(isImmTy(ImmTyNone) && Imm.Mods.hasFPModifiers());
1296 assert(Size == 2 || Size == 4 || Size == 8);
1297
1298 const uint64_t FpSignMask = (1ULL << (Size * 8 - 1));
1299
1300 if (Imm.Mods.Abs) {
1301 Val &= ~FpSignMask;
1302 }
1303 if (Imm.Mods.Neg) {
1304 Val ^= FpSignMask;
1305 }
1306
1307 return Val;
1308}
1309
1310void AMDGPUOperand::addImmOperands(MCInst &Inst, unsigned N, bool ApplyModifiers) const {
1311 if (AMDGPU::isSISrcOperand(AsmParser->getMII()->get(Inst.getOpcode()),
1312 Inst.getNumOperands())) {
1313 addLiteralImmOperand(Inst, Imm.Val,
1314 ApplyModifiers &
1315 isImmTy(ImmTyNone) && Imm.Mods.hasFPModifiers());
1316 } else {
1317 assert(!isImmTy(ImmTyNone) || !hasModifiers());
1318 Inst.addOperand(MCOperand::createImm(Imm.Val));
1319 }
1320}
1321
1322void AMDGPUOperand::addLiteralImmOperand(MCInst &Inst, int64_t Val, bool ApplyModifiers) const {
1323 const auto& InstDesc = AsmParser->getMII()->get(Inst.getOpcode());
1324 auto OpNum = Inst.getNumOperands();
1325 // Check that this operand accepts literals
1326 assert(AMDGPU::isSISrcOperand(InstDesc, OpNum));
1327
1328 if (ApplyModifiers) {
1329 assert(AMDGPU::isSISrcFPOperand(InstDesc, OpNum));
1330 const unsigned Size = Imm.IsFPImm ? sizeof(double) : getOperandSize(InstDesc, OpNum);
1331 Val = applyInputFPModifiers(Val, Size);
1332 }
1333
1334 APInt Literal(64, Val);
1335 uint8_t OpTy = InstDesc.OpInfo[OpNum].OperandType;
1336
1337 if (Imm.IsFPImm) { // We got fp literal token
1338 switch (OpTy) {
1339 case AMDGPU::OPERAND_REG_IMM_INT64:
1340 case AMDGPU::OPERAND_REG_IMM_FP64:
1341 case AMDGPU::OPERAND_REG_INLINE_C_INT64:
1342 case AMDGPU::OPERAND_REG_INLINE_C_FP64:
1343 if (AMDGPU::isInlinableLiteral64(Literal.getZExtValue(),
1344 AsmParser->hasInv2PiInlineImm())) {
1345 Inst.addOperand(MCOperand::createImm(Literal.getZExtValue()));
1346 return;
1347 }
1348
1349 // Non-inlineable
1350 if (AMDGPU::isSISrcFPOperand(InstDesc, OpNum)) { // Expected 64-bit fp operand
1351 // For fp operands we check if low 32 bits are zeros
1352 if (Literal.getLoBits(32) != 0) {
1353 const_cast<AMDGPUAsmParser *>(AsmParser)->Warning(Inst.getLoc(),
1354 "Can't encode literal as exact 64-bit floating-point operand. "
1355 "Low 32-bits will be set to zero");
1356 }
1357
1358 Inst.addOperand(MCOperand::createImm(Literal.lshr(32).getZExtValue()));
1359 return;
1360 }
1361
1362 // We don't allow fp literals in 64-bit integer instructions. It is
1363 // unclear how we should encode them. This case should be checked earlier
1364 // in predicate methods (isLiteralImm())
1365 llvm_unreachable("fp literal in 64-bit integer instruction.")::llvm::llvm_unreachable_internal("fp literal in 64-bit integer instruction."
, "/build/llvm-toolchain-snapshot-6.0~svn321639/lib/Target/AMDGPU/AsmParser/AMDGPUAsmParser.cpp"
, 1365)
;
1366
1367 case AMDGPU::OPERAND_REG_IMM_INT32:
1368 case AMDGPU::OPERAND_REG_IMM_FP32:
1369 case AMDGPU::OPERAND_REG_INLINE_C_INT32:
1370 case AMDGPU::OPERAND_REG_INLINE_C_FP32:
1371 case AMDGPU::OPERAND_REG_IMM_INT16:
1372 case AMDGPU::OPERAND_REG_IMM_FP16:
1373 case AMDGPU::OPERAND_REG_INLINE_C_INT16:
1374 case AMDGPU::OPERAND_REG_INLINE_C_FP16:
1375 case AMDGPU::OPERAND_REG_INLINE_C_V2INT16:
1376 case AMDGPU::OPERAND_REG_INLINE_C_V2FP16: {
1377 bool lost;
1378 APFloat FPLiteral(APFloat::IEEEdouble(), Literal);
1379 // Convert literal to single precision
1380 FPLiteral.convert(*getOpFltSemantics(OpTy),
1381 APFloat::rmNearestTiesToEven, &lost);
1382 // We allow precision loss but not overflow or underflow. This should be
1383 // checked earlier in isLiteralImm()
1384
1385 uint64_t ImmVal = FPLiteral.bitcastToAPInt().getZExtValue();
1386 if (OpTy == AMDGPU::OPERAND_REG_INLINE_C_V2INT16 ||
1387 OpTy == AMDGPU::OPERAND_REG_INLINE_C_V2FP16) {
1388 ImmVal |= (ImmVal << 16);
1389 }
1390
1391 Inst.addOperand(MCOperand::createImm(ImmVal));
1392 return;
1393 }
1394 default:
1395 llvm_unreachable("invalid operand size")::llvm::llvm_unreachable_internal("invalid operand size", "/build/llvm-toolchain-snapshot-6.0~svn321639/lib/Target/AMDGPU/AsmParser/AMDGPUAsmParser.cpp"
, 1395)
;
1396 }
1397
1398 return;
1399 }
1400
1401 // We got int literal token.
1402 // Only sign extend inline immediates.
1403 // FIXME: No errors on truncation
1404 switch (OpTy) {
1405 case AMDGPU::OPERAND_REG_IMM_INT32:
1406 case AMDGPU::OPERAND_REG_IMM_FP32:
1407 case AMDGPU::OPERAND_REG_INLINE_C_INT32:
1408 case AMDGPU::OPERAND_REG_INLINE_C_FP32:
1409 if (isInt<32>(Val) &&
1410 AMDGPU::isInlinableLiteral32(static_cast<int32_t>(Val),
1411 AsmParser->hasInv2PiInlineImm())) {
1412 Inst.addOperand(MCOperand::createImm(Val));
1413 return;
1414 }
1415
1416 Inst.addOperand(MCOperand::createImm(Val & 0xffffffff));
1417 return;
1418
1419 case AMDGPU::OPERAND_REG_IMM_INT64:
1420 case AMDGPU::OPERAND_REG_IMM_FP64:
1421 case AMDGPU::OPERAND_REG_INLINE_C_INT64:
1422 case AMDGPU::OPERAND_REG_INLINE_C_FP64:
1423 if (AMDGPU::isInlinableLiteral64(Val, AsmParser->hasInv2PiInlineImm())) {
1424 Inst.addOperand(MCOperand::createImm(Val));
1425 return;
1426 }
1427
1428 Inst.addOperand(MCOperand::createImm(Lo_32(Val)));
1429 return;
1430
1431 case AMDGPU::OPERAND_REG_IMM_INT16:
1432 case AMDGPU::OPERAND_REG_IMM_FP16:
1433 case AMDGPU::OPERAND_REG_INLINE_C_INT16:
1434 case AMDGPU::OPERAND_REG_INLINE_C_FP16:
1435 if (isInt<16>(Val) &&
1436 AMDGPU::isInlinableLiteral16(static_cast<int16_t>(Val),
1437 AsmParser->hasInv2PiInlineImm())) {
1438 Inst.addOperand(MCOperand::createImm(Val));
1439 return;
1440 }
1441
1442 Inst.addOperand(MCOperand::createImm(Val & 0xffff));
1443 return;
1444
1445 case AMDGPU::OPERAND_REG_INLINE_C_V2INT16:
1446 case AMDGPU::OPERAND_REG_INLINE_C_V2FP16: {
1447 auto LiteralVal = static_cast<uint16_t>(Literal.getLoBits(16).getZExtValue());
1448 assert(AMDGPU::isInlinableLiteral16(LiteralVal,
1449 AsmParser->hasInv2PiInlineImm()));
1450
1451 uint32_t ImmVal = static_cast<uint32_t>(LiteralVal) << 16 |
1452 static_cast<uint32_t>(LiteralVal);
1453 Inst.addOperand(MCOperand::createImm(ImmVal));
1454 return;
1455 }
1456 default:
1457 llvm_unreachable("invalid operand size")::llvm::llvm_unreachable_internal("invalid operand size", "/build/llvm-toolchain-snapshot-6.0~svn321639/lib/Target/AMDGPU/AsmParser/AMDGPUAsmParser.cpp"
, 1457)
;
1458 }
1459}
1460
1461template <unsigned Bitwidth>
1462void AMDGPUOperand::addKImmFPOperands(MCInst &Inst, unsigned N) const {
1463 APInt Literal(64, Imm.Val);
1464
1465 if (!Imm.IsFPImm) {
1466 // We got int literal token.
1467 Inst.addOperand(MCOperand::createImm(Literal.getLoBits(Bitwidth).getZExtValue()));
1468 return;
1469 }
1470
1471 bool Lost;
1472 APFloat FPLiteral(APFloat::IEEEdouble(), Literal);
1473 FPLiteral.convert(*getFltSemantics(Bitwidth / 8),
1474 APFloat::rmNearestTiesToEven, &Lost);
1475 Inst.addOperand(MCOperand::createImm(FPLiteral.bitcastToAPInt().getZExtValue()));
1476}
1477
1478void AMDGPUOperand::addRegOperands(MCInst &Inst, unsigned N) const {
1479 Inst.addOperand(MCOperand::createReg(AMDGPU::getMCReg(getReg(), AsmParser->getSTI())));
1480}
1481
1482//===----------------------------------------------------------------------===//
1483// AsmParser
1484//===----------------------------------------------------------------------===//
1485
1486static int getRegClass(RegisterKind Is, unsigned RegWidth) {
1487 if (Is == IS_VGPR) {
1488 switch (RegWidth) {
1489 default: return -1;
1490 case 1: return AMDGPU::VGPR_32RegClassID;
1491 case 2: return AMDGPU::VReg_64RegClassID;
1492 case 3: return AMDGPU::VReg_96RegClassID;
1493 case 4: return AMDGPU::VReg_128RegClassID;
1494 case 8: return AMDGPU::VReg_256RegClassID;
1495 case 16: return AMDGPU::VReg_512RegClassID;
1496 }
1497 } else if (Is == IS_TTMP) {
1498 switch (RegWidth) {
1499 default: return -1;
1500 case 1: return AMDGPU::TTMP_32RegClassID;
1501 case 2: return AMDGPU::TTMP_64RegClassID;
1502 case 4: return AMDGPU::TTMP_128RegClassID;
1503 case 8: return AMDGPU::TTMP_256RegClassID;
1504 case 16: return AMDGPU::TTMP_512RegClassID;
1505 }
1506 } else if (Is == IS_SGPR) {
1507 switch (RegWidth) {
1508 default: return -1;
1509 case 1: return AMDGPU::SGPR_32RegClassID;
1510 case 2: return AMDGPU::SGPR_64RegClassID;
1511 case 4: return AMDGPU::SGPR_128RegClassID;
1512 case 8: return AMDGPU::SGPR_256RegClassID;
1513 case 16: return AMDGPU::SGPR_512RegClassID;
1514 }
1515 }
1516 return -1;
1517}
1518
1519static unsigned getSpecialRegForName(StringRef RegName) {
1520 return StringSwitch<unsigned>(RegName)
1521 .Case("exec", AMDGPU::EXEC)
1522 .Case("vcc", AMDGPU::VCC)
1523 .Case("flat_scratch", AMDGPU::FLAT_SCR)
1524 .Case("m0", AMDGPU::M0)
1525 .Case("scc", AMDGPU::SCC)
1526 .Case("tba", AMDGPU::TBA)
1527 .Case("tma", AMDGPU::TMA)
1528 .Case("flat_scratch_lo", AMDGPU::FLAT_SCR_LO)
1529 .Case("flat_scratch_hi", AMDGPU::FLAT_SCR_HI)
1530 .Case("vcc_lo", AMDGPU::VCC_LO)
1531 .Case("vcc_hi", AMDGPU::VCC_HI)
1532 .Case("exec_lo", AMDGPU::EXEC_LO)
1533 .Case("exec_hi", AMDGPU::EXEC_HI)
1534 .Case("tma_lo", AMDGPU::TMA_LO)
1535 .Case("tma_hi", AMDGPU::TMA_HI)
1536 .Case("tba_lo", AMDGPU::TBA_LO)
1537 .Case("tba_hi", AMDGPU::TBA_HI)
1538 .Default(0);
1539}
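
A quick illustration of the lookup above (hypothetical calls, not part of the
original file; results follow directly from the StringSwitch cases):

    unsigned R = getSpecialRegForName("vcc_lo");    // yields AMDGPU::VCC_LO
    unsigned U = getSpecialRegForName("not_a_reg"); // yields 0, i.e. no match
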
1540
1541bool AMDGPUAsmParser::ParseRegister(unsigned &RegNo, SMLoc &StartLoc,
1542 SMLoc &EndLoc) {
1543 auto R = parseRegister();
1544 if (!R) return true;
1545  assert(R->isReg());
1546 RegNo = R->getReg();
1547 StartLoc = R->getStartLoc();
1548 EndLoc = R->getEndLoc();
1549 return false;
1550}
1551
1552bool AMDGPUAsmParser::AddNextRegisterToList(unsigned &Reg, unsigned &RegWidth,
1553 RegisterKind RegKind, unsigned Reg1,
1554 unsigned RegNum) {
1555 switch (RegKind) {
1556 case IS_SPECIAL:
1557 if (Reg == AMDGPU::EXEC_LO && Reg1 == AMDGPU::EXEC_HI) {
1558 Reg = AMDGPU::EXEC;
1559 RegWidth = 2;
1560 return true;
1561 }
1562 if (Reg == AMDGPU::FLAT_SCR_LO && Reg1 == AMDGPU::FLAT_SCR_HI) {
1563 Reg = AMDGPU::FLAT_SCR;
1564 RegWidth = 2;
1565 return true;
1566 }
1567 if (Reg == AMDGPU::VCC_LO && Reg1 == AMDGPU::VCC_HI) {
1568 Reg = AMDGPU::VCC;
1569 RegWidth = 2;
1570 return true;
1571 }
1572 if (Reg == AMDGPU::TBA_LO && Reg1 == AMDGPU::TBA_HI) {
1573 Reg = AMDGPU::TBA;
1574 RegWidth = 2;
1575 return true;
1576 }
1577 if (Reg == AMDGPU::TMA_LO && Reg1 == AMDGPU::TMA_HI) {
1578 Reg = AMDGPU::TMA;
1579 RegWidth = 2;
1580 return true;
1581 }
1582 return false;
1583 case IS_VGPR:
1584 case IS_SGPR:
1585 case IS_TTMP:
1586 if (Reg1 != Reg + RegWidth) {
1587 return false;
1588 }
1589 RegWidth++;
1590 return true;
1591 default:
1592    llvm_unreachable("unexpected register kind");
1593 }
1594}
1595
1596bool AMDGPUAsmParser::ParseAMDGPURegister(RegisterKind &RegKind, unsigned &Reg,
1597 unsigned &RegNum, unsigned &RegWidth,
1598 unsigned *DwordRegIndex) {
1599 if (DwordRegIndex) { *DwordRegIndex = 0; }
1600 const MCRegisterInfo *TRI = getContext().getRegisterInfo();
1601 if (getLexer().is(AsmToken::Identifier)) {
1602 StringRef RegName = Parser.getTok().getString();
1603 if ((Reg = getSpecialRegForName(RegName))) {
1604 Parser.Lex();
1605 RegKind = IS_SPECIAL;
1606 } else {
1607 unsigned RegNumIndex = 0;
1608 if (RegName[0] == 'v') {
1609 RegNumIndex = 1;
1610 RegKind = IS_VGPR;
1611 } else if (RegName[0] == 's') {
1612 RegNumIndex = 1;
1613 RegKind = IS_SGPR;
1614 } else if (RegName.startswith("ttmp")) {
1615 RegNumIndex = strlen("ttmp");
1616 RegKind = IS_TTMP;
1617 } else {
1618 return false;
1619 }
1620 if (RegName.size() > RegNumIndex) {
1621 // Single 32-bit register: vXX.
1622 if (RegName.substr(RegNumIndex).getAsInteger(10, RegNum))
1623 return false;
1624 Parser.Lex();
1625 RegWidth = 1;
1626 } else {
1627 // Range of registers: v[XX:YY]. ":YY" is optional.
1628 Parser.Lex();
1629 int64_t RegLo, RegHi;
1630 if (getLexer().isNot(AsmToken::LBrac))
1631 return false;
1632 Parser.Lex();
1633
1634 if (getParser().parseAbsoluteExpression(RegLo))
1635 return false;
1636
1637 const bool isRBrace = getLexer().is(AsmToken::RBrac);
1638 if (!isRBrace && getLexer().isNot(AsmToken::Colon))
1639 return false;
1640 Parser.Lex();
1641
1642 if (isRBrace) {
1643 RegHi = RegLo;
1644 } else {
1645 if (getParser().parseAbsoluteExpression(RegHi))
1646 return false;
1647
1648 if (getLexer().isNot(AsmToken::RBrac))
1649 return false;
1650 Parser.Lex();
1651 }
1652 RegNum = (unsigned) RegLo;
1653 RegWidth = (RegHi - RegLo) + 1;
1654 }
1655 }
1656 } else if (getLexer().is(AsmToken::LBrac)) {
1657 // List of consecutive registers: [s0,s1,s2,s3]
1658 Parser.Lex();
1659 if (!ParseAMDGPURegister(RegKind, Reg, RegNum, RegWidth, nullptr))
1660 return false;
1661 if (RegWidth != 1)
1662 return false;
1663 RegisterKind RegKind1;
1664 unsigned Reg1, RegNum1, RegWidth1;
1665 do {
1666 if (getLexer().is(AsmToken::Comma)) {
1667 Parser.Lex();
1668 } else if (getLexer().is(AsmToken::RBrac)) {
1669 Parser.Lex();
1670 break;
1671 } else if (ParseAMDGPURegister(RegKind1, Reg1, RegNum1, RegWidth1, nullptr)) {
1672 if (RegWidth1 != 1) {
1673 return false;
1674 }
1675 if (RegKind1 != RegKind) {
1676 return false;
1677 }
1678 if (!AddNextRegisterToList(Reg, RegWidth, RegKind1, Reg1, RegNum1)) {
1679 return false;
1680 }
1681 } else {
1682 return false;
1683 }
1684 } while (true);
1685 } else {
1686 return false;
1687 }
1688 switch (RegKind) {
1689 case IS_SPECIAL:
1690 RegNum = 0;
1691 RegWidth = 1;
1692 break;
1693 case IS_VGPR:
1694 case IS_SGPR:
1695 case IS_TTMP:
1696 {
1697 unsigned Size = 1;
1698 if (RegKind == IS_SGPR || RegKind == IS_TTMP) {
1699 // SGPR and TTMP registers must be aligned. Max required alignment is 4 dwords.
1700 Size = std::min(RegWidth, 4u);
1701 }
1702 if (RegNum % Size != 0)
1703 return false;
1704 if (DwordRegIndex) { *DwordRegIndex = RegNum; }
1705 RegNum = RegNum / Size;
1706 int RCID = getRegClass(RegKind, RegWidth);
1707 if (RCID == -1)
1708 return false;
1709 const MCRegisterClass RC = TRI->getRegClass(RCID);
1710 if (RegNum >= RC.getNumRegs())
1711 return false;
1712 Reg = RC.getRegister(RegNum);
1713 break;
1714 }
1715
1716 default:
1717    llvm_unreachable("unexpected register kind");
1718 }
1719
1720 if (!subtargetHasRegister(*TRI, Reg))
1721 return false;
1722 return true;
1723}
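
For reference, the register syntaxes accepted by the parser above (illustrative
assembly, assuming the standard AMDGPU forms; widths count 32-bit dwords):

    v0, s7, ttmp4          // single registers (RegWidth == 1)
    v[2:5], s[0:1], v[4]   // ranges; a lone index like v[4] is a width-1 range
    [s0, s1, s2, s3]       // consecutive-register list, merged element by
                           // element through AddNextRegisterToList
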
1724
1725std::unique_ptr<AMDGPUOperand> AMDGPUAsmParser::parseRegister() {
1726 const auto &Tok = Parser.getTok();
1727 SMLoc StartLoc = Tok.getLoc();
1728 SMLoc EndLoc = Tok.getEndLoc();
1729 RegisterKind RegKind;
1730 unsigned Reg, RegNum, RegWidth, DwordRegIndex;
1731
1732 if (!ParseAMDGPURegister(RegKind, Reg, RegNum, RegWidth, &DwordRegIndex)) {
1733 return nullptr;
1734 }
1735 KernelScope.usesRegister(RegKind, DwordRegIndex, RegWidth);
1736 return AMDGPUOperand::CreateReg(this, Reg, StartLoc, EndLoc, false);
1737}
1738
1739bool
1740AMDGPUAsmParser::parseAbsoluteExpr(int64_t &Val, bool AbsMod) {
1741 if (AbsMod && getLexer().peekTok().is(AsmToken::Pipe) &&
1742 (getLexer().getKind() == AsmToken::Integer ||
1743 getLexer().getKind() == AsmToken::Real)) {
1744 // This is a workaround for handling operands like these:
1745 // |1.0|
1746 // |-1|
1747 // This syntax is not compatible with the syntax of standard
1748 // MC expressions (due to the trailing '|').
1749
1750 SMLoc EndLoc;
1751 const MCExpr *Expr;
1752
1753 if (getParser().parsePrimaryExpr(Expr, EndLoc)) {
1754 return true;
1755 }
1756
1757 return !Expr->evaluateAsAbsolute(Val);
1758 }
1759
1760 return getParser().parseAbsoluteExpression(Val);
1761}
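
Illustrative inputs for the workaround above (a hedged sketch of the intent):
with AbsMod set, operands such as |1.0| and |-1| are handled via
parsePrimaryExpr so that the trailing '|' is left for the caller to consume;
a plain parseAbsoluteExpression would instead try to treat '|' as a binary
operator.
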
1762
1763OperandMatchResultTy
1764AMDGPUAsmParser::parseImm(OperandVector &Operands, bool AbsMod) {
1765 // TODO: add syntactic sugar for 1/(2*PI)
1766 bool Minus = false;
1767 if (getLexer().getKind() == AsmToken::Minus) {
1768 const AsmToken NextToken = getLexer().peekTok();
1769 if (!NextToken.is(AsmToken::Integer) &&
1770 !NextToken.is(AsmToken::Real)) {
1771 return MatchOperand_NoMatch;
1772 }
1773 Minus = true;
1774 Parser.Lex();
1775 }
1776
1777 SMLoc S = Parser.getTok().getLoc();
1778 switch(getLexer().getKind()) {
1779 case AsmToken::Integer: {
1780 int64_t IntVal;
1781 if (parseAbsoluteExpr(IntVal, AbsMod))
1782 return MatchOperand_ParseFail;
1783 if (Minus)
1784 IntVal *= -1;
1785 Operands.push_back(AMDGPUOperand::CreateImm(this, IntVal, S));
1786 return MatchOperand_Success;
1787 }
1788 case AsmToken::Real: {
1789 int64_t IntVal;
1790 if (parseAbsoluteExpr(IntVal, AbsMod))
1791 return MatchOperand_ParseFail;
1792
1793 APFloat F(BitsToDouble(IntVal));
1794 if (Minus)
1795 F.changeSign();
1796 Operands.push_back(
1797 AMDGPUOperand::CreateImm(this, F.bitcastToAPInt().getZExtValue(), S,
1798 AMDGPUOperand::ImmTyNone, true));
1799 return MatchOperand_Success;
1800 }
1801 default:
1802 return MatchOperand_NoMatch;
1803 }
1804}
1805
1806OperandMatchResultTy
1807AMDGPUAsmParser::parseReg(OperandVector &Operands) {
1808 if (auto R = parseRegister()) {
1809    assert(R->isReg());
1810 R->Reg.IsForcedVOP3 = isForcedVOP3();
1811 Operands.push_back(std::move(R));
1812 return MatchOperand_Success;
1813 }
1814 return MatchOperand_NoMatch;
1815}
1816
1817OperandMatchResultTy
1818AMDGPUAsmParser::parseRegOrImm(OperandVector &Operands, bool AbsMod) {
1819 auto res = parseImm(Operands, AbsMod);
1820 if (res != MatchOperand_NoMatch) {
1821 return res;
1822 }
1823
1824 return parseReg(Operands);
1825}
1826
1827OperandMatchResultTy
1828AMDGPUAsmParser::parseRegOrImmWithFPInputMods(OperandVector &Operands,
1829 bool AllowImm) {
1830 bool Negate = false, Negate2 = false, Abs = false, Abs2 = false;
1831
1832 if (getLexer().getKind()== AsmToken::Minus) {
1833 const AsmToken NextToken = getLexer().peekTok();
1834
1835 // Disable ambiguous constructs like '--1' etc. Should use neg(-1) instead.
1836 if (NextToken.is(AsmToken::Minus)) {
1837 Error(Parser.getTok().getLoc(), "invalid syntax, expected 'neg' modifier");
1838 return MatchOperand_ParseFail;
1839 }
1840
1841 // '-' followed by an integer literal N should be interpreted as integer
1842 // negation rather than a floating-point NEG modifier applied to N.
1843 // Besides being counter-intuitive, such use of the floating-point NEG modifier
1844 // results in different meanings for integer literals used with VOP1/2/C
1845 // and VOP3, for example:
1846 // v_exp_f32_e32 v5, -1 // VOP1: src0 = 0xFFFFFFFF
1847 // v_exp_f32_e64 v5, -1 // VOP3: src0 = 0x80000001
1848 // Negative fp literals should be handled likewise for uniformity.
1849 if (!NextToken.is(AsmToken::Integer) && !NextToken.is(AsmToken::Real)) {
1850 Parser.Lex();
1851 Negate = true;
1852 }
1853 }
1854
1855 if (getLexer().getKind() == AsmToken::Identifier &&
1856 Parser.getTok().getString() == "neg") {
1857 if (Negate) {
1858 Error(Parser.getTok().getLoc(), "expected register or immediate");
1859 return MatchOperand_ParseFail;
1860 }
1861 Parser.Lex();
1862 Negate2 = true;
1863 if (getLexer().isNot(AsmToken::LParen)) {
1864 Error(Parser.getTok().getLoc(), "expected left paren after neg");
1865 return MatchOperand_ParseFail;
1866 }
1867 Parser.Lex();
1868 }
1869
1870 if (getLexer().getKind() == AsmToken::Identifier &&
1871 Parser.getTok().getString() == "abs") {
1872 Parser.Lex();
1873 Abs2 = true;
1874 if (getLexer().isNot(AsmToken::LParen)) {
1875 Error(Parser.getTok().getLoc(), "expected left paren after abs");
1876 return MatchOperand_ParseFail;
1877 }
1878 Parser.Lex();
1879 }
1880
1881 if (getLexer().getKind() == AsmToken::Pipe) {
1882 if (Abs2) {
1883 Error(Parser.getTok().getLoc(), "expected register or immediate");
1884 return MatchOperand_ParseFail;
1885 }
1886 Parser.Lex();
1887 Abs = true;
1888 }
1889
1890 OperandMatchResultTy Res;
1891 if (AllowImm) {
1892 Res = parseRegOrImm(Operands, Abs);
1893 } else {
1894 Res = parseReg(Operands);
1895 }
1896 if (Res != MatchOperand_Success) {
1897 return Res;
1898 }
1899
1900 AMDGPUOperand::Modifiers Mods;
1901 if (Abs) {
1902 if (getLexer().getKind() != AsmToken::Pipe) {
1903 Error(Parser.getTok().getLoc(), "expected vertical bar");
1904 return MatchOperand_ParseFail;
1905 }
1906 Parser.Lex();
1907 Mods.Abs = true;
1908 }
1909 if (Abs2) {
1910 if (getLexer().isNot(AsmToken::RParen)) {
1911 Error(Parser.getTok().getLoc(), "expected closing parentheses");
1912 return MatchOperand_ParseFail;
1913 }
1914 Parser.Lex();
1915 Mods.Abs = true;
1916 }
1917
1918 if (Negate) {
1919 Mods.Neg = true;
1920 } else if (Negate2) {
1921 if (getLexer().isNot(AsmToken::RParen)) {
1922 Error(Parser.getTok().getLoc(), "expected closing parentheses");
1923 return MatchOperand_ParseFail;
1924 }
1925 Parser.Lex();
1926 Mods.Neg = true;
1927 }
1928
1929 if (Mods.hasFPModifiers()) {
1930 AMDGPUOperand &Op = static_cast<AMDGPUOperand &>(*Operands.back());
1931 Op.setModifiers(Mods);
1932 }
1933 return MatchOperand_Success;
1934}
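
Examples of source operands this routine accepts (illustrative assembly; the
exact instruction shapes are assumptions, the modifier syntax is from the code
above):

    v_add_f32 v0, -v1, v2           // leading '-' sets the Neg modifier
    v_add_f32 v0, |v1|, v2          // '|...|' sets the Abs modifier
    v_add_f32 v0, neg(abs(v1)), v2  // functional forms, closed by ')' in order
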
1935
1936OperandMatchResultTy
1937AMDGPUAsmParser::parseRegOrImmWithIntInputMods(OperandVector &Operands,
1938 bool AllowImm) {
1939 bool Sext = false;
1940
1941 if (getLexer().getKind() == AsmToken::Identifier &&
1942 Parser.getTok().getString() == "sext") {
1943 Parser.Lex();
1944 Sext = true;
1945 if (getLexer().isNot(AsmToken::LParen)) {
1946 Error(Parser.getTok().getLoc(), "expected left paren after sext");
1947 return MatchOperand_ParseFail;
1948 }
1949 Parser.Lex();
1950 }
1951
1952 OperandMatchResultTy Res;
1953 if (AllowImm) {
1954 Res = parseRegOrImm(Operands);
1955 } else {
1956 Res = parseReg(Operands);
1957 }
1958 if (Res != MatchOperand_Success) {
1959 return Res;
1960 }
1961
1962 AMDGPUOperand::Modifiers Mods;
1963 if (Sext) {
1964 if (getLexer().isNot(AsmToken::RParen)) {
1965 Error(Parser.getTok().getLoc(), "expected closing parentheses");
1966 return MatchOperand_ParseFail;
1967 }
1968 Parser.Lex();
1969 Mods.Sext = true;
1970 }
1971
1972 if (Mods.hasIntModifiers()) {
1973 AMDGPUOperand &Op = static_cast<AMDGPUOperand &>(*Operands.back());
1974 Op.setModifiers(Mods);
1975 }
1976
1977 return MatchOperand_Success;
1978}
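
The integer analogue accepts only the functional form (illustrative and hedged;
operand list abbreviated):

    v_add_u32_sdwa v0, vcc, sext(v1), v2 ...  // 'sext(...)' sets the Sext modifier
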
1979
1980OperandMatchResultTy
1981AMDGPUAsmParser::parseRegWithFPInputMods(OperandVector &Operands) {
1982 return parseRegOrImmWithFPInputMods(Operands, false);
1983}
1984
1985OperandMatchResultTy
1986AMDGPUAsmParser::parseRegWithIntInputMods(OperandVector &Operands) {
1987 return parseRegOrImmWithIntInputMods(Operands, false);
1988}
1989
1990OperandMatchResultTy AMDGPUAsmParser::parseVReg32OrOff(OperandVector &Operands) {
1991 std::unique_ptr<AMDGPUOperand> Reg = parseRegister();
1992 if (Reg) {
1993 Operands.push_back(std::move(Reg));
1994 return MatchOperand_Success;
1995 }
1996
1997 const AsmToken &Tok = Parser.getTok();
1998 if (Tok.getString() == "off") {
1999 Operands.push_back(AMDGPUOperand::CreateImm(this, 0, Tok.getLoc(),
2000 AMDGPUOperand::ImmTyOff, false));
2001 Parser.Lex();
2002 return MatchOperand_Success;
2003 }
2004
2005 return MatchOperand_NoMatch;
2006}
2007
2008unsigned AMDGPUAsmParser::checkTargetMatchPredicate(MCInst &Inst) {
2009 uint64_t TSFlags = MII.get(Inst.getOpcode()).TSFlags;
2010
2011 if ((getForcedEncodingSize() == 32 && (TSFlags & SIInstrFlags::VOP3)) ||
2012 (getForcedEncodingSize() == 64 && !(TSFlags & SIInstrFlags::VOP3)) ||
2013 (isForcedDPP() && !(TSFlags & SIInstrFlags::DPP)) ||
2014 (isForcedSDWA() && !(TSFlags & SIInstrFlags::SDWA)) )
2015 return Match_InvalidOperand;
2016
2017 if ((TSFlags & SIInstrFlags::VOP3) &&
2018 (TSFlags & SIInstrFlags::VOPAsmPrefer32Bit) &&
2019 getForcedEncodingSize() != 64)
2020 return Match_PreferE32;
2021
2022 if (Inst.getOpcode() == AMDGPU::V_MAC_F32_sdwa_vi ||
2023 Inst.getOpcode() == AMDGPU::V_MAC_F16_sdwa_vi) {
2024 // v_mac_f32/16 allow only dst_sel == DWORD;
2025 auto OpNum =
2026 AMDGPU::getNamedOperandIdx(Inst.getOpcode(), AMDGPU::OpName::dst_sel);
2027 const auto &Op = Inst.getOperand(OpNum);
2028 if (!Op.isImm() || Op.getImm() != AMDGPU::SDWA::SdwaSel::DWORD) {
2029 return Match_InvalidOperand;
2030 }
2031 }
2032
2033 if ((TSFlags & SIInstrFlags::FLAT) && !hasFlatOffsets()) {
2034 // FIXME: The error is produced without the correct column being reported.
2035 auto OpNum =
2036 AMDGPU::getNamedOperandIdx(Inst.getOpcode(), AMDGPU::OpName::offset);
2037 const auto &Op = Inst.getOperand(OpNum);
2038 if (Op.getImm() != 0)
2039 return Match_InvalidOperand;
2040 }
2041
2042 return Match_Success;
2043}
2044
2045 // Which asm variants we should check
2046ArrayRef<unsigned> AMDGPUAsmParser::getMatchedVariants() const {
2047 if (getForcedEncodingSize() == 32) {
2048 static const unsigned Variants[] = {AMDGPUAsmVariants::DEFAULT};
2049 return makeArrayRef(Variants);
2050 }
2051
2052 if (isForcedVOP3()) {
2053 static const unsigned Variants[] = {AMDGPUAsmVariants::VOP3};
2054 return makeArrayRef(Variants);
2055 }
2056
2057 if (isForcedSDWA()) {
2058 static const unsigned Variants[] = {AMDGPUAsmVariants::SDWA,
2059 AMDGPUAsmVariants::SDWA9};
2060 return makeArrayRef(Variants);
2061 }
2062
2063 if (isForcedDPP()) {
2064 static const unsigned Variants[] = {AMDGPUAsmVariants::DPP};
2065 return makeArrayRef(Variants);
2066 }
2067
2068 static const unsigned Variants[] = {
2069 AMDGPUAsmVariants::DEFAULT, AMDGPUAsmVariants::VOP3,
2070 AMDGPUAsmVariants::SDWA, AMDGPUAsmVariants::SDWA9, AMDGPUAsmVariants::DPP
2071 };
2072
2073 return makeArrayRef(Variants);
2074}
2075
2076unsigned AMDGPUAsmParser::findImplicitSGPRReadInVOP(const MCInst &Inst) const {
2077 const MCInstrDesc &Desc = MII.get(Inst.getOpcode());
2078 const unsigned Num = Desc.getNumImplicitUses();
2079 for (unsigned i = 0; i < Num; ++i) {
2080 unsigned Reg = Desc.ImplicitUses[i];
2081 switch (Reg) {
2082 case AMDGPU::FLAT_SCR:
2083 case AMDGPU::VCC:
2084 case AMDGPU::M0:
2085 return Reg;
2086 default:
2087 break;
2088 }
2089 }
2090 return AMDGPU::NoRegister;
2091}
2092
2093// NB: This code is correct only when used to check constant
2094 // bus limitations because GFX7 supports no f16 inline constants.
2095// Note that there are no cases when a GFX7 opcode violates
2096// constant bus limitations due to the use of an f16 constant.
2097bool AMDGPUAsmParser::isInlineConstant(const MCInst &Inst,
2098 unsigned OpIdx) const {
2099 const MCInstrDesc &Desc = MII.get(Inst.getOpcode());
2100
2101 if (!AMDGPU::isSISrcOperand(Desc, OpIdx)) {
2102 return false;
2103 }
2104
2105 const MCOperand &MO = Inst.getOperand(OpIdx);
2106
2107 int64_t Val = MO.getImm();
2108 auto OpSize = AMDGPU::getOperandSize(Desc, OpIdx);
2109
2110 switch (OpSize) { // expected operand size
2111 case 8:
2112 return AMDGPU::isInlinableLiteral64(Val, hasInv2PiInlineImm());
2113 case 4:
2114 return AMDGPU::isInlinableLiteral32(Val, hasInv2PiInlineImm());
2115 case 2: {
2116 const unsigned OperandType = Desc.OpInfo[OpIdx].OperandType;
2117 if (OperandType == AMDGPU::OPERAND_REG_INLINE_C_V2INT16 ||
2118 OperandType == AMDGPU::OPERAND_REG_INLINE_C_V2FP16) {
2119 return AMDGPU::isInlinableLiteralV216(Val, hasInv2PiInlineImm());
2120 } else {
2121 return AMDGPU::isInlinableLiteral16(Val, hasInv2PiInlineImm());
2122 }
2123 }
2124 default:
2125    llvm_unreachable("invalid operand size");
2126 }
2127}
2128
2129bool AMDGPUAsmParser::usesConstantBus(const MCInst &Inst, unsigned OpIdx) {
2130 const MCOperand &MO = Inst.getOperand(OpIdx);
2131 if (MO.isImm()) {
2132 return !isInlineConstant(Inst, OpIdx);
2133 }
2134 return !MO.isReg() ||
2135 isSGPR(mc2PseudoReg(MO.getReg()), getContext().getRegisterInfo());
2136}
2137
2138bool AMDGPUAsmParser::validateConstantBusLimitations(const MCInst &Inst) {
2139 const unsigned Opcode = Inst.getOpcode();
2140 const MCInstrDesc &Desc = MII.get(Opcode);
2141 unsigned ConstantBusUseCount = 0;
2142
2143 if (Desc.TSFlags &
2144 (SIInstrFlags::VOPC |
2145 SIInstrFlags::VOP1 | SIInstrFlags::VOP2 |
2146 SIInstrFlags::VOP3 | SIInstrFlags::VOP3P |
2147 SIInstrFlags::SDWA)) {
2148 // Check special imm operands (used by madmk, etc)
2149 if (AMDGPU::getNamedOperandIdx(Opcode, AMDGPU::OpName::imm) != -1) {
2150 ++ConstantBusUseCount;
2151 }
2152
2153 unsigned SGPRUsed = findImplicitSGPRReadInVOP(Inst);
2154 if (SGPRUsed != AMDGPU::NoRegister) {
2155 ++ConstantBusUseCount;
2156 }
2157
2158 const int Src0Idx = AMDGPU::getNamedOperandIdx(Opcode, AMDGPU::OpName::src0);
2159 const int Src1Idx = AMDGPU::getNamedOperandIdx(Opcode, AMDGPU::OpName::src1);
2160 const int Src2Idx = AMDGPU::getNamedOperandIdx(Opcode, AMDGPU::OpName::src2);
2161
2162 const int OpIndices[] = { Src0Idx, Src1Idx, Src2Idx };
2163
2164 for (int OpIdx : OpIndices) {
2165 if (OpIdx == -1) break;
2166
2167 const MCOperand &MO = Inst.getOperand(OpIdx);
2168 if (usesConstantBus(Inst, OpIdx)) {
2169 if (MO.isReg()) {
2170 const unsigned Reg = mc2PseudoReg(MO.getReg());
2171 // Pairs of registers with a partial intersection like these
2172 // s0, s[0:1]
2173 // flat_scratch_lo, flat_scratch
2174 // flat_scratch_lo, flat_scratch_hi
2175 // are theoretically valid but they are disabled anyway.
2176 // Note that this code mimics SIInstrInfo::verifyInstruction
2177 if (Reg != SGPRUsed) {
2178 ++ConstantBusUseCount;
2179 }
2180 SGPRUsed = Reg;
2181 } else { // Expression or a literal
2182 ++ConstantBusUseCount;
2183 }
2184 }
2185 }
2186 }
2187
2188 return ConstantBusUseCount <= 1;
2189}
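
A worked example of the limit enforced above (instruction choice hedged): the
sources of a VOP3 instruction may together read at most one SGPR or literal
over the constant bus, with repeated reads of the same SGPR counted once:

    v_add_f32_e64 v0, s0, s1   // two distinct SGPRs -> rejected here
    v_add_f32_e64 v0, s0, s0   // one SGPR read twice -> accepted
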
2190
2191bool AMDGPUAsmParser::validateEarlyClobberLimitations(const MCInst &Inst) {
2192 const unsigned Opcode = Inst.getOpcode();
2193 const MCInstrDesc &Desc = MII.get(Opcode);
2194
2195 const int DstIdx = AMDGPU::getNamedOperandIdx(Opcode, AMDGPU::OpName::vdst);
2196 if (DstIdx == -1 ||
2197 Desc.getOperandConstraint(DstIdx, MCOI::EARLY_CLOBBER) == -1) {
2198 return true;
2199 }
2200
2201 const MCRegisterInfo *TRI = getContext().getRegisterInfo();
2202
2203 const int Src0Idx = AMDGPU::getNamedOperandIdx(Opcode, AMDGPU::OpName::src0);
2204 const int Src1Idx = AMDGPU::getNamedOperandIdx(Opcode, AMDGPU::OpName::src1);
2205 const int Src2Idx = AMDGPU::getNamedOperandIdx(Opcode, AMDGPU::OpName::src2);
2206
2207  assert(DstIdx != -1);
2208 const MCOperand &Dst = Inst.getOperand(DstIdx);
2209  assert(Dst.isReg());
2210 const unsigned DstReg = mc2PseudoReg(Dst.getReg());
2211
2212 const int SrcIndices[] = { Src0Idx, Src1Idx, Src2Idx };
2213
2214 for (int SrcIdx : SrcIndices) {
2215 if (SrcIdx == -1) break;
2216 const MCOperand &Src = Inst.getOperand(SrcIdx);
2217 if (Src.isReg()) {
2218 const unsigned SrcReg = mc2PseudoReg(Src.getReg());
2219 if (isRegIntersect(DstReg, SrcReg, TRI)) {
2220 return false;
2221 }
2222 }
2223 }
2224
2225 return true;
2226}
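
Illustrative case for the check above (mnemonic and operand shapes are an
assumption): for an opcode whose vdst carries the EARLY_CLOBBER constraint, any
overlap between the destination and a source register is rejected:

    v_mqsad_u32_u8 v[0:3], v[0:1], v2, v[4:7]  // v0 overlaps src0 -> error
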
2227
2228bool AMDGPUAsmParser::validateIntClampSupported(const MCInst &Inst) {
2229
2230 const unsigned Opc = Inst.getOpcode();
2231 const MCInstrDesc &Desc = MII.get(Opc);
2232
2233 if ((Desc.TSFlags & SIInstrFlags::IntClamp) != 0 && !hasIntClamp()) {
2234 int ClampIdx = AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::clamp);
2235    assert(ClampIdx != -1);
2236 return Inst.getOperand(ClampIdx).getImm() == 0;
2237 }
2238
2239 return true;
2240}
2241
2242bool AMDGPUAsmParser::validateInstruction(const MCInst &Inst,
2243 const SMLoc &IDLoc) {
2244 if (!validateConstantBusLimitations(Inst)) {
2245 Error(IDLoc,
2246 "invalid operand (violates constant bus restrictions)");
2247 return false;
2248 }
2249 if (!validateEarlyClobberLimitations(Inst)) {
2250 Error(IDLoc,
2251 "destination must be different than all sources");
2252 return false;
2253 }
2254 if (!validateIntClampSupported(Inst)) {
2255 Error(IDLoc,
2256 "integer clamping is not supported on this GPU");
2257 return false;
2258 }
2259
2260 return true;
2261}
2262
2263static std::string AMDGPUMnemonicSpellCheck(StringRef S, uint64_t FBS,
2264 unsigned VariantID = 0);
2265
2266bool AMDGPUAsmParser::MatchAndEmitInstruction(SMLoc IDLoc, unsigned &Opcode,
2267 OperandVector &Operands,
2268 MCStreamer &Out,
2269 uint64_t &ErrorInfo,
2270 bool MatchingInlineAsm) {
2271 MCInst Inst;
2272 unsigned Result = Match_Success;
2273 for (auto Variant : getMatchedVariants()) {
2274 uint64_t EI;
2275 auto R = MatchInstructionImpl(Operands, Inst, EI, MatchingInlineAsm,
2276 Variant);
2277 // We order match statuses from least to most specific and use the most
2278 // specific status as the result:
2279 // Match_MnemonicFail < Match_InvalidOperand < Match_MissingFeature < Match_PreferE32
2280 if ((R == Match_Success) ||
2281 (R == Match_PreferE32) ||
2282 (R == Match_MissingFeature && Result != Match_PreferE32) ||
2283 (R == Match_InvalidOperand && Result != Match_MissingFeature
2284 && Result != Match_PreferE32) ||
2285 (R == Match_MnemonicFail && Result != Match_InvalidOperand
2286 && Result != Match_MissingFeature
2287 && Result != Match_PreferE32)) {
2288 Result = R;
2289 ErrorInfo = EI;
2290 }
2291 if (R == Match_Success)
2292 break;
2293 }
2294
2295 switch (Result) {
2296 default: break;
2297 case Match_Success:
2298 if (!validateInstruction(Inst, IDLoc)) {
2299 return true;
2300 }
2301 Inst.setLoc(IDLoc);
2302 Out.EmitInstruction(Inst, getSTI());
2303 return false;
2304
2305 case Match_MissingFeature:
2306 return Error(IDLoc, "instruction not supported on this GPU");
2307
2308 case Match_MnemonicFail: {
2309 uint64_t FBS = ComputeAvailableFeatures(getSTI().getFeatureBits());
2310 std::string Suggestion = AMDGPUMnemonicSpellCheck(
2311 ((AMDGPUOperand &)*Operands[0]).getToken(), FBS);
2312 return Error(IDLoc, "invalid instruction" + Suggestion,
2313 ((AMDGPUOperand &)*Operands[0]).getLocRange());
2314 }
2315
2316 case Match_InvalidOperand: {
2317 SMLoc ErrorLoc = IDLoc;
2318 if (ErrorInfo != ~0ULL) {
2319 if (ErrorInfo >= Operands.size()) {
2320 return Error(IDLoc, "too few operands for instruction");
2321 }
2322 ErrorLoc = ((AMDGPUOperand &)*Operands[ErrorInfo]).getStartLoc();
2323 if (ErrorLoc == SMLoc())
2324 ErrorLoc = IDLoc;
2325 }
2326 return Error(ErrorLoc, "invalid operand for instruction");
2327 }
2328
2329 case Match_PreferE32:
2330 return Error(IDLoc, "internal error: instruction without _e64 suffix "
2331 "should be encoded as e32");
2332 }
2333  llvm_unreachable("Implement any new match types added!");
2334}
2335
2336bool AMDGPUAsmParser::ParseAsAbsoluteExpression(uint32_t &Ret) {
2337 int64_t Tmp = -1;
2338 if (getLexer().isNot(AsmToken::Integer) && getLexer().isNot(AsmToken::Identifier)) {
5: Calling 'MCAsmParserExtension::getLexer'
8: Returning from 'MCAsmParserExtension::getLexer'
9: Calling 'MCAsmLexer::isNot'
15: Returning from 'MCAsmLexer::isNot'
2339 return true;
2340 }
2341 if (getParser().parseAbsoluteExpression(Tmp)) {
16: Calling 'MCAsmParserExtension::getParser'
17: Returning from 'MCAsmParserExtension::getParser'
18: Assuming the condition is true
19: Taking true branch
2342 return true;
2343 }
2344 Ret = static_cast<uint32_t>(Tmp);
2345 return false;
2346}
2347
2348bool AMDGPUAsmParser::ParseDirectiveMajorMinor(uint32_t &Major,
2349 uint32_t &Minor) {
2350 if (ParseAsAbsoluteExpression(Major))
4: Calling 'AMDGPUAsmParser::ParseAsAbsoluteExpression'
20: Returning from 'AMDGPUAsmParser::ParseAsAbsoluteExpression'
21: Taking true branch
2351 return TokError("invalid major version");
22: Calling constructor for 'Twine'
29: Returning from constructor for 'Twine'
30: Calling 'MCAsmParserExtension::TokError'
33: Returning from 'MCAsmParserExtension::TokError'
2352
2353 if (getLexer().isNot(AsmToken::Comma))
2354 return TokError("minor version number required, comma expected");
2355 Lex();
2356
2357 if (ParseAsAbsoluteExpression(Minor))
2358 return TokError("invalid minor version");
2359
2360 return false;
2361}
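
Note on this helper's role in the report: when ParseAsAbsoluteExpression fails,
its output parameter is left unwritten, and the analyzer cannot prove that
TokError returns true (the underlying Error call is virtual), so it explores a
path on which the caller continues with an uninitialized Major. That is the
chain behind the numbered steps, ending at step 43 below.
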
2362
2363bool AMDGPUAsmParser::ParseDirectiveHSACodeObjectVersion() {
2364 uint32_t Major;
2365 uint32_t Minor;
2366
2367 if (ParseDirectiveMajorMinor(Major, Minor))
2368 return true;
2369
2370 getTargetStreamer().EmitDirectiveHSACodeObjectVersion(Major, Minor);
2371 return false;
2372}
2373
2374bool AMDGPUAsmParser::ParseDirectiveHSACodeObjectISA() {
2375 uint32_t Major;
1: 'Major' declared without an initial value
2376 uint32_t Minor;
2377 uint32_t Stepping;
2378 StringRef VendorName;
2379 StringRef ArchName;
2380
2381 // If this directive has no arguments, then use the ISA version for the
2382 // targeted GPU.
2383 if (getLexer().is(AsmToken::EndOfStatement)) {
2: Taking false branch
2384 AMDGPU::IsaInfo::IsaVersion ISA =
2385 AMDGPU::IsaInfo::getIsaVersion(getFeatureBits());
2386 getTargetStreamer().EmitDirectiveHSACodeObjectISA(ISA.Major, ISA.Minor,
2387 ISA.Stepping,
2388 "AMD", "AMDGPU");
2389 return false;
2390 }
2391
2392 if (ParseDirectiveMajorMinor(Major, Minor))
3: Calling 'AMDGPUAsmParser::ParseDirectiveMajorMinor'
34: Returning from 'AMDGPUAsmParser::ParseDirectiveMajorMinor'
35: Assuming the condition is false
36: Taking false branch
2393 return true;
2394
2395 if (getLexer().isNot(AsmToken::Comma))
37: Taking false branch
2396 return TokError("stepping version number required, comma expected");
2397 Lex();
2398
2399 if (ParseAsAbsoluteExpression(Stepping))
38: Taking false branch
2400 return TokError("invalid stepping version");
2401
2402 if (getLexer().isNot(AsmToken::Comma))
39: Taking false branch
2403 return TokError("vendor name required, comma expected");
2404 Lex();
2405
2406 if (getLexer().isNot(AsmToken::String))
40: Taking false branch
2407 return TokError("invalid vendor name");
2408
2409 VendorName = getLexer().getTok().getStringContents();
2410 Lex();
2411
2412 if (getLexer().isNot(AsmToken::Comma))
41: Taking false branch
2413 return TokError("arch name required, comma expected");
2414 Lex();
2415
2416 if (getLexer().isNot(AsmToken::String))
42: Taking false branch
2417 return TokError("invalid arch name");
2418
2419 ArchName = getLexer().getTok().getStringContents();
2420 Lex();
2421
2422 getTargetStreamer().EmitDirectiveHSACodeObjectISA(Major, Minor, Stepping,
43: 1st function call argument is an uninitialized value
2423 VendorName, ArchName);
2424 return false;
2425}
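
A minimal fix sketch for the warning above (an illustrative assumption, not the
upstream patch): zero-initializing the locals guarantees defined values even on
the parse-failure paths the analyzer considers:

    uint32_t Major = 0;
    uint32_t Minor = 0;
    uint32_t Stepping = 0;

An alternative with the same effect would be for ParseAsAbsoluteExpression to
write Ret unconditionally before any early return.
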
2426
2427bool AMDGPUAsmParser::ParseAMDKernelCodeTValue(StringRef ID,
2428 amd_kernel_code_t &Header) {
2429 SmallString<40> ErrStr;
2430 raw_svector_ostream Err(ErrStr);
2431 if (!parseAmdKernelCodeField(ID, getParser(), Header, Err)) {
2432 return TokError(Err.str());
2433 }
2434 Lex();
2435 return false;
2436}
2437
2438bool AMDGPUAsmParser::ParseDirectiveAMDKernelCodeT() {
2439 amd_kernel_code_t Header;
2440 AMDGPU::initDefaultAMDKernelCodeT(Header, getFeatureBits());
2441
2442 while (true) {
2443 // Lex EndOfStatement. This is in a while loop, because lexing a comment
2444 // will set the current token to EndOfStatement.
2445 while(getLexer().is(AsmToken::EndOfStatement))
2446 Lex();
2447
2448 if (getLexer().isNot(AsmToken::Identifier))
2449 return TokError("expected value identifier or .end_amd_kernel_code_t");
2450
2451 StringRef ID = getLexer().getTok().getIdentifier();
2452 Lex();
2453
2454 if (ID == ".end_amd_kernel_code_t")
2455 break;
2456
2457 if (ParseAMDKernelCodeTValue(ID, Header))
2458 return true;
2459 }
2460
2461 getTargetStreamer().EmitAMDKernelCodeT(Header);
2462
2463 return false;
2464}
2465
2466bool AMDGPUAsmParser::ParseDirectiveAMDGPUHsaKernel() {
2467 if (getLexer().isNot(AsmToken::Identifier))
2468 return TokError("expected symbol name");
2469
2470 StringRef KernelName = Parser.getTok().getString();
2471
2472 getTargetStreamer().EmitAMDGPUSymbolType(KernelName,
2473 ELF::STT_AMDGPU_HSA_KERNEL);
2474 Lex();
2475 KernelScope.initialize(getContext());
2476 return false;
2477}
2478
2479bool AMDGPUAsmParser::ParseDirectiveISAVersion() {
2480 if (getSTI().getTargetTriple().getArch() != Triple::amdgcn) {
2481 return Error(getParser().getTok().getLoc(),
2482 ".amd_amdgpu_isa directive is not available on non-amdgcn "
2483 "architectures");
2484 }
2485
2486 auto ISAVersionStringFromASM = getLexer().getTok().getStringContents();
2487
2488 std::string ISAVersionStringFromSTI;
2489 raw_string_ostream ISAVersionStreamFromSTI(ISAVersionStringFromSTI);
2490 IsaInfo::streamIsaVersion(&getSTI(), ISAVersionStreamFromSTI);
2491
2492 if (ISAVersionStringFromASM != ISAVersionStreamFromSTI.str()) {
2493 return Error(getParser().getTok().getLoc(),
2494 ".amd_amdgpu_isa directive does not match triple and/or mcpu "
2495 "arguments specified through the command line");
2496 }
2497
2498 getTargetStreamer().EmitISAVersion(ISAVersionStreamFromSTI.str());
2499 Lex();
2500
2501 return false;
2502}
2503
2504bool AMDGPUAsmParser::ParseDirectiveHSAMetadata() {
2505 if (getSTI().getTargetTriple().getOS() != Triple::AMDHSA) {
2506 return Error(getParser().getTok().getLoc(),
2507 (Twine(HSAMD::AssemblerDirectiveBegin) + Twine(" directive is "
2508 "not available on non-amdhsa OSes")).str());
2509 }
2510
2511 std::string HSAMetadataString;
2512 raw_string_ostream YamlStream(HSAMetadataString);
2513
2514 getLexer().setSkipSpace(false);
2515
2516 bool FoundEnd = false;
2517 while (!getLexer().is(AsmToken::Eof)) {
2518 while (getLexer().is(AsmToken::Space)) {
2519 YamlStream << getLexer().getTok().getString();
2520 Lex();
2521 }
2522
2523 if (getLexer().is(AsmToken::Identifier)) {
2524 StringRef ID = getLexer().getTok().getIdentifier();
2525 if (ID == AMDGPU::HSAMD::AssemblerDirectiveEnd) {
2526 Lex();
2527 FoundEnd = true;
2528 break;
2529 }
2530 }
2531
2532 YamlStream << Parser.parseStringToEndOfStatement()
2533 << getContext().getAsmInfo()->getSeparatorString();
2534
2535 Parser.eatToEndOfStatement();
2536 }
2537
2538 getLexer().setSkipSpace(true);
2539
2540 if (getLexer().is(AsmToken::Eof) && !FoundEnd) {
2541 return TokError(Twine("expected directive ") +
2542 Twine(HSAMD::AssemblerDirectiveEnd) + Twine(" not found"));
2543 }
2544
2545 YamlStream.flush();
2546
2547 if (!getTargetStreamer().EmitHSAMetadata(HSAMetadataString))
2548 return Error(getParser().getTok().getLoc(), "invalid HSA metadata");
2549
2550 return false;
2551}
2552
2553bool AMDGPUAsmParser::ParseDirectivePALMetadata() {
2554 if (getSTI().getTargetTriple().getOS() != Triple::AMDPAL) {
2555 return Error(getParser().getTok().getLoc(),
2556 (Twine(PALMD::AssemblerDirective) + Twine(" directive is "
2557 "not available on non-amdpal OSes")).str());
2558 }
2559
2560 PALMD::Metadata PALMetadata;
2561 for (;;) {
2562 uint32_t Value;
2563 if (ParseAsAbsoluteExpression(Value)) {
2564 return TokError(Twine("invalid value in ") +
2565 Twine(PALMD::AssemblerDirective));
2566 }
2567 PALMetadata.push_back(Value);
2568 if (getLexer().isNot(AsmToken::Comma))
2569 break;
2570 Lex();
2571 }
2572 getTargetStreamer().EmitPALMetadata(PALMetadata);
2573 return false;
2574}
2575
2576bool AMDGPUAsmParser::ParseDirective(AsmToken DirectiveID) {
2577 StringRef IDVal = DirectiveID.getString();
2578
2579 if (IDVal == ".hsa_code_object_version")
2580 return ParseDirectiveHSACodeObjectVersion();
2581
2582 if (IDVal == ".hsa_code_object_isa")
2583 return ParseDirectiveHSACodeObjectISA();
2584
2585 if (IDVal == ".amd_kernel_code_t")
2586 return ParseDirectiveAMDKernelCodeT();
2587
2588 if (IDVal == ".amdgpu_hsa_kernel")
2589 return ParseDirectiveAMDGPUHsaKernel();
2590
2591 if (IDVal == ".amd_amdgpu_isa")
2592 return ParseDirectiveISAVersion();
2593
2594 if (IDVal == AMDGPU::HSAMD::AssemblerDirectiveBegin)
2595 return ParseDirectiveHSAMetadata();
2596
2597 if (IDVal == PALMD::AssemblerDirective)
2598 return ParseDirectivePALMetadata();
2599
2600 return true;
2601}
2602
2603bool AMDGPUAsmParser::subtargetHasRegister(const MCRegisterInfo &MRI,
2604 unsigned RegNo) const {
2605
2606 for (MCRegAliasIterator R(AMDGPU::TTMP12_TTMP13_TTMP14_TTMP15, &MRI, true);
2607 R.isValid(); ++R) {
2608 if (*R == RegNo)
2609 return isGFX9();
2610 }
2611
2612 switch (RegNo) {
2613 case AMDGPU::TBA:
2614 case AMDGPU::TBA_LO:
2615 case AMDGPU::TBA_HI:
2616 case AMDGPU::TMA:
2617 case AMDGPU::TMA_LO:
2618 case AMDGPU::TMA_HI:
2619 return !isGFX9();
2620 default:
2621 break;
2622 }
2623
2624 if (isCI())
2625 return true;
2626
2627 if (isSI()) {
2628 // No flat_scr
2629 switch (RegNo) {
2630 case AMDGPU::FLAT_SCR:
2631 case AMDGPU::FLAT_SCR_LO:
2632 case AMDGPU::FLAT_SCR_HI:
2633 return false;
2634 default:
2635 return true;
2636 }
2637 }
2638
2639 // VI only has 102 SGPRs, so make sure we aren't trying to use the 2 more that
2640 // SI/CI have.
2641 for (MCRegAliasIterator R(AMDGPU::SGPR102_SGPR103, &MRI, true);
2642 R.isValid(); ++R) {
2643 if (*R == RegNo)
2644 return false;
2645 }
2646
2647 return true;
2648}
2649
2650OperandMatchResultTy
2651AMDGPUAsmParser::parseOperand(OperandVector &Operands, StringRef Mnemonic) {
2652 // Try to parse with a custom parser
2653 OperandMatchResultTy ResTy = MatchOperandParserImpl(Operands, Mnemonic);
2654
2655 // If we successfully parsed the operand or if there was an error parsing,
2656 // we are done.
2657 //
2658 // If we are parsing after we reach EndOfStatement then this means we
2659 // are appending default values to the Operands list. This is only done
2660 // by a custom parser, so we shouldn't continue on to generic parsing.
2661 if (ResTy == MatchOperand_Success || ResTy == MatchOperand_ParseFail ||
2662 getLexer().is(AsmToken::EndOfStatement))
2663 return ResTy;
2664
2665 ResTy = parseRegOrImm(Operands);
2666
2667 if (ResTy == MatchOperand_Success)
2668 return ResTy;
2669
2670 const auto &Tok = Parser.getTok();
2671 SMLoc S = Tok.getLoc();
2672
2673 const MCExpr *Expr = nullptr;
2674 if (!Parser.parseExpression(Expr)) {
2675 Operands.push_back(AMDGPUOperand::CreateExpr(this, Expr, S));
2676 return MatchOperand_Success;
2677 }
2678
2679 // Possibly this is an instruction flag like 'gds'.
2680 if (Tok.getKind() == AsmToken::Identifier) {
2681 Operands.push_back(AMDGPUOperand::CreateToken(this, Tok.getString(), S));
2682 Parser.Lex();
2683 return MatchOperand_Success;
2684 }
2685
2686 return MatchOperand_NoMatch;
2687}
2688
2689StringRef AMDGPUAsmParser::parseMnemonicSuffix(StringRef Name) {
2690 // Clear any forced encodings from the previous instruction.
2691 setForcedEncodingSize(0);
2692 setForcedDPP(false);
2693 setForcedSDWA(false);
2694
2695 if (Name.endswith("_e64")) {
2696 setForcedEncodingSize(64);
2697 return Name.substr(0, Name.size() - 4);
2698 } else if (Name.endswith("_e32")) {
2699 setForcedEncodingSize(32);
2700 return Name.substr(0, Name.size() - 4);
2701 } else if (Name.endswith("_dpp")) {
2702 setForcedDPP(true);
2703 return Name.substr(0, Name.size() - 4);
2704 } else if (Name.endswith("_sdwa")) {
2705 setForcedSDWA(true);
2706 return Name.substr(0, Name.size() - 5);
2707 }
2708 return Name;
2709}
2710
2711bool AMDGPUAsmParser::ParseInstruction(ParseInstructionInfo &Info,
2712 StringRef Name,
2713 SMLoc NameLoc, OperandVector &Operands) {
2714 // Add the instruction mnemonic
2715 Name = parseMnemonicSuffix(Name);
2716 Operands.push_back(AMDGPUOperand::CreateToken(this, Name, NameLoc));
2717
2718 while (!getLexer().is(AsmToken::EndOfStatement)) {
2719 OperandMatchResultTy Res = parseOperand(Operands, Name);
2720
2721 // Eat the comma or space if there is one.
2722 if (getLexer().is(AsmToken::Comma))
2723 Parser.Lex();
2724
2725 switch (Res) {
2726 case MatchOperand_Success: break;
2727 case MatchOperand_ParseFail:
2728 Error(getLexer().getLoc(), "failed parsing operand.");
2729 while (!getLexer().is(AsmToken::EndOfStatement)) {
2730 Parser.Lex();
2731 }
2732 return true;
2733 case MatchOperand_NoMatch:
2734 Error(getLexer().getLoc(), "not a valid operand.");
2735 while (!getLexer().is(AsmToken::EndOfStatement)) {
2736 Parser.Lex();
2737 }
2738 return true;
2739 }
2740 }
2741
2742 return false;
2743}
2744
2745//===----------------------------------------------------------------------===//
2746// Utility functions
2747//===----------------------------------------------------------------------===//
2748
2749OperandMatchResultTy
2750AMDGPUAsmParser::parseIntWithPrefix(const char *Prefix, int64_t &Int) {
2751 switch(getLexer().getKind()) {
2752 default: return MatchOperand_NoMatch;
2753 case AsmToken::Identifier: {
2754 StringRef Name = Parser.getTok().getString();
2755 if (!Name.equals(Prefix)) {
2756 return MatchOperand_NoMatch;
2757 }
2758
2759 Parser.Lex();
2760 if (getLexer().isNot(AsmToken::Colon))
2761 return MatchOperand_ParseFail;
2762
2763 Parser.Lex();
2764
2765 bool IsMinus = false;
2766 if (getLexer().getKind() == AsmToken::Minus) {
2767 Parser.Lex();
2768 IsMinus = true;
2769 }
2770
2771 if (getLexer().isNot(AsmToken::Integer))
2772 return MatchOperand_ParseFail;
2773
2774 if (getParser().parseAbsoluteExpression(Int))
2775 return MatchOperand_ParseFail;
2776
2777 if (IsMinus)
2778 Int = -Int;
2779 break;
2780 }
2781 }
2782 return MatchOperand_Success;
2783}
2784
2785OperandMatchResultTy
2786AMDGPUAsmParser::parseIntWithPrefix(const char *Prefix, OperandVector &Operands,
2787 AMDGPUOperand::ImmTy ImmTy,
2788 bool (*ConvertResult)(int64_t&)) {
2789 SMLoc S = Parser.getTok().getLoc();
2790 int64_t Value = 0;
2791
2792 OperandMatchResultTy Res = parseIntWithPrefix(Prefix, Value);
2793 if (Res != MatchOperand_Success)
2794 return Res;
2795
2796 if (ConvertResult && !ConvertResult(Value)) {
2797 return MatchOperand_ParseFail;
2798 }
2799
2800 Operands.push_back(AMDGPUOperand::CreateImm(this, Value, S, ImmTy));
2801 return MatchOperand_Success;
2802}
2803
2804OperandMatchResultTy AMDGPUAsmParser::parseOperandArrayWithPrefix(
2805 const char *Prefix,
2806 OperandVector &Operands,
2807 AMDGPUOperand::ImmTy ImmTy,
2808 bool (*ConvertResult)(int64_t&)) {
2809 StringRef Name = Parser.getTok().getString();
2810 if (!Name.equals(Prefix))
2811 return MatchOperand_NoMatch;
2812
2813 Parser.Lex();
2814 if (getLexer().isNot(AsmToken::Colon))
2815 return MatchOperand_ParseFail;
2816
2817 Parser.Lex();
2818 if (getLexer().isNot(AsmToken::LBrac))
2819 return MatchOperand_ParseFail;
2820 Parser.Lex();
2821
2822 unsigned Val = 0;
2823 SMLoc S = Parser.getTok().getLoc();
2824
2825 // FIXME: How to verify the number of elements matches the number of src
2826 // operands?
2827 for (int I = 0; I < 4; ++I) {
2828 if (I != 0) {
2829 if (getLexer().is(AsmToken::RBrac))
2830 break;
2831
2832 if (getLexer().isNot(AsmToken::Comma))
2833 return MatchOperand_ParseFail;
2834 Parser.Lex();
2835 }
2836
2837 if (getLexer().isNot(AsmToken::Integer))
2838 return MatchOperand_ParseFail;
2839
2840 int64_t Op;
2841 if (getParser().parseAbsoluteExpression(Op))
2842 return MatchOperand_ParseFail;
2843
2844 if (Op != 0 && Op != 1)
2845 return MatchOperand_ParseFail;
2846 Val |= (Op << I);
2847 }
2848
2849 Parser.Lex();
2850 Operands.push_back(AMDGPUOperand::CreateImm(this, Val, S, ImmTy));
2851 return MatchOperand_Success;
2852}
2853
2854OperandMatchResultTy
2855AMDGPUAsmParser::parseNamedBit(const char *Name, OperandVector &Operands,
2856 AMDGPUOperand::ImmTy ImmTy) {
2857 int64_t Bit = 0;
2858 SMLoc S = Parser.getTok().getLoc();
2859
2860 // We are at the end of the statement, and this is a default argument, so
2861 // use a default value.
2862 if (getLexer().isNot(AsmToken::EndOfStatement)) {
2863 switch(getLexer().getKind()) {
2864 case AsmToken::Identifier: {
2865 StringRef Tok = Parser.getTok().getString();
2866 if (Tok == Name) {
2867 Bit = 1;
2868 Parser.Lex();
2869 } else if (Tok.startswith("no") && Tok.endswith(Name)) {
2870 Bit = 0;
2871 Parser.Lex();
2872 } else {
2873 return MatchOperand_NoMatch;
2874 }
2875 break;
2876 }
2877 default:
2878 return MatchOperand_NoMatch;
2879 }
2880 }
2881
2882 Operands.push_back(AMDGPUOperand::CreateImm(this, Bit, S, ImmTy));
2883 return MatchOperand_Success;
2884}
2885
2886static void addOptionalImmOperand(
2887 MCInst& Inst, const OperandVector& Operands,
2888 AMDGPUAsmParser::OptionalImmIndexMap& OptionalIdx,
2889 AMDGPUOperand::ImmTy ImmT,
2890 int64_t Default = 0) {
2891 auto i = OptionalIdx.find(ImmT);
2892 if (i != OptionalIdx.end()) {
2893 unsigned Idx = i->second;
2894 ((AMDGPUOperand &)*Operands[Idx]).addImmOperands(Inst, 1);
2895 } else {
2896 Inst.addOperand(MCOperand::createImm(Default));
2897 }
2898}
2899
2900OperandMatchResultTy
2901AMDGPUAsmParser::parseStringWithPrefix(StringRef Prefix, StringRef &Value) {
2902 if (getLexer().isNot(AsmToken::Identifier)) {
2903 return MatchOperand_NoMatch;
2904 }
2905 StringRef Tok = Parser.getTok().getString();
2906 if (Tok != Prefix) {
2907 return MatchOperand_NoMatch;
2908 }
2909
2910 Parser.Lex();
2911 if (getLexer().isNot(AsmToken::Colon)) {
2912 return MatchOperand_ParseFail;
2913 }
2914
2915 Parser.Lex();
2916 if (getLexer().isNot(AsmToken::Identifier)) {
2917 return MatchOperand_ParseFail;
2918 }
2919
2920 Value = Parser.getTok().getString();
2921 return MatchOperand_Success;
2922}
2923
2924//===----------------------------------------------------------------------===//
2925// ds
2926//===----------------------------------------------------------------------===//
2927
2928void AMDGPUAsmParser::cvtDSOffset01(MCInst &Inst,
2929 const OperandVector &Operands) {
2930 OptionalImmIndexMap OptionalIdx;
2931
2932 for (unsigned i = 1, e = Operands.size(); i != e; ++i) {
2933 AMDGPUOperand &Op = ((AMDGPUOperand &)*Operands[i]);
2934
2935 // Add the register arguments
2936 if (Op.isReg()) {
2937 Op.addRegOperands(Inst, 1);
2938 continue;
2939 }
2940
2941 // Handle optional arguments
2942 OptionalIdx[Op.getImmTy()] = i;
2943 }
2944
2945 addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyOffset0);
2946 addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyOffset1);
2947 addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyGDS);
2948
2949 Inst.addOperand(MCOperand::createReg(AMDGPU::M0)); // m0
2950}
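
An illustrative source form handled by this conversion (hedged):

    ds_write2_b32 v1, v2, v3 offset0:4 offset1:8 gds

Register operands are appended in order; offset0, offset1 and gds come from
OptionalIdx with a default of 0 when omitted, and M0 is appended last.
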
2951
2952void AMDGPUAsmParser::cvtDSImpl(MCInst &Inst, const OperandVector &Operands,
2953 bool IsGdsHardcoded) {
2954 OptionalImmIndexMap OptionalIdx;
2955
2956 for (unsigned i = 1, e = Operands.size(); i != e; ++i) {
2957 AMDGPUOperand &Op = ((AMDGPUOperand &)*Operands[i]);
2958
2959 // Add the register arguments
2960 if (Op.isReg()) {
2961 Op.addRegOperands(Inst, 1);
2962 continue;
2963 }
2964
2965 if (Op.isToken() && Op.getToken() == "gds") {
2966 IsGdsHardcoded = true;
2967 continue;
2968 }
2969
2970 // Handle optional arguments
2971 OptionalIdx[Op.getImmTy()] = i;
2972 }
2973
2974 AMDGPUOperand::ImmTy OffsetType =
2975 (Inst.getOpcode() == AMDGPU::DS_SWIZZLE_B32_si ||
2976 Inst.getOpcode() == AMDGPU::DS_SWIZZLE_B32_vi) ? AMDGPUOperand::ImmTySwizzle :
2977 AMDGPUOperand::ImmTyOffset;
2978
2979 addOptionalImmOperand(Inst, Operands, OptionalIdx, OffsetType);
2980
2981 if (!IsGdsHardcoded) {
2982 addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyGDS);
2983 }
2984 Inst.addOperand(MCOperand::createReg(AMDGPU::M0)); // m0
2985}
2986
2987void AMDGPUAsmParser::cvtExp(MCInst &Inst, const OperandVector &Operands) {
2988 OptionalImmIndexMap OptionalIdx;
2989
2990 unsigned OperandIdx[4];
2991 unsigned EnMask = 0;
2992 int SrcIdx = 0;
2993
2994 for (unsigned i = 1, e = Operands.size(); i != e; ++i) {
2995 AMDGPUOperand &Op = ((AMDGPUOperand &)*Operands[i]);
2996
2997 // Add the register arguments
2998 if (Op.isReg()) {
2999      assert(SrcIdx < 4);
3000 OperandIdx[SrcIdx] = Inst.size();
3001 Op.addRegOperands(Inst, 1);
3002 ++SrcIdx;
3003 continue;
3004 }
3005
3006 if (Op.isOff()) {
3007      assert(SrcIdx < 4);
3008 OperandIdx[SrcIdx] = Inst.size();
3009 Inst.addOperand(MCOperand::createReg(AMDGPU::NoRegister));
3010 ++SrcIdx;
3011 continue;
3012 }
3013
3014 if (Op.isImm() && Op.getImmTy() == AMDGPUOperand::ImmTyExpTgt) {
3015 Op.addImmOperands(Inst, 1);
3016 continue;
3017 }
3018
3019 if (Op.isToken() && Op.getToken() == "done")
3020 continue;
3021
3022 // Handle optional arguments
3023 OptionalIdx[Op.getImmTy()] = i;
3024 }
3025
3026  assert(SrcIdx == 4);
3027
3028 bool Compr = false;
3029 if (OptionalIdx.find(AMDGPUOperand::ImmTyExpCompr) != OptionalIdx.end()) {
3030 Compr = true;
3031 Inst.getOperand(OperandIdx[1]) = Inst.getOperand(OperandIdx[2]);
3032 Inst.getOperand(OperandIdx[2]).setReg(AMDGPU::NoRegister);
3033 Inst.getOperand(OperandIdx[3]).setReg(AMDGPU::NoRegister);
3034 }
3035
3036 for (auto i = 0; i < SrcIdx; ++i) {
3037 if (Inst.getOperand(OperandIdx[i]).getReg() != AMDGPU::NoRegister) {
3038 EnMask |= Compr? (0x3 << i * 2) : (0x1 << i);
3039 }
3040 }
3041
3042 addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyExpVM);
3043 addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyExpCompr);
3044
3045 Inst.addOperand(MCOperand::createImm(EnMask));
3046}
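
A worked example of the EnMask computation above (hedged): for

    exp mrt0 v0, v1, off, off

slots 0 and 1 hold registers and slots 2 and 3 are off, so EnMask = 0x1 | 0x2 =
0b0011. With compr set, each live slot contributes two bits (0x3 << 2*i)
instead of one.
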
3047
3048//===----------------------------------------------------------------------===//
3049// s_waitcnt
3050//===----------------------------------------------------------------------===//
3051
3052static bool
3053encodeCnt(
3054 const AMDGPU::IsaInfo::IsaVersion ISA,
3055 int64_t &IntVal,
3056 int64_t CntVal,
3057 bool Saturate,
3058 unsigned (*encode)(const IsaInfo::IsaVersion &Version, unsigned, unsigned),
3059 unsigned (*decode)(const IsaInfo::IsaVersion &Version, unsigned))
3060{
3061 bool Failed = false;
3062
3063 IntVal = encode(ISA, IntVal, CntVal);
3064 if (CntVal != decode(ISA, IntVal)) {
3065 if (Saturate) {
3066 IntVal = encode(ISA, IntVal, -1);
3067 } else {
3068 Failed = true;
3069 }
3070 }
3071 return Failed;
3072}
3073
3074bool AMDGPUAsmParser::parseCnt(int64_t &IntVal) {
3075 StringRef CntName = Parser.getTok().getString();
3076 int64_t CntVal;
3077
3078 Parser.Lex();
3079 if (getLexer().isNot(AsmToken::LParen))
3080 return true;
3081
3082 Parser.Lex();
3083 if (getLexer().isNot(AsmToken::Integer))
3084 return true;
3085
3086 SMLoc ValLoc = Parser.getTok().getLoc();
3087 if (getParser().parseAbsoluteExpression(CntVal))
3088 return true;
3089
3090 AMDGPU::IsaInfo::IsaVersion ISA =
3091 AMDGPU::IsaInfo::getIsaVersion(getFeatureBits());
3092
3093 bool Failed = true;
3094 bool Sat = CntName.endswith("_sat");
3095
3096 if (CntName == "vmcnt" || CntName == "vmcnt_sat") {
3097 Failed = encodeCnt(ISA, IntVal, CntVal, Sat, encodeVmcnt, decodeVmcnt);
3098 } else if (CntName == "expcnt" || CntName == "expcnt_sat") {
3099 Failed = encodeCnt(ISA, IntVal, CntVal, Sat, encodeExpcnt, decodeExpcnt);
3100 } else if (CntName == "lgkmcnt" || CntName == "lgkmcnt_sat") {
3101 Failed = encodeCnt(ISA, IntVal, CntVal, Sat, encodeLgkmcnt, decodeLgkmcnt);
3102 }
3103
3104 if (Failed) {
3105 Error(ValLoc, "too large value for " + CntName);
3106 return true;
3107 }
3108
3109 if (getLexer().isNot(AsmToken::RParen)) {
3110 return true;
3111 }
3112
3113 Parser.Lex();
3114 if (getLexer().is(AsmToken::Amp) || getLexer().is(AsmToken::Comma)) {
3115 const AsmToken NextToken = getLexer().peekTok();
3116 if (NextToken.is(AsmToken::Identifier)) {
3117 Parser.Lex();
3118 }
3119 }
3120
3121 return false;
3122}
3123
3124OperandMatchResultTy
3125AMDGPUAsmParser::parseSWaitCntOps(OperandVector &Operands) {
3126 AMDGPU::IsaInfo::IsaVersion ISA =
3127 AMDGPU::IsaInfo::getIsaVersion(getFeatureBits());
3128 int64_t Waitcnt = getWaitcntBitMask(ISA);
3129 SMLoc S = Parser.getTok().getLoc();
3130
3131 switch(getLexer().getKind()) {
3132 default: return MatchOperand_ParseFail;
3133 case AsmToken::Integer:
3134 // The operand can be an integer value.
3135 if (getParser().parseAbsoluteExpression(Waitcnt))
3136 return MatchOperand_ParseFail;
3137 break;
3138
3139 case AsmToken::Identifier:
3140 do {
3141 if (parseCnt(Waitcnt))
3142 return MatchOperand_ParseFail;
3143 } while(getLexer().isNot(AsmToken::EndOfStatement));
3144 break;
3145 }
3146 Operands.push_back(AMDGPUOperand::CreateImm(this, Waitcnt, S));
3147 return MatchOperand_Success;
3148}
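
Illustrative accepted forms (hedged):

    s_waitcnt 0                       // raw integer bitmask
    s_waitcnt vmcnt(0) & lgkmcnt(0)   // named counters, '&' or ',' separated
    s_waitcnt vmcnt_sat(9999)         // the _sat suffix clamps oversized values
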
3149
3150bool AMDGPUAsmParser::parseHwregConstruct(OperandInfoTy &HwReg, int64_t &Offset,
3151 int64_t &Width) {
3152 using namespace llvm::AMDGPU::Hwreg;
3153
3154 if (Parser.getTok().getString() != "hwreg")
3155 return true;
3156 Parser.Lex();
3157
3158 if (getLexer().isNot(AsmToken::LParen))
3159 return true;
3160 Parser.Lex();
3161
3162 if (getLexer().is(AsmToken::Identifier)) {
3163 HwReg.IsSymbolic = true;
3164 HwReg.Id = ID_UNKNOWN_;
3165 const StringRef tok = Parser.getTok().getString();
3166 for (int i = ID_SYMBOLIC_FIRST_; i < ID_SYMBOLIC_LAST_; ++i) {
3167 if (tok == IdSymbolic[i]) {
3168 HwReg.Id = i;
3169 break;
3170 }
3171 }
3172 Parser.Lex();
3173 } else {
3174 HwReg.IsSymbolic = false;
3175 if (getLexer().isNot(AsmToken::Integer))
3176 return true;
3177 if (getParser().parseAbsoluteExpression(HwReg.Id))
3178 return true;
3179 }
3180
3181 if (getLexer().is(AsmToken::RParen)) {
3182 Parser.Lex();
3183 return false;
3184 }
3185
3186 // optional params
3187 if (getLexer().isNot(AsmToken::Comma))
3188 return true;
3189 Parser.Lex();
3190
3191 if (getLexer().isNot(AsmToken::Integer))
3192 return true;
3193 if (getParser().parseAbsoluteExpression(Offset))
3194 return true;
3195
3196 if (getLexer().isNot(AsmToken::Comma))
3197 return true;
3198 Parser.Lex();
3199
3200 if (getLexer().isNot(AsmToken::Integer))
3201 return true;
3202 if (getParser().parseAbsoluteExpression(Width))
3203 return true;
3204
3205 if (getLexer().isNot(AsmToken::RParen))
3206 return true;
3207 Parser.Lex();
3208
3209 return false;
3210}
3211
3212OperandMatchResultTy AMDGPUAsmParser::parseHwreg(OperandVector &Operands) {
3213 using namespace llvm::AMDGPU::Hwreg;
3214
3215 int64_t Imm16Val = 0;
3216 SMLoc S = Parser.getTok().getLoc();
3217
3218 switch(getLexer().getKind()) {
3219 default: return MatchOperand_NoMatch;
3220 case AsmToken::Integer:
3221 // The operand can be an integer value.
3222 if (getParser().parseAbsoluteExpression(Imm16Val))
3223 return MatchOperand_NoMatch;
3224 if (Imm16Val < 0 || !isUInt<16>(Imm16Val)) {
3225 Error(S, "invalid immediate: only 16-bit values are legal");
3226 // Do not return an error code; create an imm operand anyway and proceed
3227 // to the next operand, if any. That avoids unnecessary error messages.
3228 }
3229 break;
3230
3231 case AsmToken::Identifier: {
3232 OperandInfoTy HwReg(ID_UNKNOWN_);
3233 int64_t Offset = OFFSET_DEFAULT_;
3234 int64_t Width = WIDTH_M1_DEFAULT_ + 1;
3235 if (parseHwregConstruct(HwReg, Offset, Width))
3236 return MatchOperand_ParseFail;
3237 if (HwReg.Id < 0 || !isUInt<ID_WIDTH_>(HwReg.Id)) {
3238 if (HwReg.IsSymbolic)
3239 Error(S, "invalid symbolic name of hardware register");
3240 else
3241 Error(S, "invalid code of hardware register: only 6-bit values are legal");
3242 }
3243 if (Offset < 0 || !isUInt<OFFSET_WIDTH_>(Offset))
3244 Error(S, "invalid bit offset: only 5-bit values are legal");
3245 if ((Width-1) < 0 || !isUInt<WIDTH_M1_WIDTH_>(Width-1))
3246 Error(S, "invalid bitfield width: only values from 1 to 32 are legal");
3247 Imm16Val = (HwReg.Id << ID_SHIFT_) | (Offset << OFFSET_SHIFT_) | ((Width-1) << WIDTH_M1_SHIFT_);
3248 }
3249 break;
3250 }
3251 Operands.push_back(AMDGPUOperand::CreateImm(this, Imm16Val, S, AMDGPUOperand::ImmTyHwreg));
3252 return MatchOperand_Success;
3253}
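
A worked packing example (shift values assumed from SIDefines.h, where
OFFSET_SHIFT_ is 6 and WIDTH_M1_SHIFT_ is 11): hwreg(4, 3, 7) encodes as

    Imm16Val = 4 | (3 << 6) | ((7 - 1) << 11) = 0x30C4
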
3254
3255bool AMDGPUOperand::isSWaitCnt() const {
3256 return isImm();
3257}
3258
3259bool AMDGPUOperand::isHwreg() const {
3260 return isImmTy(ImmTyHwreg);
3261}
3262
3263bool AMDGPUAsmParser::parseSendMsgConstruct(OperandInfoTy &Msg, OperandInfoTy &Operation, int64_t &StreamId) {
3264 using namespace llvm::AMDGPU::SendMsg;
3265
3266 if (Parser.getTok().getString() != "sendmsg")
3267 return true;
3268 Parser.Lex();
3269
3270 if (getLexer().isNot(AsmToken::LParen))
3271 return true;
3272 Parser.Lex();
3273
3274 if (getLexer().is(AsmToken::Identifier)) {
3275 Msg.IsSymbolic = true;
3276 Msg.Id = ID_UNKNOWN_;
3277 const std::string tok = Parser.getTok().getString();
3278 for (int i = ID_GAPS_FIRST_; i < ID_GAPS_LAST_; ++i) {
3279 switch(i) {
3280 default: continue; // Omit gaps.
3281 case ID_INTERRUPT: case ID_GS: case ID_GS_DONE: case ID_SYSMSG: break;
3282 }
3283 if (tok == IdSymbolic[i]) {
3284 Msg.Id = i;
3285 break;
3286 }
3287 }
3288 Parser.Lex();
3289 } else {
3290 Msg.IsSymbolic = false;
3291 if (getLexer().isNot(AsmToken::Integer))
3292 return true;
3293 if (getParser().parseAbsoluteExpression(Msg.Id))
3294 return true;
3295 if (getLexer().is(AsmToken::Integer))
3296 if (getParser().parseAbsoluteExpression(Msg.Id))
3297 Msg.Id = ID_UNKNOWN_;
3298 }
3299 if (Msg.Id == ID_UNKNOWN_) // Don't know how to parse the rest.
3300 return false;
3301
3302 if (!(Msg.Id == ID_GS || Msg.Id == ID_GS_DONE || Msg.Id == ID_SYSMSG)) {
3303 if (getLexer().isNot(AsmToken::RParen))
3304 return true;
3305 Parser.Lex();
3306 return false;
3307 }
3308
3309 if (getLexer().isNot(AsmToken::Comma))
3310 return true;
3311 Parser.Lex();
3312
3313 assert(Msg.Id == ID_GS || Msg.Id == ID_GS_DONE || Msg.Id == ID_SYSMSG);
3314 Operation.Id = ID_UNKNOWN_;
3315 if (getLexer().is(AsmToken::Identifier)) {
3316 Operation.IsSymbolic = true;
3317 const char* const *S = (Msg.Id == ID_SYSMSG) ? OpSysSymbolic : OpGsSymbolic;
3318 const int F = (Msg.Id == ID_SYSMSG) ? OP_SYS_FIRST_ : OP_GS_FIRST_;
3319 const int L = (Msg.Id == ID_SYSMSG) ? OP_SYS_LAST_ : OP_GS_LAST_;
3320 const StringRef Tok = Parser.getTok().getString();
3321 for (int i = F; i < L; ++i) {
3322 if (Tok == S[i]) {
3323 Operation.Id = i;
3324 break;
3325 }
3326 }
3327 Parser.Lex();
3328 } else {
3329 Operation.IsSymbolic = false;
3330 if (getLexer().isNot(AsmToken::Integer))
3331 return true;
3332 if (getParser().parseAbsoluteExpression(Operation.Id))
3333 return true;
3334 }
3335
3336 if ((Msg.Id == ID_GS || Msg.Id == ID_GS_DONE) && Operation.Id != OP_GS_NOP) {
3337 // Stream id is optional.
3338 if (getLexer().is(AsmToken::RParen)) {
3339 Parser.Lex();
3340 return false;
3341 }
3342
3343 if (getLexer().isNot(AsmToken::Comma))
3344 return true;
3345 Parser.Lex();
3346
3347 if (getLexer().isNot(AsmToken::Integer))
3348 return true;
3349 if (getParser().parseAbsoluteExpression(StreamId))
3350 return true;
3351 }
3352
3353 if (getLexer().isNot(AsmToken::RParen))
3354 return true;
3355 Parser.Lex();
3356 return false;
3357}
3358
3359OperandMatchResultTy AMDGPUAsmParser::parseInterpSlot(OperandVector &Operands) {
3360 if (getLexer().getKind() != AsmToken::Identifier)
3361 return MatchOperand_NoMatch;
3362
3363 StringRef Str = Parser.getTok().getString();
3364 int Slot = StringSwitch<int>(Str)
3365 .Case("p10", 0)
3366 .Case("p20", 1)
3367 .Case("p0", 2)
3368 .Default(-1);
3369
3370 SMLoc S = Parser.getTok().getLoc();
3371 if (Slot == -1)
3372 return MatchOperand_ParseFail;
3373
3374 Parser.Lex();
3375 Operands.push_back(AMDGPUOperand::CreateImm(this, Slot, S,
3376 AMDGPUOperand::ImmTyInterpSlot));
3377 return MatchOperand_Success;
3378}
3379
3380OperandMatchResultTy AMDGPUAsmParser::parseInterpAttr(OperandVector &Operands) {
3381 if (getLexer().getKind() != AsmToken::Identifier)
3382 return MatchOperand_NoMatch;
3383
3384 StringRef Str = Parser.getTok().getString();
3385 if (!Str.startswith("attr"))
3386 return MatchOperand_NoMatch;
3387
3388 StringRef Chan = Str.take_back(2);
3389 int AttrChan = StringSwitch<int>(Chan)
3390 .Case(".x", 0)
3391 .Case(".y", 1)
3392 .Case(".z", 2)
3393 .Case(".w", 3)
3394 .Default(-1);
3395 if (AttrChan == -1)
3396 return MatchOperand_ParseFail;
3397
3398 Str = Str.drop_back(2).drop_front(4);
3399
3400 uint8_t Attr;
3401 if (Str.getAsInteger(10, Attr))
3402 return MatchOperand_ParseFail;
3403
3404 SMLoc S = Parser.getTok().getLoc();
3405 Parser.Lex();
3406 if (Attr > 63) {
3407 Error(S, "out of bounds attr");
3408 return MatchOperand_Success;
3409 }
3410
3411 SMLoc SChan = SMLoc::getFromPointer(Chan.data());
3412
3413 Operands.push_back(AMDGPUOperand::CreateImm(this, Attr, S,
3414 AMDGPUOperand::ImmTyInterpAttr));
3415 Operands.push_back(AMDGPUOperand::CreateImm(this, AttrChan, SChan,
3416 AMDGPUOperand::ImmTyAttrChan));
3417 return MatchOperand_Success;
3418}
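
To make the slicing above concrete: an interpolation attribute operand such as "attr12.y" is split into the attribute number (after dropping the "attr" prefix and the 2-character channel suffix) and the channel index. A sketch of accepted forms, with assumed mnemonics:

  // "attr<N>.<chan>" with N in [0,63] and chan one of .x .y .z .w, e.g.:
  //   v_interp_p1_f32 v0, v1, attr0.x    // Attr = 0,  AttrChan = 0
  //   v_interp_p2_f32 v2, v3, attr12.y   // Attr = 12, AttrChan = 1
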
3419
3420void AMDGPUAsmParser::errorExpTgt() {
3421 Error(Parser.getTok().getLoc(), "invalid exp target");
3422}
3423
3424OperandMatchResultTy AMDGPUAsmParser::parseExpTgtImpl(StringRef Str,
3425 uint8_t &Val) {
3426 if (Str == "null") {
3427 Val = 9;
3428 return MatchOperand_Success;
3429 }
3430
3431 if (Str.startswith("mrt")) {
3432 Str = Str.drop_front(3);
3433 if (Str == "z") { // == mrtz
3434 Val = 8;
3435 return MatchOperand_Success;
3436 }
3437
3438 if (Str.getAsInteger(10, Val))
3439 return MatchOperand_ParseFail;
3440
3441 if (Val > 7)
3442 errorExpTgt();
3443
3444 return MatchOperand_Success;
3445 }
3446
3447 if (Str.startswith("pos")) {
3448 Str = Str.drop_front(3);
3449 if (Str.getAsInteger(10, Val))
3450 return MatchOperand_ParseFail;
3451
3452 if (Val > 3)
3453 errorExpTgt();
3454
3455 Val += 12;
3456 return MatchOperand_Success;
3457 }
3458
3459 if (Str.startswith("param")) {
3460 Str = Str.drop_front(5);
3461 if (Str.getAsInteger(10, Val))
3462 return MatchOperand_ParseFail;
3463
3464 if (Val >= 32)
3465 errorExpTgt();
3466
3467 Val += 32;
3468 return MatchOperand_Success;
3469 }
3470
3471 if (Str.startswith("invalid_target_")) {
3472 Str = Str.drop_front(15);
3473 if (Str.getAsInteger(10, Val))
3474 return MatchOperand_ParseFail;
3475
3476 errorExpTgt();
3477 return MatchOperand_Success;
3478 }
3479
3480 return MatchOperand_NoMatch;
3481}
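
Summarizing the branches above, the export-target names map onto the following encoded values (derived directly from the offsets applied in each branch):

  //   mrt0..mrt7         -> 0..7
  //   mrtz               -> 8
  //   null               -> 9
  //   pos0..pos3         -> 12..15   (Val += 12)
  //   param0..param31    -> 32..63   (Val += 32)
  //   invalid_target_<N> -> N        (always reported as an error)
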
3482
3483OperandMatchResultTy AMDGPUAsmParser::parseExpTgt(OperandVector &Operands) {
3484 uint8_t Val;
3485 StringRef Str = Parser.getTok().getString();
3486
3487 auto Res = parseExpTgtImpl(Str, Val);
3488 if (Res != MatchOperand_Success)
3489 return Res;
3490
3491 SMLoc S = Parser.getTok().getLoc();
3492 Parser.Lex();
3493
3494 Operands.push_back(AMDGPUOperand::CreateImm(this, Val, S,
3495 AMDGPUOperand::ImmTyExpTgt));
3496 return MatchOperand_Success;
3497}
3498
3499OperandMatchResultTy
3500AMDGPUAsmParser::parseSendMsgOp(OperandVector &Operands) {
3501 using namespace llvm::AMDGPU::SendMsg;
3502
3503 int64_t Imm16Val = 0;
3504 SMLoc S = Parser.getTok().getLoc();
3505
3506 switch(getLexer().getKind()) {
3507 default:
3508 return MatchOperand_NoMatch;
3509 case AsmToken::Integer:
3510 // The operand can be an integer value.
3511 if (getParser().parseAbsoluteExpression(Imm16Val))
3512 return MatchOperand_NoMatch;
3513 if (Imm16Val < 0 || !isUInt<16>(Imm16Val)) {
3514 Error(S, "invalid immediate: only 16-bit values are legal");
3515 // Do not return an error code; create an imm operand anyway and proceed
3516 // to the next operand, if any. This avoids unnecessary error messages.
3517 }
3518 break;
3519 case AsmToken::Identifier: {
3520 OperandInfoTy Msg(ID_UNKNOWN_);
3521 OperandInfoTy Operation(OP_UNKNOWN_);
3522 int64_t StreamId = STREAM_ID_DEFAULT_;
3523 if (parseSendMsgConstruct(Msg, Operation, StreamId))
3524 return MatchOperand_ParseFail;
3525 do {
3526 // Validate and encode message ID.
3527 if (! ((ID_INTERRUPT <= Msg.Id && Msg.Id <= ID_GS_DONE)
3528 || Msg.Id == ID_SYSMSG)) {
3529 if (Msg.IsSymbolic)
3530 Error(S, "invalid/unsupported symbolic name of message");
3531 else
3532 Error(S, "invalid/unsupported code of message");
3533 break;
3534 }
3535 Imm16Val = (Msg.Id << ID_SHIFT_);
3536 // Validate and encode operation ID.
3537 if (Msg.Id == ID_GS || Msg.Id == ID_GS_DONE) {
3538 if (! (OP_GS_FIRST_ <= Operation.Id && Operation.Id < OP_GS_LAST_)) {
3539 if (Operation.IsSymbolic)
3540 Error(S, "invalid symbolic name of GS_OP");
3541 else
3542 Error(S, "invalid code of GS_OP: only 2-bit values are legal");
3543 break;
3544 }
3545 if (Operation.Id == OP_GS_NOP
3546 && Msg.Id != ID_GS_DONE) {
3547 Error(S, "invalid GS_OP: NOP is for GS_DONE only");
3548 break;
3549 }
3550 Imm16Val |= (Operation.Id << OP_SHIFT_);
3551 }
3552 if (Msg.Id == ID_SYSMSG) {
3553 if (! (OP_SYS_FIRST_ <= Operation.Id && Operation.Id < OP_SYS_LAST_)) {
3554 if (Operation.IsSymbolic)
3555 Error(S, "invalid/unsupported symbolic name of SYSMSG_OP");
3556 else
3557 Error(S, "invalid/unsupported code of SYSMSG_OP");
3558 break;
3559 }
3560 Imm16Val |= (Operation.Id << OP_SHIFT_);
3561 }
3562 // Validate and encode stream ID.
3563 if ((Msg.Id == ID_GS || Msg.Id == ID_GS_DONE) && Operation.Id != OP_GS_NOP) {
3564 if (! (STREAM_ID_FIRST_ <= StreamId && StreamId < STREAM_ID_LAST_)) {
3565 Error(S, "invalid stream id: only 2-bit values are legal");
3566 break;
3567 }
3568 Imm16Val |= (StreamId << STREAM_ID_SHIFT_);
3569 }
3570 } while (false);
3571 }
3572 break;
3573 }
3574 Operands.push_back(AMDGPUOperand::CreateImm(this, Imm16Val, S, AMDGPUOperand::ImmTySendMsg));
3575 return MatchOperand_Success;
3576}
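
A few illustrative sendmsg operands the code above accepts. This is a sketch; the symbolic names are defined in the SendMsg tables, and the exact spellings shown are assumptions.

  const char *SendMsgExamples[] = {
    "s_sendmsg sendmsg(MSG_INTERRUPT)",          // no operation/stream part
    "s_sendmsg sendmsg(MSG_GS, GS_OP_EMIT, 0)",  // GS op plus 2-bit stream id
    "s_sendmsg sendmsg(MSG_SYSMSG, SYSMSG_OP_ECC_ERR_INTERRUPT)",
  };
  // Encoded as: Imm16Val = (Msg.Id << ID_SHIFT_) | (Operation.Id << OP_SHIFT_)
  //                      | (StreamId << STREAM_ID_SHIFT_)
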
3577
3578bool AMDGPUOperand::isSendMsg() const {
3579 return isImmTy(ImmTySendMsg);
3580}
3581
3582//===----------------------------------------------------------------------===//
3583// parser helpers
3584//===----------------------------------------------------------------------===//
3585
3586bool
3587AMDGPUAsmParser::trySkipId(const StringRef Id) {
3588 if (getLexer().getKind() == AsmToken::Identifier &&
3589 Parser.getTok().getString() == Id) {
3590 Parser.Lex();
3591 return true;
3592 }
3593 return false;
3594}
3595
3596bool
3597AMDGPUAsmParser::trySkipToken(const AsmToken::TokenKind Kind) {
3598 if (getLexer().getKind() == Kind) {
3599 Parser.Lex();
3600 return true;
3601 }
3602 return false;
3603}
3604
3605bool
3606AMDGPUAsmParser::skipToken(const AsmToken::TokenKind Kind,
3607 const StringRef ErrMsg) {
3608 if (!trySkipToken(Kind)) {
3609 Error(Parser.getTok().getLoc(), ErrMsg);
3610 return false;
3611 }
3612 return true;
3613}
3614
3615bool
3616AMDGPUAsmParser::parseExpr(int64_t &Imm) {
3617 return !getParser().parseAbsoluteExpression(Imm);
3618}
3619
3620bool
3621AMDGPUAsmParser::parseString(StringRef &Val, const StringRef ErrMsg) {
3622 SMLoc S = Parser.getTok().getLoc();
3623 if (getLexer().getKind() == AsmToken::String) {
3624 Val = Parser.getTok().getStringContents();
3625 Parser.Lex();
3626 return true;
3627 } else {
3628 Error(S, ErrMsg);
3629 return false;
3630 }
3631}
3632
3633//===----------------------------------------------------------------------===//
3634// swizzle
3635//===----------------------------------------------------------------------===//
3636
3637 LLVM_READNONE
3638static unsigned
3639encodeBitmaskPerm(const unsigned AndMask,
3640 const unsigned OrMask,
3641 const unsigned XorMask) {
3642 using namespace llvm::AMDGPU::Swizzle;
3643
3644 return BITMASK_PERM_ENC |
3645 (AndMask << BITMASK_AND_SHIFT) |
3646 (OrMask << BITMASK_OR_SHIFT) |
3647 (XorMask << BITMASK_XOR_SHIFT);
3648}
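
The AND/OR/XOR masks describe how each lane computes the lane it reads from. My understanding, an assumption based on the ds_swizzle bitmask mode rather than anything stated in this file, is the following rule:

  // Sketch of the lane rule the three masks encode:
  static unsigned swizzledSrcLane(unsigned Lane, unsigned AndMask,
                                  unsigned OrMask, unsigned XorMask) {
    return ((Lane & AndMask) | OrMask) ^ XorMask;
  }
  // e.g. a swap with group size 16 (AndMask=31, OrMask=0, XorMask=16)
  // maps lane 3 -> 19 and lane 19 -> 3, exchanging the two half-waves.
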
3649
3650bool
3651AMDGPUAsmParser::parseSwizzleOperands(const unsigned OpNum, int64_t* Op,
3652 const unsigned MinVal,
3653 const unsigned MaxVal,
3654 const StringRef ErrMsg) {
3655 for (unsigned i = 0; i < OpNum; ++i) {
3656 if (!skipToken(AsmToken::Comma, "expected a comma")){
3657 return false;
3658 }
3659 SMLoc ExprLoc = Parser.getTok().getLoc();
3660 if (!parseExpr(Op[i])) {
3661 return false;
3662 }
3663 if (Op[i] < MinVal || Op[i] > MaxVal) {
3664 Error(ExprLoc, ErrMsg);
3665 return false;
3666 }
3667 }
3668
3669 return true;
3670}
3671
3672bool
3673AMDGPUAsmParser::parseSwizzleQuadPerm(int64_t &Imm) {
3674 using namespace llvm::AMDGPU::Swizzle;
3675
3676 int64_t Lane[LANE_NUM];
3677 if (parseSwizzleOperands(LANE_NUM, Lane, 0, LANE_MAX,
3678 "expected a 2-bit lane id")) {
3679 Imm = QUAD_PERM_ENC;
3680 for (auto i = 0; i < LANE_NUM; ++i) {
3681 Imm |= Lane[i] << (LANE_SHIFT * i);
3682 }
3683 return true;
3684 }
3685 return false;
3686}
3687
3688bool
3689AMDGPUAsmParser::parseSwizzleBroadcast(int64_t &Imm) {
3690 using namespace llvm::AMDGPU::Swizzle;
3691
3692 SMLoc S = Parser.getTok().getLoc();
3693 int64_t GroupSize;
3694 int64_t LaneIdx;
3695
3696 if (!parseSwizzleOperands(1, &GroupSize,
3697 2, 32,
3698 "group size must be in the interval [2,32]")) {
3699 return false;
3700 }
3701 if (!isPowerOf2_64(GroupSize)) {
3702 Error(S, "group size must be a power of two");
3703 return false;
3704 }
3705 if (parseSwizzleOperands(1, &LaneIdx,
3706 0, GroupSize - 1,
3707 "lane id must be in the interval [0,group size - 1]")) {
3708 Imm = encodeBitmaskPerm(BITMASK_MAX - GroupSize + 1, LaneIdx, 0);
3709 return true;
3710 }
3711 return false;
3712}
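
The AND mask computed above, BITMASK_MAX - GroupSize + 1, equals 32 - GroupSize; for a power-of-two GroupSize this clears exactly the low log2(GroupSize) bits, so every lane collapses onto its group base, and OR-ing in LaneIdx then selects the broadcast lane. A worked example under the lane rule sketched after encodeBitmaskPerm():

  // GroupSize = 8, LaneIdx = 5:
  //   AndMask = 31 - 8 + 1 = 24 = 0b11000   (keeps only the group index bits)
  //   lane 10 -> ((10 & 24) | 5) ^ 0 = 13   (lane 5 of its own group)
  //   lane  3 -> (( 3 & 24) | 5) ^ 0 = 5
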
3713
3714bool
3715AMDGPUAsmParser::parseSwizzleReverse(int64_t &Imm) {
3716 using namespace llvm::AMDGPU::Swizzle;
3717
3718 SMLoc S = Parser.getTok().getLoc();
3719 int64_t GroupSize;
3720
3721 if (!parseSwizzleOperands(1, &GroupSize,
3722 2, 32, "group size must be in the interval [2,32]")) {
3723 return false;
3724 }
3725 if (!isPowerOf2_64(GroupSize)) {
3726 Error(S, "group size must be a power of two");
3727 return false;
3728 }
3729
3730 Imm = encodeBitmaskPerm(BITMASK_MAX, 0, GroupSize - 1);
3731 return true;
3732}
3733
3734bool
3735AMDGPUAsmParser::parseSwizzleSwap(int64_t &Imm) {
3736 using namespace llvm::AMDGPU::Swizzle;
3737
3738 SMLoc S = Parser.getTok().getLoc();
3739 int64_t GroupSize;
3740
3741 if (!parseSwizzleOperands(1, &GroupSize,
3742 1, 16, "group size must be in the interval [1,16]")) {
3743 return false;
3744 }
3745 if (!isPowerOf2_64(GroupSize)) {
3746 Error(S, "group size must be a power of two");
3747 return false;
3748 }
3749
3750 Imm = encodeBitmaskPerm(BITMASK_MAX, 0, GroupSize);
3751 return true;
3752}
3753
3754bool
3755AMDGPUAsmParser::parseSwizzleBitmaskPerm(int64_t &Imm) {
3756 using namespace llvm::AMDGPU::Swizzle;
3757
3758 if (!skipToken(AsmToken::Comma, "expected a comma")) {
3759 return false;
3760 }
3761
3762 StringRef Ctl;
3763 SMLoc StrLoc = Parser.getTok().getLoc();
3764 if (!parseString(Ctl)) {
3765 return false;
3766 }
3767 if (Ctl.size() != BITMASK_WIDTH) {
3768 Error(StrLoc, "expected a 5-character mask");
3769 return false;
3770 }
3771
3772 unsigned AndMask = 0;
3773 unsigned OrMask = 0;
3774 unsigned XorMask = 0;
3775
3776 for (size_t i = 0; i < Ctl.size(); ++i) {
3777 unsigned Mask = 1 << (BITMASK_WIDTH - 1 - i);
3778 switch(Ctl[i]) {
3779 default:
3780 Error(StrLoc, "invalid mask");
3781 return false;
3782 case '0':
3783 break;
3784 case '1':
3785 OrMask |= Mask;
3786 break;
3787 case 'p':
3788 AndMask |= Mask;
3789 break;
3790 case 'i':
3791 AndMask |= Mask;
3792 XorMask |= Mask;
3793 break;
3794 }
3795 }
3796
3797 Imm = encodeBitmaskPerm(AndMask, OrMask, XorMask);
3798 return true;
3799}
3800
3801bool
3802AMDGPUAsmParser::parseSwizzleOffset(int64_t &Imm) {
3803
3804 SMLoc OffsetLoc = Parser.getTok().getLoc();
3805
3806 if (!parseExpr(Imm)) {
3807 return false;
3808 }
3809 if (!isUInt<16>(Imm)) {
3810 Error(OffsetLoc, "expected a 16-bit offset");
3811 return false;
3812 }
3813 return true;
3814}
3815
3816bool
3817AMDGPUAsmParser::parseSwizzleMacro(int64_t &Imm) {
3818 using namespace llvm::AMDGPU::Swizzle;
3819
3820 if (skipToken(AsmToken::LParen, "expected a left parenthesis")) {
3821
3822 SMLoc ModeLoc = Parser.getTok().getLoc();
3823 bool Ok = false;
3824
3825 if (trySkipId(IdSymbolic[ID_QUAD_PERM])) {
3826 Ok = parseSwizzleQuadPerm(Imm);
3827 } else if (trySkipId(IdSymbolic[ID_BITMASK_PERM])) {
3828 Ok = parseSwizzleBitmaskPerm(Imm);
3829 } else if (trySkipId(IdSymbolic[ID_BROADCAST])) {
3830 Ok = parseSwizzleBroadcast(Imm);
3831 } else if (trySkipId(IdSymbolic[ID_SWAP])) {
3832 Ok = parseSwizzleSwap(Imm);
3833 } else if (trySkipId(IdSymbolic[ID_REVERSE])) {
3834 Ok = parseSwizzleReverse(Imm);
3835 } else {
3836 Error(ModeLoc, "expected a swizzle mode");
3837 }
3838
3839 return Ok && skipToken(AsmToken::RParen, "expected a closing parenthesis");
3840 }
3841
3842 return false;
3843}
3844
3845OperandMatchResultTy
3846AMDGPUAsmParser::parseSwizzleOp(OperandVector &Operands) {
3847 SMLoc S = Parser.getTok().getLoc();
3848 int64_t Imm = 0;
3849
3850 if (trySkipId("offset")) {
3851
3852 bool Ok = false;
3853 if (skipToken(AsmToken::Colon, "expected a colon")) {
3854 if (trySkipId("swizzle")) {
3855 Ok = parseSwizzleMacro(Imm);
3856 } else {
3857 Ok = parseSwizzleOffset(Imm);
3858 }
3859 }
3860
3861 Operands.push_back(AMDGPUOperand::CreateImm(this, Imm, S, AMDGPUOperand::ImmTySwizzle));
3862
3863 return Ok? MatchOperand_Success : MatchOperand_ParseFail;
3864 } else {
3865 // Swizzle "offset" operand is optional.
3866 // If it is omitted, try parsing other optional operands.
3867 return parseOptionalOpr(Operands);
3868 }
3869}
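
End to end, the swizzle operand forms accepted here look roughly like the following (assumed mnemonic; the mode names come from the Swizzle IdSymbolic table):

  //   ds_swizzle_b32 v0, v1 offset:swizzle(QUAD_PERM, 0, 1, 2, 3)
  //   ds_swizzle_b32 v0, v1 offset:swizzle(SWAP, 16)
  //   ds_swizzle_b32 v0, v1 offset:swizzle(BITMASK_PERM, "01pi0")
  //   ds_swizzle_b32 v0, v1 offset:0xffff        // raw 16-bit value
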
3870
3871bool
3872AMDGPUOperand::isSwizzle() const {
3873 return isImmTy(ImmTySwizzle);
3874}
3875
3876//===----------------------------------------------------------------------===//
3877// sopp branch targets
3878//===----------------------------------------------------------------------===//
3879
3880OperandMatchResultTy
3881AMDGPUAsmParser::parseSOppBrTarget(OperandVector &Operands) {
3882 SMLoc S = Parser.getTok().getLoc();
3883
3884 switch (getLexer().getKind()) {
3885 default: return MatchOperand_ParseFail;
3886 case AsmToken::Integer: {
3887 int64_t Imm;
3888 if (getParser().parseAbsoluteExpression(Imm))
3889 return MatchOperand_ParseFail;
3890 Operands.push_back(AMDGPUOperand::CreateImm(this, Imm, S));
3891 return MatchOperand_Success;
3892 }
3893
3894 case AsmToken::Identifier:
3895 Operands.push_back(AMDGPUOperand::CreateExpr(this,
3896 MCSymbolRefExpr::create(getContext().getOrCreateSymbol(
3897 Parser.getTok().getString()), getContext()), S));
3898 Parser.Lex();
3899 return MatchOperand_Success;
3900 }
3901}
3902
3903//===----------------------------------------------------------------------===//
3904// mubuf
3905//===----------------------------------------------------------------------===//
3906
3907AMDGPUOperand::Ptr AMDGPUAsmParser::defaultGLC() const {
3908 return AMDGPUOperand::CreateImm(this, 0, SMLoc(), AMDGPUOperand::ImmTyGLC);
3909}
3910
3911AMDGPUOperand::Ptr AMDGPUAsmParser::defaultSLC() const {
3912 return AMDGPUOperand::CreateImm(this, 0, SMLoc(), AMDGPUOperand::ImmTySLC);
3913}
3914
3915AMDGPUOperand::Ptr AMDGPUAsmParser::defaultTFE() const {
3916 return AMDGPUOperand::CreateImm(this, 0, SMLoc(), AMDGPUOperand::ImmTyTFE);
3917}
3918
3919void AMDGPUAsmParser::cvtMubufImpl(MCInst &Inst,
3920 const OperandVector &Operands,
3921 bool IsAtomic, bool IsAtomicReturn) {
3922 OptionalImmIndexMap OptionalIdx;
3923 assert(IsAtomicReturn ? IsAtomic : true);
3924
3925 for (unsigned i = 1, e = Operands.size(); i != e; ++i) {
3926 AMDGPUOperand &Op = ((AMDGPUOperand &)*Operands[i]);
3927
3928 // Add the register arguments
3929 if (Op.isReg()) {
3930 Op.addRegOperands(Inst, 1);
3931 continue;
3932 }
3933
3934 // Handle the case where soffset is an immediate
3935 if (Op.isImm() && Op.getImmTy() == AMDGPUOperand::ImmTyNone) {
3936 Op.addImmOperands(Inst, 1);
3937 continue;
3938 }
3939
3940 // Handle tokens like 'offen' which are sometimes hard-coded into the
3941 // asm string. There are no MCInst operands for these.
3942 if (Op.isToken()) {
3943 continue;
3944 }
3945 assert(Op.isImm());
3946
3947 // Handle optional arguments
3948 OptionalIdx[Op.getImmTy()] = i;
3949 }
3950
3951 // Copy $vdata_in operand and insert as $vdata for MUBUF_Atomic RTN insns.
3952 if (IsAtomicReturn) {
3953 MCInst::iterator I = Inst.begin(); // $vdata_in is always at the beginning.
3954 Inst.insert(I, *I);
3955 }
3956
3957 addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyOffset);
3958 if (!IsAtomic) { // glc is hard-coded.
3959 addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyGLC);
3960 }
3961 addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTySLC);
3962 addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyTFE);
3963}
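
For context, typical MUBUF forms this conversion handles might look like the examples below (assumed mnemonics). For atomics with return, 'glc' is hard-coded in the asm string, which is why it is skipped above.

  //   buffer_load_dword  v1, v2, s[4:7], s1 offen offset:4 glc slc
  //   buffer_atomic_add  v1, v2, s[4:7], s1 offen glc   // RTN form: $vdata_in
  //                                                     // is duplicated as $vdata
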
3964
3965void AMDGPUAsmParser::cvtMtbuf(MCInst &Inst, const OperandVector &Operands) {
3966 OptionalImmIndexMap OptionalIdx;
3967
3968 for (unsigned i = 1, e = Operands.size(); i != e; ++i) {
3969 AMDGPUOperand &Op = ((AMDGPUOperand &)*Operands[i]);
3970
3971 // Add the register arguments
3972 if (Op.isReg()) {
3973 Op.addRegOperands(Inst, 1);
3974 continue;
3975 }
3976
3977 // Handle the case where soffset is an immediate
3978 if (Op.isImm() && Op.getImmTy() == AMDGPUOperand::ImmTyNone) {
3979 Op.addImmOperands(Inst, 1);
3980 continue;
3981 }
3982
3983 // Handle tokens like 'offen' which are sometimes hard-coded into the
3984 // asm string. There are no MCInst operands for these.
3985 if (Op.isToken()) {
3986 continue;
3987 }
3988 assert(Op.isImm());
3989
3990 // Handle optional arguments
3991 OptionalIdx[Op.getImmTy()] = i;
3992 }
3993
3994 addOptionalImmOperand(Inst, Operands, OptionalIdx,
3995 AMDGPUOperand::ImmTyOffset);
3996 addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyDFMT);
3997 addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyNFMT);
3998 addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyGLC);
3999 addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTySLC);
4000 addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyTFE);
4001}
4002
4003//===----------------------------------------------------------------------===//
4004// mimg
4005//===----------------------------------------------------------------------===//
4006
4007void AMDGPUAsmParser::cvtMIMG(MCInst &Inst, const OperandVector &Operands,
4008 bool IsAtomic) {
4009 unsigned I = 1;
4010 const MCInstrDesc &Desc = MII.get(Inst.getOpcode());
4011 for (unsigned J = 0; J < Desc.getNumDefs(); ++J) {
4012 ((AMDGPUOperand &)*Operands[I++]).addRegOperands(Inst, 1);
4013 }
4014
4015 if (IsAtomic) {
4016 // Add src, same as dst
4017 ((AMDGPUOperand &)*Operands[I]).addRegOperands(Inst, 1);
4018 }
4019
4020 OptionalImmIndexMap OptionalIdx;
4021
4022 for (unsigned E = Operands.size(); I != E; ++I) {
4023 AMDGPUOperand &Op = ((AMDGPUOperand &)*Operands[I]);
4024
4025 // Add the register arguments
4026 if (Op.isRegOrImm()) {
4027 Op.addRegOrImmOperands(Inst, 1);
4028 continue;
4029 } else if (Op.isImmModifier()) {
4030 OptionalIdx[Op.getImmTy()] = I;
4031 } else {
4032 llvm_unreachable("unexpected operand type")::llvm::llvm_unreachable_internal("unexpected operand type", "/build/llvm-toolchain-snapshot-6.0~svn321639/lib/Target/AMDGPU/AsmParser/AMDGPUAsmParser.cpp"
, 4032)
;
4033 }
4034 }
4035
4036 addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyDMask);
4037 addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyUNorm);
4038 addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyGLC);
4039 addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyDA);
4040 addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyR128);
4041 addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyTFE);
4042 addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyLWE);
4043 addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTySLC);
4044}
4045
4046void AMDGPUAsmParser::cvtMIMGAtomic(MCInst &Inst, const OperandVector &Operands) {
4047 cvtMIMG(Inst, Operands, true);
4048}
4049
4050AMDGPUOperand::Ptr AMDGPUAsmParser::defaultDMask() const {
4051 return AMDGPUOperand::CreateImm(this, 0, SMLoc(), AMDGPUOperand::ImmTyDMask);
4052}
4053
4054AMDGPUOperand::Ptr AMDGPUAsmParser::defaultUNorm() const {
4055 return AMDGPUOperand::CreateImm(this, 0, SMLoc(), AMDGPUOperand::ImmTyUNorm);
4056}
4057
4058AMDGPUOperand::Ptr AMDGPUAsmParser::defaultDA() const {
4059 return AMDGPUOperand::CreateImm(this, 0, SMLoc(), AMDGPUOperand::ImmTyDA);
4060}
4061
4062AMDGPUOperand::Ptr AMDGPUAsmParser::defaultR128() const {
4063 return AMDGPUOperand::CreateImm(this, 0, SMLoc(), AMDGPUOperand::ImmTyR128);
4064}
4065
4066AMDGPUOperand::Ptr AMDGPUAsmParser::defaultLWE() const {
4067 return AMDGPUOperand::CreateImm(this, 0, SMLoc(), AMDGPUOperand::ImmTyLWE);
4068}
4069
4070//===----------------------------------------------------------------------===//
4071// smrd
4072//===----------------------------------------------------------------------===//
4073
4074bool AMDGPUOperand::isSMRDOffset8() const {
4075 return isImm() && isUInt<8>(getImm());
4076}
4077
4078bool AMDGPUOperand::isSMRDOffset20() const {
4079 return isImm() && isUInt<20>(getImm());
4080}
4081
4082bool AMDGPUOperand::isSMRDLiteralOffset() const {
4083 // 32-bit literals are only supported on CI and we only want to use them
4084 // when the offset does not fit in 8 bits.
4085 return isImm() && !isUInt<8>(getImm()) && isUInt<32>(getImm());
4086}
4087
4088AMDGPUOperand::Ptr AMDGPUAsmParser::defaultSMRDOffset8() const {
4089 return AMDGPUOperand::CreateImm(this, 0, SMLoc(), AMDGPUOperand::ImmTyOffset);
4090}
4091
4092AMDGPUOperand::Ptr AMDGPUAsmParser::defaultSMRDOffset20() const {
4093 return AMDGPUOperand::CreateImm(this, 0, SMLoc(), AMDGPUOperand::ImmTyOffset);
4094}
4095
4096AMDGPUOperand::Ptr AMDGPUAsmParser::defaultSMRDLiteralOffset() const {
4097 return AMDGPUOperand::CreateImm(this, 0, SMLoc(), AMDGPUOperand::ImmTyOffset);
4098}
4099
4100AMDGPUOperand::Ptr AMDGPUAsmParser::defaultOffsetU12() const {
4101 return AMDGPUOperand::CreateImm(this, 0, SMLoc(), AMDGPUOperand::ImmTyOffset);
4102}
4103
4104AMDGPUOperand::Ptr AMDGPUAsmParser::defaultOffsetS13() const {
4105 return AMDGPUOperand::CreateImm(this, 0, SMLoc(), AMDGPUOperand::ImmTyOffset);
4106}
4107
4108//===----------------------------------------------------------------------===//
4109// vop3
4110//===----------------------------------------------------------------------===//
4111
4112static bool ConvertOmodMul(int64_t &Mul) {
4113 if (Mul != 1 && Mul != 2 && Mul != 4)
4114 return false;
4115
4116 Mul >>= 1;
4117 return true;
4118}
4119
4120static bool ConvertOmodDiv(int64_t &Div) {
4121 if (Div == 1) {
4122 Div = 0;
4123 return true;
4124 }
4125
4126 if (Div == 2) {
4127 Div = 3;
4128 return true;
4129 }
4130
4131 return false;
4132}
4133
4134static bool ConvertBoundCtrl(int64_t &BoundCtrl) {
4135 if (BoundCtrl == 0) {
4136 BoundCtrl = 1;
4137 return true;
4138 }
4139
4140 if (BoundCtrl == -1) {
4141 BoundCtrl = 0;
4142 return true;
4143 }
4144
4145 return false;
4146}
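
The three converters above normalize user-facing values to their encodings; the resulting mapping, read straight from the code, is:

  //   omod  mul:1 -> 0,  mul:2 -> 1,  mul:4 -> 2   (Mul >>= 1)
  //   omod  div:1 -> 0,  div:2 -> 3
  //   bound_ctrl:0 -> 1, bound_ctrl:-1 -> 0
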
4147
4148// Note: the order in this table matches the order of operands in AsmString.
4149static const OptionalOperand AMDGPUOptionalOperandTable[] = {
4150 {"offen", AMDGPUOperand::ImmTyOffen, true, nullptr},
4151 {"idxen", AMDGPUOperand::ImmTyIdxen, true, nullptr},
4152 {"addr64", AMDGPUOperand::ImmTyAddr64, true, nullptr},
4153 {"offset0", AMDGPUOperand::ImmTyOffset0, false, nullptr},
4154 {"offset1", AMDGPUOperand::ImmTyOffset1, false, nullptr},
4155 {"gds", AMDGPUOperand::ImmTyGDS, true, nullptr},
4156 {"offset", AMDGPUOperand::ImmTyOffset, false, nullptr},
4157 {"inst_offset", AMDGPUOperand::ImmTyInstOffset, false, nullptr},
4158 {"dfmt", AMDGPUOperand::ImmTyDFMT, false, nullptr},
4159 {"nfmt", AMDGPUOperand::ImmTyNFMT, false, nullptr},
4160 {"glc", AMDGPUOperand::ImmTyGLC, true, nullptr},
4161 {"slc", AMDGPUOperand::ImmTySLC, true, nullptr},
4162 {"tfe", AMDGPUOperand::ImmTyTFE, true, nullptr},
4163 {"high", AMDGPUOperand::ImmTyHigh, true, nullptr},
4164 {"clamp", AMDGPUOperand::ImmTyClampSI, true, nullptr},
4165 {"omod", AMDGPUOperand::ImmTyOModSI, false, ConvertOmodMul},
4166 {"unorm", AMDGPUOperand::ImmTyUNorm, true, nullptr},
4167 {"da", AMDGPUOperand::ImmTyDA, true, nullptr},
4168 {"r128", AMDGPUOperand::ImmTyR128, true, nullptr},
4169 {"lwe", AMDGPUOperand::ImmTyLWE, true, nullptr},
4170 {"dmask", AMDGPUOperand::ImmTyDMask, false, nullptr},
4171 {"row_mask", AMDGPUOperand::ImmTyDppRowMask, false, nullptr},
4172 {"bank_mask", AMDGPUOperand::ImmTyDppBankMask, false, nullptr},
4173 {"bound_ctrl", AMDGPUOperand::ImmTyDppBoundCtrl, false, ConvertBoundCtrl},
4174 {"dst_sel", AMDGPUOperand::ImmTySdwaDstSel, false, nullptr},
4175 {"src0_sel", AMDGPUOperand::ImmTySdwaSrc0Sel, false, nullptr},
4176 {"src1_sel", AMDGPUOperand::ImmTySdwaSrc1Sel, false, nullptr},
4177 {"dst_unused", AMDGPUOperand::ImmTySdwaDstUnused, false, nullptr},
4178 {"compr", AMDGPUOperand::ImmTyExpCompr, true, nullptr },
4179 {"vm", AMDGPUOperand::ImmTyExpVM, true, nullptr},
4180 {"op_sel", AMDGPUOperand::ImmTyOpSel, false, nullptr},
4181 {"op_sel_hi", AMDGPUOperand::ImmTyOpSelHi, false, nullptr},
4182 {"neg_lo", AMDGPUOperand::ImmTyNegLo, false, nullptr},
4183 {"neg_hi", AMDGPUOperand::ImmTyNegHi, false, nullptr}
4184};
4185
4186OperandMatchResultTy AMDGPUAsmParser::parseOptionalOperand(OperandVector &Operands) {
4187 unsigned size = Operands.size();
4188 assert(size > 0);
4189
4190 OperandMatchResultTy res = parseOptionalOpr(Operands);
4191
4192 // This is a hack to enable hardcoded mandatory operands which follow
4193 // optional operands.
4194 //
4195 // The current design assumes that all operands after the first optional
4196 // operand are also optional. However, the implementation of some instructions
4197 // violates this rule (see e.g. flat/global atomics, which have a hardcoded 'glc' operand).
4198 //
4199 // To alleviate this problem, we have to (implicitly) parse extra operands
4200 // to make sure the autogenerated parser of custom operands never hits the
4201 // hardcoded mandatory operands.
4202
4203 if (size == 1 || ((AMDGPUOperand &)*Operands[size - 1]).isRegKind()) {
4204
4205 // We have parsed the first optional operand.
4206 // Parse as many operands as necessary to skip all mandatory operands.
4207
4208 for (unsigned i = 0; i < MAX_OPR_LOOKAHEAD; ++i) {
4209 if (res != MatchOperand_Success ||
4210 getLexer().is(AsmToken::EndOfStatement)) break;
4211 if (getLexer().is(AsmToken::Comma)) Parser.Lex();
4212 res = parseOptionalOpr(Operands);
4213 }
4214 }
4215
4216 return res;
4217}
4218
4219OperandMatchResultTy AMDGPUAsmParser::parseOptionalOpr(OperandVector &Operands) {
4220 OperandMatchResultTy res;
4221 for (const OptionalOperand &Op : AMDGPUOptionalOperandTable) {
4222 // try to parse any optional operand here
4223 if (Op.IsBit) {
4224 res = parseNamedBit(Op.Name, Operands, Op.Type);
4225 } else if (Op.Type == AMDGPUOperand::ImmTyOModSI) {
4226 res = parseOModOperand(Operands);
4227 } else if (Op.Type == AMDGPUOperand::ImmTySdwaDstSel ||
4228 Op.Type == AMDGPUOperand::ImmTySdwaSrc0Sel ||
4229 Op.Type == AMDGPUOperand::ImmTySdwaSrc1Sel) {
4230 res = parseSDWASel(Operands, Op.Name, Op.Type);
4231 } else if (Op.Type == AMDGPUOperand::ImmTySdwaDstUnused) {
4232 res = parseSDWADstUnused(Operands);
4233 } else if (Op.Type == AMDGPUOperand::ImmTyOpSel ||
4234 Op.Type == AMDGPUOperand::ImmTyOpSelHi ||
4235 Op.Type == AMDGPUOperand::ImmTyNegLo ||
4236 Op.Type == AMDGPUOperand::ImmTyNegHi) {
4237 res = parseOperandArrayWithPrefix(Op.Name, Operands, Op.Type,
4238 Op.ConvertResult);
4239 } else {
4240 res = parseIntWithPrefix(Op.Name, Operands, Op.Type, Op.ConvertResult);
4241 }
4242 if (res != MatchOperand_NoMatch) {
4243 return res;
4244 }
4245 }
4246 return MatchOperand_NoMatch;
4247}
4248
4249OperandMatchResultTy AMDGPUAsmParser::parseOModOperand(OperandVector &Operands) {
4250 StringRef Name = Parser.getTok().getString();
4251 if (Name == "mul") {
4252 return parseIntWithPrefix("mul", Operands,
4253 AMDGPUOperand::ImmTyOModSI, ConvertOmodMul);
4254 }
4255
4256 if (Name == "div") {
4257 return parseIntWithPrefix("div", Operands,
4258 AMDGPUOperand::ImmTyOModSI, ConvertOmodDiv);
4259 }
4260
4261 return MatchOperand_NoMatch;
4262}
4263
4264void AMDGPUAsmParser::cvtVOP3OpSel(MCInst &Inst, const OperandVector &Operands) {
4265 cvtVOP3P(Inst, Operands);
4266
4267 int Opc = Inst.getOpcode();
4268
4269 int SrcNum;
4270 const int Ops[] = { AMDGPU::OpName::src0,
4271 AMDGPU::OpName::src1,
4272 AMDGPU::OpName::src2 };
4273 for (SrcNum = 0;
4274 SrcNum < 3 && AMDGPU::getNamedOperandIdx(Opc, Ops[SrcNum]) != -1;
4275 ++SrcNum);
4276 assert(SrcNum > 0);
4277
4278 int OpSelIdx = AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::op_sel);
4279 unsigned OpSel = Inst.getOperand(OpSelIdx).getImm();
4280
4281 if ((OpSel & (1 << SrcNum)) != 0) {
4282 int ModIdx = AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::src0_modifiers);
4283 uint32_t ModVal = Inst.getOperand(ModIdx).getImm();
4284 Inst.getOperand(ModIdx).setImm(ModVal | SISrcMods::DST_OP_SEL);
4285 }
4286}
4287
4288static bool isRegOrImmWithInputMods(const MCInstrDesc &Desc, unsigned OpNum) {
4289 // 1. This operand is input modifiers
4290 return Desc.OpInfo[OpNum].OperandType == AMDGPU::OPERAND_INPUT_MODS
4291 // 2. This is not last operand
4292 && Desc.NumOperands > (OpNum + 1)
4293 // 3. Next operand is register class
4294 && Desc.OpInfo[OpNum + 1].RegClass != -1
4295 // 4. Next register is not tied to any other operand
4296 && Desc.getOperandConstraint(OpNum + 1, MCOI::OperandConstraint::TIED_TO) == -1;
4297}
4298
4299void AMDGPUAsmParser::cvtVOP3Interp(MCInst &Inst, const OperandVector &Operands)
4300{
4301 OptionalImmIndexMap OptionalIdx;
4302 unsigned Opc = Inst.getOpcode();
4303
4304 unsigned I = 1;
4305 const MCInstrDesc &Desc = MII.get(Inst.getOpcode());
4306 for (unsigned J = 0; J < Desc.getNumDefs(); ++J) {
4307 ((AMDGPUOperand &)*Operands[I++]).addRegOperands(Inst, 1);
4308 }
4309
4310 for (unsigned E = Operands.size(); I != E; ++I) {
4311 AMDGPUOperand &Op = ((AMDGPUOperand &)*Operands[I]);
4312 if (isRegOrImmWithInputMods(Desc, Inst.getNumOperands())) {
4313 Op.addRegOrImmWithFPInputModsOperands(Inst, 2);
4314 } else if (Op.isInterpSlot() ||
4315 Op.isInterpAttr() ||
4316 Op.isAttrChan()) {
4317 Inst.addOperand(MCOperand::createImm(Op.Imm.Val));
4318 } else if (Op.isImmModifier()) {
4319 OptionalIdx[Op.getImmTy()] = I;
4320 } else {
4321 llvm_unreachable("unhandled operand type")::llvm::llvm_unreachable_internal("unhandled operand type", "/build/llvm-toolchain-snapshot-6.0~svn321639/lib/Target/AMDGPU/AsmParser/AMDGPUAsmParser.cpp"
, 4321)
;
4322 }
4323 }
4324
4325 if (AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::high) != -1) {
4326 addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyHigh);
4327 }
4328
4329 if (AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::clamp) != -1) {
4330 addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyClampSI);
4331 }
4332
4333 if (AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::omod) != -1) {
4334 addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyOModSI);
4335 }
4336}
4337
4338void AMDGPUAsmParser::cvtVOP3(MCInst &Inst, const OperandVector &Operands,
4339 OptionalImmIndexMap &OptionalIdx) {
4340 unsigned Opc = Inst.getOpcode();
4341
4342 unsigned I = 1;
4343 const MCInstrDesc &Desc = MII.get(Inst.getOpcode());
4344 for (unsigned J = 0; J < Desc.getNumDefs(); ++J) {
4345 ((AMDGPUOperand &)*Operands[I++]).addRegOperands(Inst, 1);
4346 }
4347
4348 if (AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::src0_modifiers) != -1) {
4349 // This instruction has src modifiers
4350 for (unsigned E = Operands.size(); I != E; ++I) {
4351 AMDGPUOperand &Op = ((AMDGPUOperand &)*Operands[I]);
4352 if (isRegOrImmWithInputMods(Desc, Inst.getNumOperands())) {
4353 Op.addRegOrImmWithFPInputModsOperands(Inst, 2);
4354 } else if (Op.isImmModifier()) {
4355 OptionalIdx[Op.getImmTy()] = I;
4356 } else if (Op.isRegOrImm()) {
4357 Op.addRegOrImmOperands(Inst, 1);
4358 } else {
4359 llvm_unreachable("unhandled operand type")::llvm::llvm_unreachable_internal("unhandled operand type", "/build/llvm-toolchain-snapshot-6.0~svn321639/lib/Target/AMDGPU/AsmParser/AMDGPUAsmParser.cpp"
, 4359)
;
4360 }
4361 }
4362 } else {
4363 // No src modifiers
4364 for (unsigned E = Operands.size(); I != E; ++I) {
4365 AMDGPUOperand &Op = ((AMDGPUOperand &)*Operands[I]);
4366 if (Op.isMod()) {
4367 OptionalIdx[Op.getImmTy()] = I;
4368 } else {
4369 Op.addRegOrImmOperands(Inst, 1);
4370 }
4371 }
4372 }
4373
4374 if (AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::clamp) != -1) {
4375 addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyClampSI);
4376 }
4377
4378 if (AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::omod) != -1) {
4379 addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyOModSI);
4380 }
4381
4382 // Special case for v_mac_{f16, f32}:
4383 // they have a src2 register operand that is tied to the dst operand.
4384 // We don't allow modifiers for this operand in the assembler, so
4385 // src2_modifiers should be 0.
4386 if (Opc == AMDGPU::V_MAC_F32_e64_si || Opc == AMDGPU::V_MAC_F32_e64_vi ||
4387 Opc == AMDGPU::V_MAC_F16_e64_vi) {
4388 auto it = Inst.begin();
4389 std::advance(it, AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::src2_modifiers));
4390 it = Inst.insert(it, MCOperand::createImm(0)); // no modifiers for src2
4391 ++it;
4392 Inst.insert(it, Inst.getOperand(0)); // src2 = dst
4393 }
4394}
4395
4396void AMDGPUAsmParser::cvtVOP3(MCInst &Inst, const OperandVector &Operands) {
4397 OptionalImmIndexMap OptionalIdx;
4398 cvtVOP3(Inst, Operands, OptionalIdx);
4399}
4400
4401void AMDGPUAsmParser::cvtVOP3P(MCInst &Inst,
4402 const OperandVector &Operands) {
4403 OptionalImmIndexMap OptIdx;
4404 const int Opc = Inst.getOpcode();
4405 const MCInstrDesc &Desc = MII.get(Opc);
4406
4407 const bool IsPacked = (Desc.TSFlags & SIInstrFlags::IsPacked) != 0;
4408
4409 cvtVOP3(Inst, Operands, OptIdx);
4410
4411 if (AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::vdst_in) != -1) {
4412 assert(!IsPacked);
4413 Inst.addOperand(Inst.getOperand(0));
4414 }
4415
4416 // FIXME: This is messy. Parse the modifiers as if it was a normal VOP3
4417 // instruction, and then figure out where to actually put the modifiers
4418
4419 addOptionalImmOperand(Inst, Operands, OptIdx, AMDGPUOperand::ImmTyOpSel);
4420
4421 int OpSelHiIdx = AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::op_sel_hi);
4422 if (OpSelHiIdx != -1) {
4423 int DefaultVal = IsPacked ? -1 : 0;
4424 addOptionalImmOperand(Inst, Operands, OptIdx, AMDGPUOperand::ImmTyOpSelHi,
4425 DefaultVal);
4426 }
4427
4428 int NegLoIdx = AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::neg_lo);
4429 if (NegLoIdx != -1) {
4430 assert(IsPacked);
;
4431 addOptionalImmOperand(Inst, Operands, OptIdx, AMDGPUOperand::ImmTyNegLo);
4432 addOptionalImmOperand(Inst, Operands, OptIdx, AMDGPUOperand::ImmTyNegHi);
4433 }
4434
4435 const int Ops[] = { AMDGPU::OpName::src0,
4436 AMDGPU::OpName::src1,
4437 AMDGPU::OpName::src2 };
4438 const int ModOps[] = { AMDGPU::OpName::src0_modifiers,
4439 AMDGPU::OpName::src1_modifiers,
4440 AMDGPU::OpName::src2_modifiers };
4441
4442 int OpSelIdx = AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::op_sel);
4443
4444 unsigned OpSel = Inst.getOperand(OpSelIdx).getImm();
4445 unsigned OpSelHi = 0;
4446 unsigned NegLo = 0;
4447 unsigned NegHi = 0;
4448
4449 if (OpSelHiIdx != -1) {
4450 OpSelHi = Inst.getOperand(OpSelHiIdx).getImm();
4451 }
4452
4453 if (NegLoIdx != -1) {
4454 int NegHiIdx = AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::neg_hi);
4455 NegLo = Inst.getOperand(NegLoIdx).getImm();
4456 NegHi = Inst.getOperand(NegHiIdx).getImm();
4457 }
4458
4459 for (int J = 0; J < 3; ++J) {
4460 int OpIdx = AMDGPU::getNamedOperandIdx(Opc, Ops[J]);
4461 if (OpIdx == -1)
4462 break;
4463
4464 uint32_t ModVal = 0;
4465
4466 if ((OpSel & (1 << J)) != 0)
4467 ModVal |= SISrcMods::OP_SEL_0;
4468
4469 if ((OpSelHi & (1 << J)) != 0)
4470 ModVal |= SISrcMods::OP_SEL_1;
4471
4472 if ((NegLo & (1 << J)) != 0)
4473 ModVal |= SISrcMods::NEG;
4474
4475 if ((NegHi & (1 << J)) != 0)
4476 ModVal |= SISrcMods::NEG_HI;
4477
4478 int ModIdx = AMDGPU::getNamedOperandIdx(Opc, ModOps[J]);
4479
4480 Inst.getOperand(ModIdx).setImm(Inst.getOperand(ModIdx).getImm() | ModVal);
4481 }
4482}
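
As a concrete instance of the loop above: for a packed three-source instruction with op_sel:[1,0,1], bits 0 and 2 of OpSel are set, so SISrcMods::OP_SEL_0 is OR-ed into src0_modifiers and src2_modifiers while src1_modifiers is left untouched; neg_lo/neg_hi distribute into NEG and NEG_HI the same way.

  //   op_sel:[1,0,1]  =>  OpSel = 0b101
  //     J=0: ModVal |= SISrcMods::OP_SEL_0   (src0_modifiers)
  //     J=1: (no change)
  //     J=2: ModVal |= SISrcMods::OP_SEL_0   (src2_modifiers)
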
4483
4484//===----------------------------------------------------------------------===//
4485// dpp
4486//===----------------------------------------------------------------------===//
4487
4488bool AMDGPUOperand::isDPPCtrl() const {
4489 bool result = isImm() && getImmTy() == ImmTyDppCtrl && isUInt<9>(getImm());
4490 if (result) {
4491 int64_t Imm = getImm();
4492 return ((Imm >= 0x000) && (Imm <= 0x0ff)) ||
4493 ((Imm >= 0x101) && (Imm <= 0x10f)) ||
4494 ((Imm >= 0x111) && (Imm <= 0x11f)) ||
4495 ((Imm >= 0x121) && (Imm <= 0x12f)) ||
4496 (Imm == 0x130) ||
4497 (Imm == 0x134) ||
4498 (Imm == 0x138) ||
4499 (Imm == 0x13c) ||
4500 (Imm == 0x140) ||
4501 (Imm == 0x141) ||
4502 (Imm == 0x142) ||
4503 (Imm == 0x143);
4504 }
4505 return false;
4506}
4507
4508bool AMDGPUOperand::isGPRIdxMode() const {
4509 return isImm() && isUInt<4>(getImm());
4510}
4511
4512bool AMDGPUOperand::isS16Imm() const {
4513 return isImm() && (isInt<16>(getImm()) || isUInt<16>(getImm()));
4514}
4515
4516bool AMDGPUOperand::isU16Imm() const {
4517 return isImm() && isUInt<16>(getImm());
4518}
4519
4520OperandMatchResultTy
4521AMDGPUAsmParser::parseDPPCtrl(OperandVector &Operands) {
4522 SMLoc S = Parser.getTok().getLoc();
4523 StringRef Prefix;
4524 int64_t Int;
4525
4526 if (getLexer().getKind() == AsmToken::Identifier) {
4527 Prefix = Parser.getTok().getString();
4528 } else {
4529 return MatchOperand_NoMatch;
4530 }
4531
4532 if (Prefix == "row_mirror") {
4533 Int = 0x140;
4534 Parser.Lex();
4535 } else if (Prefix == "row_half_mirror") {
4536 Int = 0x141;
4537 Parser.Lex();
4538 } else {
4539 // Check to prevent parseDPPCtrlOps from eating invalid tokens
4540 if (Prefix != "quad_perm"
4541 && Prefix != "row_shl"
4542 && Prefix != "row_shr"
4543 && Prefix != "row_ror"
4544 && Prefix != "wave_shl"
4545 && Prefix != "wave_rol"
4546 && Prefix != "wave_shr"
4547 && Prefix != "wave_ror"
4548 && Prefix != "row_bcast") {
4549 return MatchOperand_NoMatch;
4550 }
4551
4552 Parser.Lex();
4553 if (getLexer().isNot(AsmToken::Colon))
4554 return MatchOperand_ParseFail;
4555
4556 if (Prefix == "quad_perm") {
4557 // quad_perm:[%d,%d,%d,%d]
4558 Parser.Lex();
4559 if (getLexer().isNot(AsmToken::LBrac))
4560 return MatchOperand_ParseFail;
4561 Parser.Lex();
4562
4563 if (getParser().parseAbsoluteExpression(Int) || !(0 <= Int && Int <=3))
4564 return MatchOperand_ParseFail;
4565
4566 for (int i = 0; i < 3; ++i) {
4567 if (getLexer().isNot(AsmToken::Comma))
4568 return MatchOperand_ParseFail;
4569 Parser.Lex();
4570
4571 int64_t Temp;
4572 if (getParser().parseAbsoluteExpression(Temp) || !(0 <= Temp && Temp <=3))
4573 return MatchOperand_ParseFail;
4574 const int shift = i*2 + 2;
4575 Int += (Temp << shift);
4576 }
4577
4578 if (getLexer().isNot(AsmToken::RBrac))
4579 return MatchOperand_ParseFail;
4580 Parser.Lex();
4581 } else {
4582 // sel:%d
4583 Parser.Lex();
4584 if (getParser().parseAbsoluteExpression(Int))
4585 return MatchOperand_ParseFail;
4586
4587 if (Prefix == "row_shl" && 1 <= Int && Int <= 15) {
4588 Int |= 0x100;
4589 } else if (Prefix == "row_shr" && 1 <= Int && Int <= 15) {
4590 Int |= 0x110;
4591 } else if (Prefix == "row_ror" && 1 <= Int && Int <= 15) {
4592 Int |= 0x120;
4593 } else if (Prefix == "wave_shl" && 1 == Int) {
4594 Int = 0x130;
4595 } else if (Prefix == "wave_rol" && 1 == Int) {
4596 Int = 0x134;
4597 } else if (Prefix == "wave_shr" && 1 == Int) {
4598 Int = 0x138;
4599 } else if (Prefix == "wave_ror" && 1 == Int) {
4600 Int = 0x13C;
4601 } else if (Prefix == "row_bcast") {
4602 if (Int == 15) {
4603 Int = 0x142;
4604 } else if (Int == 31) {
4605 Int = 0x143;
4606 } else {
4607 return MatchOperand_ParseFail;
4608 }
4609 } else {
4610 return MatchOperand_ParseFail;
4611 }
4612 }
4613 }
4614
4615 Operands.push_back(AMDGPUOperand::CreateImm(this, Int, S, AMDGPUOperand::ImmTyDppCtrl));
4616 return MatchOperand_Success;
4617}
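
Collecting the encodings produced above into one table (values taken directly from the code; they match the ranges accepted by isDPPCtrl() above):

  //   quad_perm:[a,b,c,d]   -> 0x000..0x0ff  (2 bits per lane)
  //   row_shl:1..15         -> 0x101..0x10f
  //   row_shr:1..15         -> 0x111..0x11f
  //   row_ror:1..15         -> 0x121..0x12f
  //   wave_shl:1            -> 0x130
  //   wave_rol:1            -> 0x134
  //   wave_shr:1            -> 0x138
  //   wave_ror:1            -> 0x13c
  //   row_mirror            -> 0x140
  //   row_half_mirror       -> 0x141
  //   row_bcast:15          -> 0x142
  //   row_bcast:31          -> 0x143
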
4618
4619AMDGPUOperand::Ptr AMDGPUAsmParser::defaultRowMask() const {
4620 return AMDGPUOperand::CreateImm(this, 0xf, SMLoc(), AMDGPUOperand::ImmTyDppRowMask);
4621}
4622
4623AMDGPUOperand::Ptr AMDGPUAsmParser::defaultBankMask() const {
4624 return AMDGPUOperand::CreateImm(this, 0xf, SMLoc(), AMDGPUOperand::ImmTyDppBankMask);
4625}
4626
4627AMDGPUOperand::Ptr AMDGPUAsmParser::defaultBoundCtrl() const {
4628 return AMDGPUOperand::CreateImm(this, 0, SMLoc(), AMDGPUOperand::ImmTyDppBoundCtrl);
4629}
4630
4631void AMDGPUAsmParser::cvtDPP(MCInst &Inst, const OperandVector &Operands) {
4632 OptionalImmIndexMap OptionalIdx;
4633
4634 unsigned I = 1;
4635 const MCInstrDesc &Desc = MII.get(Inst.getOpcode());
4636 for (unsigned J = 0; J < Desc.getNumDefs(); ++J) {
4637 ((AMDGPUOperand &)*Operands[I++]).addRegOperands(Inst, 1);
4638 }
4639
4640 // All DPP instructions with at least one source operand have a fake "old"
4641 // source at the beginning that's tied to the dst operand. Handle it here.
4642 if (Desc.getNumOperands() >= 2)
4643 Inst.addOperand(Inst.getOperand(0));
4644
4645 for (unsigned E = Operands.size(); I != E; ++I) {
4646 AMDGPUOperand &Op = ((AMDGPUOperand &)*Operands[I]);
4647 // Add the register arguments
4648 if (Op.isReg() && Op.Reg.RegNo == AMDGPU::VCC) {
4649 // VOP2b (v_add_u32, v_sub_u32 ...) dpp uses the "vcc" token.
4650 // Skip it.
4651 continue;
4652 } else if (isRegOrImmWithInputMods(Desc, Inst.getNumOperands())) {
4653 Op.addRegWithFPInputModsOperands(Inst, 2);
4654 } else if (Op.isDPPCtrl()) {
4655 Op.addImmOperands(Inst, 1);
4656 } else if (Op.isImm()) {
4657 // Handle optional arguments
4658 OptionalIdx[Op.getImmTy()] = I;
4659 } else {
4660 llvm_unreachable("Invalid operand type")::llvm::llvm_unreachable_internal("Invalid operand type", "/build/llvm-toolchain-snapshot-6.0~svn321639/lib/Target/AMDGPU/AsmParser/AMDGPUAsmParser.cpp"
, 4660)
;
4661 }
4662 }
4663
4664 addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyDppRowMask, 0xf);
4665 addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyDppBankMask, 0xf);
4666 addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyDppBoundCtrl);
4667}
4668
4669//===----------------------------------------------------------------------===//
4670// sdwa
4671//===----------------------------------------------------------------------===//
4672
4673OperandMatchResultTy
4674AMDGPUAsmParser::parseSDWASel(OperandVector &Operands, StringRef Prefix,
4675 AMDGPUOperand::ImmTy Type) {
4676 using namespace llvm::AMDGPU::SDWA;
4677
4678 SMLoc S = Parser.getTok().getLoc();
4679 StringRef Value;
4680 OperandMatchResultTy res;
4681
4682 res = parseStringWithPrefix(Prefix, Value);
4683 if (res != MatchOperand_Success) {
4684 return res;
4685 }
4686
4687 int64_t Int;
4688 Int = StringSwitch<int64_t>(Value)
4689 .Case("BYTE_0", SdwaSel::BYTE_0)
4690 .Case("BYTE_1", SdwaSel::BYTE_1)
4691 .Case("BYTE_2", SdwaSel::BYTE_2)
4692 .Case("BYTE_3", SdwaSel::BYTE_3)
4693 .Case("WORD_0", SdwaSel::WORD_0)
4694 .Case("WORD_1", SdwaSel::WORD_1)
4695 .Case("DWORD", SdwaSel::DWORD)
4696 .Default(0xffffffff);
4697 Parser.Lex(); // eat last token
4698
4699 if (Int == 0xffffffff) {
4700 return MatchOperand_ParseFail;
4701 }
4702
4703 Operands.push_back(AMDGPUOperand::CreateImm(this, Int, S, Type));
4704 return MatchOperand_Success;
4705}
4706
4707OperandMatchResultTy
4708AMDGPUAsmParser::parseSDWADstUnused(OperandVector &Operands) {
4709 using namespace llvm::AMDGPU::SDWA;
4710
4711 SMLoc S = Parser.getTok().getLoc();
4712 StringRef Value;
4713 OperandMatchResultTy res;
4714
4715 res = parseStringWithPrefix("dst_unused", Value);
4716 if (res != MatchOperand_Success) {
4717 return res;
4718 }
4719
4720 int64_t Int;
4721 Int = StringSwitch<int64_t>(Value)
4722 .Case("UNUSED_PAD", DstUnused::UNUSED_PAD)
4723 .Case("UNUSED_SEXT", DstUnused::UNUSED_SEXT)
4724 .Case("UNUSED_PRESERVE", DstUnused::UNUSED_PRESERVE)
4725 .Default(0xffffffff);
4726 Parser.Lex(); // eat last token
4727
4728 if (Int == 0xffffffff) {
4729 return MatchOperand_ParseFail;
4730 }
4731
4732 Operands.push_back(AMDGPUOperand::CreateImm(this, Int, S, AMDGPUOperand::ImmTySdwaDstUnused));
4733 return MatchOperand_Success;
4734}
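
Putting the two SDWA parsers together, an operand string they accept might look like this (assumed mnemonic; the sel/unused keywords are exactly the StringSwitch cases above):

  //   v_mov_b32_sdwa v1, v2 dst_sel:BYTE_0 dst_unused:UNUSED_PRESERVE src0_sel:WORD_1
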
4735
4736void AMDGPUAsmParser::cvtSdwaVOP1(MCInst &Inst, const OperandVector &Operands) {
4737 cvtSDWA(Inst, Operands, SIInstrFlags::VOP1);
4738}
4739
4740void AMDGPUAsmParser::cvtSdwaVOP2(MCInst &Inst, const OperandVector &Operands) {
4741 cvtSDWA(Inst, Operands, SIInstrFlags::VOP2);
4742}
4743
4744void AMDGPUAsmParser::cvtSdwaVOP2b(MCInst &Inst, const OperandVector &Operands) {
4745 cvtSDWA(Inst, Operands, SIInstrFlags::VOP2, true);
4746}
4747
4748void AMDGPUAsmParser::cvtSdwaVOPC(MCInst &Inst, const OperandVector &Operands) {
4749 cvtSDWA(Inst, Operands, SIInstrFlags::VOPC, isVI());
4750}
4751
4752void AMDGPUAsmParser::cvtSDWA(MCInst &Inst, const OperandVector &Operands,
4753 uint64_t BasicInstType, bool skipVcc) {
4754 using namespace llvm::AMDGPU::SDWA;
4755
4756 OptionalImmIndexMap OptionalIdx;
4757 bool skippedVcc = false;
4758
4759 unsigned I = 1;
4760 const MCInstrDesc &Desc = MII.get(Inst.getOpcode());
4761 for (unsigned J = 0; J < Desc.getNumDefs(); ++J) {
4762 ((AMDGPUOperand &)*Operands[I++]).addRegOperands(Inst, 1);
4763 }
4764
4765 for (unsigned E = Operands.size(); I != E; ++I) {
4766 AMDGPUOperand &Op = ((AMDGPUOperand &)*Operands[I]);
4767 if (skipVcc && !skippedVcc && Op.isReg() && Op.Reg.RegNo == AMDGPU::VCC) {
4768 // VOP2b (v_add_u32, v_sub_u32 ...) sdwa uses the "vcc" token as dst.
4769 // Skip it if it's 2nd (e.g. v_add_i32_sdwa v1, vcc, v2, v3)
4770 // or 4th (v_addc_u32_sdwa v1, vcc, v2, v3, vcc) operand.
4771 // Skip VCC only if we didn't skip it on previous iteration.
4772 if (BasicInstType == SIInstrFlags::VOP2 &&
4773 (Inst.getNumOperands() == 1 || Inst.getNumOperands() == 5)) {
4774 skippedVcc = true;
4775 continue;
4776 } else if (BasicInstType == SIInstrFlags::VOPC &&
4777 Inst.getNumOperands() == 0) {
4778 skippedVcc = true;
4779 continue;
4780 }
4781 }
4782 if (isRegOrImmWithInputMods(Desc, Inst.getNumOperands())) {
4783 Op.addRegWithInputModsOperands(Inst, 2);
4784 } else if (Op.isImm()) {
4785 // Handle optional arguments
4786 OptionalIdx[Op.getImmTy()] = I;
4787 } else {
4788 llvm_unreachable("Invalid operand type")::llvm::llvm_unreachable_internal("Invalid operand type", "/build/llvm-toolchain-snapshot-6.0~svn321639/lib/Target/AMDGPU/AsmParser/AMDGPUAsmParser.cpp"
, 4788)
;
4789 }
4790 skippedVcc = false;
4791 }
4792
4793 if (Inst.getOpcode() != AMDGPU::V_NOP_sdwa_gfx9 &&
4794 Inst.getOpcode() != AMDGPU::V_NOP_sdwa_vi) {
4795 // v_nop_sdwa_vi/v_nop_sdwa_gfx9 have no optional sdwa arguments.
4796 switch (BasicInstType) {
4797 case SIInstrFlags::VOP1:
4798 addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyClampSI, 0);
4799 if (AMDGPU::getNamedOperandIdx(Inst.getOpcode(), AMDGPU::OpName::omod) != -1) {
4800 addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyOModSI, 0);
4801 }
4802 addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTySdwaDstSel, SdwaSel::DWORD);
4803 addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTySdwaDstUnused, DstUnused::UNUSED_PRESERVE);
4804 addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTySdwaSrc0Sel, SdwaSel::DWORD);
4805 break;
4806
4807 case SIInstrFlags::VOP2:
4808 addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyClampSI, 0);
4809 if (AMDGPU::getNamedOperandIdx(Inst.getOpcode(), AMDGPU::OpName::omod) != -1) {
4810 addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyOModSI, 0);
4811 }
4812 addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTySdwaDstSel, SdwaSel::DWORD);
4813 addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTySdwaDstUnused, DstUnused::UNUSED_PRESERVE);
4814 addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTySdwaSrc0Sel, SdwaSel::DWORD);
4815 addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTySdwaSrc1Sel, SdwaSel::DWORD);
4816 break;
4817
4818 case SIInstrFlags::VOPC:
4819 addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyClampSI, 0);
4820 addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTySdwaSrc0Sel, SdwaSel::DWORD);
4821 addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTySdwaSrc1Sel, SdwaSel::DWORD);
4822 break;
4823
4824 default:
4825 llvm_unreachable("Invalid instruction type. Only VOP1, VOP2 and VOPC allowed")::llvm::llvm_unreachable_internal("Invalid instruction type. Only VOP1, VOP2 and VOPC allowed"
, "/build/llvm-toolchain-snapshot-6.0~svn321639/lib/Target/AMDGPU/AsmParser/AMDGPUAsmParser.cpp"
, 4825)
;
4826 }
4827 }
4828
4829 // Special case for v_mac_{f16, f32}:
4830 // they have a src2 register operand that is tied to the dst operand.
4831 if (Inst.getOpcode() == AMDGPU::V_MAC_F32_sdwa_vi ||
4832 Inst.getOpcode() == AMDGPU::V_MAC_F16_sdwa_vi) {
4833 auto it = Inst.begin();
4834 std::advance(
4835 it, AMDGPU::getNamedOperandIdx(Inst.getOpcode(), AMDGPU::OpName::src2));
4836 Inst.insert(it, Inst.getOperand(0)); // src2 = dst
4837 }
4838}
4839
4840/// Force static initialization.
4841extern "C" void LLVMInitializeAMDGPUAsmParser() {
4842 RegisterMCAsmParser<AMDGPUAsmParser> A(getTheAMDGPUTarget());
4843 RegisterMCAsmParser<AMDGPUAsmParser> B(getTheGCNTarget());
4844}
4845
4846#define GET_REGISTER_MATCHER
4847#define GET_MATCHER_IMPLEMENTATION
4848#define GET_MNEMONIC_SPELL_CHECKER
4849#include "AMDGPUGenAsmMatcher.inc"
4850
4851// This function should be defined after the auto-generated include so that we
4852// have the MatchClassKind enum defined.
4853unsigned AMDGPUAsmParser::validateTargetOperandClass(MCParsedAsmOperand &Op,
4854 unsigned Kind) {
4855 // Tokens like "glc" would be parsed as immediate operands in ParseOperand().
4856 // But MatchInstructionImpl() expects to see a token and fails to validate the
4857 // operand. This method checks whether we were given an immediate operand but
4858 // expected the corresponding token.
4859 AMDGPUOperand &Operand = (AMDGPUOperand&)Op;
4860 switch (Kind) {
4861 case MCK_addr64:
4862 return Operand.isAddr64() ? Match_Success : Match_InvalidOperand;
4863 case MCK_gds:
4864 return Operand.isGDS() ? Match_Success : Match_InvalidOperand;
4865 case MCK_glc:
4866 return Operand.isGLC() ? Match_Success : Match_InvalidOperand;
4867 case MCK_idxen:
4868 return Operand.isIdxen() ? Match_Success : Match_InvalidOperand;
4869 case MCK_offen:
4870 return Operand.isOffen() ? Match_Success : Match_InvalidOperand;
4871 case MCK_SSrcB32:
4872 // When operands have expression values, they will return true for isToken,
4873 // because it is not possible to distinguish between a token and an
4874 // expression at parse time. MatchInstructionImpl() will always try to
4875 // match an operand as a token, when isToken returns true, and when the
4876 // name of the expression is not a valid token, the match will fail,
4877 // so we need to handle it here.
4878 return Operand.isSSrcB32() ? Match_Success : Match_InvalidOperand;
4879 case MCK_SSrcF32:
4880 return Operand.isSSrcF32() ? Match_Success : Match_InvalidOperand;
4881 case MCK_SoppBrTarget:
4882 return Operand.isSoppBrTarget() ? Match_Success : Match_InvalidOperand;
4883 case MCK_VReg32OrOff:
4884 return Operand.isVReg32OrOff() ? Match_Success : Match_InvalidOperand;
4885 case MCK_InterpSlot:
4886 return Operand.isInterpSlot() ? Match_Success : Match_InvalidOperand;
4887 case MCK_Attr:
4888 return Operand.isInterpAttr() ? Match_Success : Match_InvalidOperand;
4889 case MCK_AttrChan:
4890 return Operand.isAttrChan() ? Match_Success : Match_InvalidOperand;
4891 default:
4892 return Match_InvalidOperand;
4893 }
4894}
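
The hook above bridges two representations of the same text: "glc" is stored as an immediate at parse time but matched against a token class. A minimal standalone model of that bridging (plain C++, deliberately not the LLVM API):

#include <string>

// Simplified stand-ins for Match_Success / Match_InvalidOperand.
enum MatchResult { Success, InvalidOperand };

// A parsed operand that remembers which optional modifier it encodes.
struct ParsedOp {
  std::string Modifier; // e.g. "glc" or "gds", set during parsing
  bool isGLC() const { return Modifier == "glc"; }
  bool isGDS() const { return Modifier == "gds"; }
};

// Mirror of validateTargetOperandClass: accept an immediate operand for
// the token class it stands for.
MatchResult validateClass(const ParsedOp &Op, const std::string &Kind) {
  if (Kind == "glc") return Op.isGLC() ? Success : InvalidOperand;
  if (Kind == "gds") return Op.isGDS() ? Success : InvalidOperand;
  return InvalidOperand;
}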

/build/llvm-toolchain-snapshot-6.0~svn321639/include/llvm/MC/MCParser/MCAsmParserExtension.h

1//===- llvm/MC/MCAsmParserExtension.h - Asm Parser Hooks --------*- C++ -*-===//
2//
3// The LLVM Compiler Infrastructure
4//
5// This file is distributed under the University of Illinois Open Source
6// License. See LICENSE.TXT for details.
7//
8//===----------------------------------------------------------------------===//
9
10#ifndef LLVM_MC_MCPARSER_MCASMPARSEREXTENSION_H
11#define LLVM_MC_MCPARSER_MCASMPARSEREXTENSION_H
12
13#include "llvm/ADT/STLExtras.h"
14#include "llvm/ADT/StringRef.h"
15#include "llvm/MC/MCParser/MCAsmLexer.h"
16#include "llvm/MC/MCParser/MCAsmParser.h"
17#include "llvm/Support/SMLoc.h"
18
19namespace llvm {
20
21class Twine;
22
23/// \brief Generic interface for extending the MCAsmParser,
24/// which is implemented by target and object file assembly parser
25/// implementations.
26class MCAsmParserExtension {
27 MCAsmParser *Parser;
28
29protected:
30 MCAsmParserExtension();
31
32 // Helper template for implementing static dispatch functions.
33 template<typename T, bool (T::*Handler)(StringRef, SMLoc)>
34 static bool HandleDirective(MCAsmParserExtension *Target,
35 StringRef Directive,
36 SMLoc DirectiveLoc) {
37 T *Obj = static_cast<T*>(Target);
38 return (Obj->*Handler)(Directive, DirectiveLoc);
39 }
40
41 bool BracketExpressionsSupported = false;
42
43public:
44 MCAsmParserExtension(const MCAsmParserExtension &) = delete;
45 MCAsmParserExtension &operator=(const MCAsmParserExtension &) = delete;
46 virtual ~MCAsmParserExtension();
47
48 /// \brief Initialize the extension for parsing using the given \p Parser.
49 /// The extension should use the AsmParser interfaces to register its
50 /// parsing routines.
51 virtual void Initialize(MCAsmParser &Parser);
52
53 /// \name MCAsmParser Proxy Interfaces
54 /// @{
55
56 MCContext &getContext() { return getParser().getContext(); }
57
58 MCAsmLexer &getLexer() { return getParser().getLexer(); }
6. Calling 'MCAsmParserExtension::getParser'
7. Returning from 'MCAsmParserExtension::getParser'
59 const MCAsmLexer &getLexer() const {
60 return const_cast<MCAsmParserExtension *>(this)->getLexer();
61 }
62
63 MCAsmParser &getParser() { return *Parser; }
64 const MCAsmParser &getParser() const {
65 return const_cast<MCAsmParserExtension*>(this)->getParser();
66 }
67
68 SourceMgr &getSourceManager() { return getParser().getSourceManager(); }
69 MCStreamer &getStreamer() { return getParser().getStreamer(); }
70
71 bool Warning(SMLoc L, const Twine &Msg) {
72 return getParser().Warning(L, Msg);
73 }
74
75 bool Error(SMLoc L, const Twine &Msg, SMRange Range = SMRange()) {
76 return getParser().Error(L, Msg, Range);
77 }
78
79 void Note(SMLoc L, const Twine &Msg) {
80 getParser().Note(L, Msg);
81 }
82
83 bool TokError(const Twine &Msg) {
84 return getParser().TokError(Msg);
31. Calling 'MCAsmParserExtension::getParser'
32. Returning from 'MCAsmParserExtension::getParser'
85 }
86
87 const AsmToken &Lex() { return getParser().Lex(); }
88 const AsmToken &getTok() { return getParser().getTok(); }
89 bool parseToken(AsmToken::TokenKind T,
90 const Twine &Msg = "unexpected token") {
91 return getParser().parseToken(T, Msg);
92 }
93
94 bool parseMany(function_ref<bool()> parseOne, bool hasComma = true) {
95 return getParser().parseMany(parseOne, hasComma);
96 }
97
98 bool parseOptionalToken(AsmToken::TokenKind T) {
99 return getParser().parseOptionalToken(T);
100 }
101
102 bool check(bool P, const Twine &Msg) {
103 return getParser().check(P, Msg);
104 }
105
106 bool check(bool P, SMLoc Loc, const Twine &Msg) {
107 return getParser().check(P, Loc, Msg);
108 }
109
110 bool addErrorSuffix(const Twine &Suffix) {
111 return getParser().addErrorSuffix(Suffix);
112 }
113
114 bool HasBracketExpressions() const { return BracketExpressionsSupported; }
115
116 /// @}
117};
118
119} // end namespace llvm
120
121#endif // LLVM_MC_MCPARSER_MCASMPARSEREXTENSION_H
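
The HandleDirective template above lets an extension register member functions as directive callbacks through a plain function pointer. A minimal sketch of the usual wiring, modeled on how in-tree extensions use this class; the extension class and directive name are hypothetical:

#include "llvm/MC/MCParser/MCAsmParserExtension.h"
#include <utility>
using namespace llvm;

class MyAsmParserExt : public MCAsmParserExtension {
  // Wrap HandleDirective so member functions can be registered by name.
  template <bool (MyAsmParserExt::*Handler)(StringRef, SMLoc)>
  void addDirectiveHandler(StringRef Directive) {
    MCAsmParser::ExtensionDirectiveHandler H =
        std::make_pair(this, HandleDirective<MyAsmParserExt, Handler>);
    getParser().addDirectiveHandler(Directive, H);
  }

public:
  void Initialize(MCAsmParser &Parser) override {
    MCAsmParserExtension::Initialize(Parser);
    addDirectiveHandler<&MyAsmParserExt::parseMyDirective>(".my_directive");
  }

  bool parseMyDirective(StringRef, SMLoc Loc) {
    return Error(Loc, "'.my_directive' is a placeholder in this sketch");
  }
};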

/build/llvm-toolchain-snapshot-6.0~svn321639/include/llvm/MC/MCParser/MCAsmLexer.h

1//===- llvm/MC/MCAsmLexer.h - Abstract Asm Lexer Interface ------*- C++ -*-===//
2//
3// The LLVM Compiler Infrastructure
4//
5// This file is distributed under the University of Illinois Open Source
6// License. See LICENSE.TXT for details.
7//
8//===----------------------------------------------------------------------===//
9
10#ifndef LLVM_MC_MCPARSER_MCASMLEXER_H
11#define LLVM_MC_MCPARSER_MCASMLEXER_H
12
13#include "llvm/ADT/APInt.h"
14#include "llvm/ADT/ArrayRef.h"
15#include "llvm/ADT/SmallVector.h"
16#include "llvm/ADT/StringRef.h"
17#include "llvm/Support/SMLoc.h"
18#include <algorithm>
19#include <cassert>
20#include <cstddef>
21#include <cstdint>
22#include <string>
23
24namespace llvm {
25
26/// Target independent representation for an assembler token.
27class AsmToken {
28public:
29 enum TokenKind {
30 // Markers
31 Eof, Error,
32
33 // String values.
34 Identifier,
35 String,
36
37 // Integer values.
38 Integer,
39 BigNum, // larger than 64 bits
40
41 // Real values.
42 Real,
43
44 // Comments
45 Comment,
46 HashDirective,
47 // No-value.
48 EndOfStatement,
49 Colon,
50 Space,
51 Plus, Minus, Tilde,
52 Slash, // '/'
53 BackSlash, // '\'
54 LParen, RParen, LBrac, RBrac, LCurly, RCurly,
55 Star, Dot, Comma, Dollar, Equal, EqualEqual,
56
57 Pipe, PipePipe, Caret,
58 Amp, AmpAmp, Exclaim, ExclaimEqual, Percent, Hash,
59 Less, LessEqual, LessLess, LessGreater,
60 Greater, GreaterEqual, GreaterGreater, At,
61
62 // MIPS unary expression operators such as %neg.
63 PercentCall16, PercentCall_Hi, PercentCall_Lo, PercentDtprel_Hi,
64 PercentDtprel_Lo, PercentGot, PercentGot_Disp, PercentGot_Hi, PercentGot_Lo,
65 PercentGot_Ofst, PercentGot_Page, PercentGottprel, PercentGp_Rel, PercentHi,
66 PercentHigher, PercentHighest, PercentLo, PercentNeg, PercentPcrel_Hi,
67 PercentPcrel_Lo, PercentTlsgd, PercentTlsldm, PercentTprel_Hi,
68 PercentTprel_Lo
69 };
70
71private:
72 TokenKind Kind;
73
74 /// A reference to the entire token contents; this is always a pointer into
75 /// a memory buffer owned by the source manager.
76 StringRef Str;
77
78 APInt IntVal;
79
80public:
81 AsmToken() = default;
82 AsmToken(TokenKind Kind, StringRef Str, APInt IntVal)
83 : Kind(Kind), Str(Str), IntVal(std::move(IntVal)) {}
84 AsmToken(TokenKind Kind, StringRef Str, int64_t IntVal = 0)
85 : Kind(Kind), Str(Str), IntVal(64, IntVal, true) {}
86
87 TokenKind getKind() const { return Kind; }
88 bool is(TokenKind K) const { return Kind == K; }
89 bool isNot(TokenKind K) const { return Kind != K; }
13. Assuming the condition is false
90
91 SMLoc getLoc() const;
92 SMLoc getEndLoc() const;
93 SMRange getLocRange() const;
94
95 /// Get the contents of a string token (without quotes).
96 StringRef getStringContents() const {
97 assert(Kind == String && "This token isn't a string!");
98 return Str.slice(1, Str.size() - 1);
99 }
100
101 /// Get the identifier string for the current token, which should be an
102 /// identifier or a string. This gets the portion of the string which should
103 /// be used as the identifier, e.g., it does not include the quotes on
104 /// strings.
105 StringRef getIdentifier() const {
106 if (Kind == Identifier)
107 return getString();
108 return getStringContents();
109 }
110
111 /// Get the string for the current token, this includes all characters (for
112 /// example, the quotes on strings) in the token.
113 ///
114 /// The returned StringRef points into the source manager's memory buffer, and
115 /// is safe to store across calls to Lex().
116 StringRef getString() const { return Str; }
117
118 // FIXME: Don't compute this in advance, it makes every token larger, and is
119 // also not generally what we want (it is nicer for recovery etc. to lex 123br
120 // as a single token, then diagnose as an invalid number).
121 int64_t getIntVal() const {
122 assert(Kind == Integer && "This token isn't an integer!");
123 return IntVal.getZExtValue();
124 }
125
126 APInt getAPIntVal() const {
127 assert((Kind == Integer || Kind == BigNum) &&
128        "This token isn't an integer!");
129 return IntVal;
130 }
131};
132
133/// A callback class which is notified of each comment in an assembly file as
134/// it is lexed.
135class AsmCommentConsumer {
136public:
137 virtual ~AsmCommentConsumer() = default;
138
139 /// Callback function for when a comment is lexed. Loc is the start of the
140 /// comment text (excluding the comment-start marker). CommentText is the text
141 /// of the comment, excluding the comment start and end markers, and the
142 /// newline for single-line comments.
143 virtual void HandleComment(SMLoc Loc, StringRef CommentText) = 0;
144};
145
146
147/// Generic assembler lexer interface, for use by target specific assembly
148/// lexers.
149class MCAsmLexer {
150 /// The current token, stored in the base class for faster access.
151 SmallVector<AsmToken, 1> CurTok;
152
153 /// The location and description of the current error
154 SMLoc ErrLoc;
155 std::string Err;
156
157protected: // Can only create subclasses.
158 const char *TokStart = nullptr;
159 bool SkipSpace = true;
160 bool AllowAtInIdentifier;
161 bool IsAtStartOfStatement = true;
162 AsmCommentConsumer *CommentConsumer = nullptr;
163
164 bool AltMacroMode;
165 MCAsmLexer();
166
167 virtual AsmToken LexToken() = 0;
168
169 void SetError(SMLoc errLoc, const std::string &err) {
170 ErrLoc = errLoc;
171 Err = err;
172 }
173
174public:
175 MCAsmLexer(const MCAsmLexer &) = delete;
176 MCAsmLexer &operator=(const MCAsmLexer &) = delete;
177 virtual ~MCAsmLexer();
178
179 bool IsaAltMacroMode() {
180 return AltMacroMode;
181 }
182
183 void SetAltMacroMode(bool AltMacroSet) {
184 AltMacroMode = AltMacroSet;
185 }
186
187 /// Consume the next token from the input stream and return it.
188 ///
189 /// The lexer will continuously return the end-of-file token once the end of
190 /// the main input file has been reached.
191 const AsmToken &Lex() {
192 assert(!CurTok.empty());
193 // Mark if we are parsing out an EndOfStatement.
194 IsAtStartOfStatement = CurTok.front().getKind() == AsmToken::EndOfStatement;
195 CurTok.erase(CurTok.begin());
196 // LexToken may generate multiple tokens via UnLex but will always return
197 // the first one. Place returned value at head of CurTok vector.
198 if (CurTok.empty()) {
199 AsmToken T = LexToken();
200 CurTok.insert(CurTok.begin(), T);
201 }
202 return CurTok.front();
203 }
204
205 void UnLex(AsmToken const &Token) {
206 IsAtStartOfStatement = false;
207 CurTok.insert(CurTok.begin(), Token);
208 }
209
210 bool isAtStartOfStatement() { return IsAtStartOfStatement; }
211
212 virtual StringRef LexUntilEndOfStatement() = 0;
213
214 /// Get the current source location.
215 SMLoc getLoc() const;
216
217 /// Get the current (last) lexed token.
218 const AsmToken &getTok() const {
219 return CurTok[0];
220 }
221
222 /// Look ahead at the next token to be lexed.
223 const AsmToken peekTok(bool ShouldSkipSpace = true) {
224 AsmToken Tok;
225
226 MutableArrayRef<AsmToken> Buf(Tok);
227 size_t ReadCount = peekTokens(Buf, ShouldSkipSpace);
228
229 assert(ReadCount == 1);
230 (void)ReadCount;
231
232 return Tok;
233 }
234
235 /// Look ahead an arbitrary number of tokens.
236 virtual size_t peekTokens(MutableArrayRef<AsmToken> Buf,
237 bool ShouldSkipSpace = true) = 0;
238
239 /// Get the current error location
240 SMLoc getErrLoc() {
241 return ErrLoc;
242 }
243
244 /// Get the current error string
245 const std::string &getErr() {
246 return Err;
247 }
248
249 /// Get the kind of current token.
250 AsmToken::TokenKind getKind() const { return getTok().getKind(); }
251
252 /// Check if the current token has kind \p K.
253 bool is(AsmToken::TokenKind K) const { return getTok().is(K); }
254
255 /// Check if the current token has kind \p K.
256 bool isNot(AsmToken::TokenKind K) const { return getTok().isNot(K); }
10. Calling 'MCAsmLexer::getTok'
11. Returning from 'MCAsmLexer::getTok'
12. Calling 'AsmToken::isNot'
14. Returning from 'AsmToken::isNot'
257
258 /// Set whether spaces should be ignored by the lexer
259 void setSkipSpace(bool val) { SkipSpace = val; }
260
261 bool getAllowAtInIdentifier() { return AllowAtInIdentifier; }
262 void setAllowAtInIdentifier(bool v) { AllowAtInIdentifier = v; }
263
264 void setCommentConsumer(AsmCommentConsumer *CommentConsumer) {
265 this->CommentConsumer = CommentConsumer;
266 }
267};
268
269} // end namespace llvm
270
271#endif // LLVM_MC_MCPARSER_MCASMLEXER_H
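
The single-token buffer maintained by Lex() above makes one-token lookahead cheap. A minimal sketch of typical use against any concrete MCAsmLexer; the helper names are illustrative:

#include "llvm/MC/MCParser/MCAsmLexer.h"
using namespace llvm;

// peekTok lexes ahead without consuming: the current token is unchanged.
static bool nextIsComma(MCAsmLexer &Lexer) {
  return Lexer.peekTok(/*ShouldSkipSpace=*/true).is(AsmToken::Comma);
}

// Lex() consumes the current token and returns the new current token.
static void skipOptionalComma(MCAsmLexer &Lexer) {
  if (Lexer.is(AsmToken::Comma))
    Lexer.Lex();
}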

/build/llvm-toolchain-snapshot-6.0~svn321639/include/llvm/ADT/Twine.h

1//===- Twine.h - Fast Temporary String Concatenation ------------*- C++ -*-===//
2//
3// The LLVM Compiler Infrastructure
4//
5// This file is distributed under the University of Illinois Open Source
6// License. See LICENSE.TXT for details.
7//
8//===----------------------------------------------------------------------===//
9
10#ifndef LLVM_ADT_TWINE_H
11#define LLVM_ADT_TWINE_H
12
13#include "llvm/ADT/SmallVector.h"
14#include "llvm/ADT/StringRef.h"
15#include "llvm/Support/ErrorHandling.h"
16#include <cassert>
17#include <cstdint>
18#include <string>
19
20namespace llvm {
21
22 class formatv_object_base;
23 class raw_ostream;
24
25 /// Twine - A lightweight data structure for efficiently representing the
26 /// concatenation of temporary values as strings.
27 ///
28 /// A Twine is a kind of rope, it represents a concatenated string using a
29 /// binary-tree, where the string is the preorder of the nodes. Since the
30 /// Twine can be efficiently rendered into a buffer when its result is used,
31 /// it avoids the cost of generating temporary values for intermediate string
32 /// results -- particularly in cases when the Twine result is never
33 /// required. By explicitly tracking the type of leaf nodes, we can also avoid
34 /// the creation of temporary strings for conversions operations (such as
35 /// appending an integer to a string).
36 ///
37 /// A Twine is not intended for use directly and should not be stored, its
38 /// implementation relies on the ability to store pointers to temporary stack
39 /// objects which may be deallocated at the end of a statement. Twines should
40 /// only be accepted as const references in arguments, when an API wishes
41 /// to accept possibly-concatenated strings.
42 ///
43 /// Twines support a special 'null' value, which always concatenates to form
44 /// itself, and renders as an empty string. This can be returned from APIs to
45 /// effectively nullify any concatenations performed on the result.
46 ///
47 /// \b Implementation
48 ///
49 /// Given the nature of a Twine, it is not possible for the Twine's
50 /// concatenation method to construct interior nodes; the result must be
51 /// represented inside the returned value. For this reason a Twine object
52 /// actually holds two values, the left- and right-hand sides of a
53 /// concatenation. We also have nullary Twine objects, which are effectively
54 /// sentinel values that represent empty strings.
55 ///
56 /// Thus, a Twine can effectively have zero, one, or two children. The \see
57 /// isNullary(), \see isUnary(), and \see isBinary() predicates exist for
58 /// testing the number of children.
59 ///
60 /// We maintain a number of invariants on Twine objects (FIXME: Why):
61 /// - Nullary twines are always represented with their Kind on the left-hand
62 /// side, and the Empty kind on the right-hand side.
63 /// - Unary twines are always represented with the value on the left-hand
64 /// side, and the Empty kind on the right-hand side.
65 /// - If a Twine has another Twine as a child, that child should always be
66 /// binary (otherwise it could have been folded into the parent).
67 ///
68 /// These invariants are checked by \see isValid().
69 ///
70 /// \b Efficiency Considerations
71 ///
72 /// The Twine is designed to yield efficient and small code for common
73 /// situations. For this reason, the concat() method is inlined so that
74 /// concatenations of leaf nodes can be optimized into stores directly into a
75 /// single stack allocated object.
76 ///
77 /// In practice, not all compilers can be trusted to optimize concat() fully,
78 /// so we provide two additional methods (and accompanying operator+
79 /// overloads) to guarantee that particularly important cases (cstring plus
80 /// StringRef) codegen as desired.
81 class Twine {
82 /// NodeKind - Represent the type of an argument.
83 enum NodeKind : unsigned char {
84 /// An empty string; the result of concatenating anything with it is also
85 /// empty.
86 NullKind,
87
88 /// The empty string.
89 EmptyKind,
90
91 /// A pointer to a Twine instance.
92 TwineKind,
93
94 /// A pointer to a C string instance.
95 CStringKind,
96
97 /// A pointer to an std::string instance.
98 StdStringKind,
99
100 /// A pointer to a StringRef instance.
101 StringRefKind,
102
103 /// A pointer to a SmallString instance.
104 SmallStringKind,
105
106 /// A pointer to a formatv_object_base instance.
107 FormatvObjectKind,
108
109 /// A char value, to render as a character.
110 CharKind,
111
112 /// An unsigned int value, to render as an unsigned decimal integer.
113 DecUIKind,
114
115 /// An int value, to render as a signed decimal integer.
116 DecIKind,
117
118 /// A pointer to an unsigned long value, to render as an unsigned decimal
119 /// integer.
120 DecULKind,
121
122 /// A pointer to a long value, to render as a signed decimal integer.
123 DecLKind,
124
125 /// A pointer to an unsigned long long value, to render as an unsigned
126 /// decimal integer.
127 DecULLKind,
128
129 /// A pointer to a long long value, to render as a signed decimal integer.
130 DecLLKind,
131
132 /// A pointer to a uint64_t value, to render as an unsigned hexadecimal
133 /// integer.
134 UHexKind
135 };
136
137 union Child
138 {
139 const Twine *twine;
140 const char *cString;
141 const std::string *stdString;
142 const StringRef *stringRef;
143 const SmallVectorImpl<char> *smallString;
144 const formatv_object_base *formatvObject;
145 char character;
146 unsigned int decUI;
147 int decI;
148 const unsigned long *decUL;
149 const long *decL;
150 const unsigned long long *decULL;
151 const long long *decLL;
152 const uint64_t *uHex;
153 };
154
155 /// LHS - The prefix in the concatenation, which may be uninitialized for
156 /// Null or Empty kinds.
157 Child LHS;
158
159 /// RHS - The suffix in the concatenation, which may be uninitialized for
160 /// Null or Empty kinds.
161 Child RHS;
162
163 /// LHSKind - The NodeKind of the left hand side, \see getLHSKind().
164 NodeKind LHSKind = EmptyKind;
165
166 /// RHSKind - The NodeKind of the right hand side, \see getRHSKind().
167 NodeKind RHSKind = EmptyKind;
168
169 /// Construct a nullary twine; the kind must be NullKind or EmptyKind.
170 explicit Twine(NodeKind Kind) : LHSKind(Kind) {
171 assert(isNullary() && "Invalid kind!");
172 }
173
174 /// Construct a binary twine.
175 explicit Twine(const Twine &LHS, const Twine &RHS)
176 : LHSKind(TwineKind), RHSKind(TwineKind) {
177 this->LHS.twine = &LHS;
178 this->RHS.twine = &RHS;
179 assert(isValid() && "Invalid twine!");
180 }
181
182 /// Construct a twine from explicit values.
183 explicit Twine(Child LHS, NodeKind LHSKind, Child RHS, NodeKind RHSKind)
184 : LHS(LHS), RHS(RHS), LHSKind(LHSKind), RHSKind(RHSKind) {
185 assert(isValid() && "Invalid twine!");
186 }
187
188 /// Check for the null twine.
189 bool isNull() const {
190 return getLHSKind() == NullKind;
191 }
192
193 /// Check for the empty twine.
194 bool isEmpty() const {
195 return getLHSKind() == EmptyKind;
196 }
197
198 /// Check if this is a nullary twine (null or empty).
199 bool isNullary() const {
200 return isNull() || isEmpty();
201 }
202
203 /// Check if this is a unary twine.
204 bool isUnary() const {
205 return getRHSKind() == EmptyKind && !isNullary();
206 }
207
208 /// Check if this is a binary twine.
209 bool isBinary() const {
210 return getLHSKind() != NullKind && getRHSKind() != EmptyKind;
211 }
212
213 /// Check if this is a valid twine (satisfying the invariants on
214 /// order and number of arguments).
215 bool isValid() const {
216 // Nullary twines always have Empty on the RHS.
217 if (isNullary() && getRHSKind() != EmptyKind)
218 return false;
219
220 // Null should never appear on the RHS.
221 if (getRHSKind() == NullKind)
222 return false;
223
224 // The RHS cannot be non-empty if the LHS is empty.
225 if (getRHSKind() != EmptyKind && getLHSKind() == EmptyKind)
226 return false;
227
228 // A twine child should always be binary.
229 if (getLHSKind() == TwineKind &&
230 !LHS.twine->isBinary())
231 return false;
232 if (getRHSKind() == TwineKind &&
233 !RHS.twine->isBinary())
234 return false;
235
236 return true;
237 }
238
239 /// Get the NodeKind of the left-hand side.
240 NodeKind getLHSKind() const { return LHSKind; }
241
242 /// Get the NodeKind of the right-hand side.
243 NodeKind getRHSKind() const { return RHSKind; }
244
245 /// Print one child from a twine.
246 void printOneChild(raw_ostream &OS, Child Ptr, NodeKind Kind) const;
247
248 /// Print the representation of one child from a twine.
249 void printOneChildRepr(raw_ostream &OS, Child Ptr,
250 NodeKind Kind) const;
251
252 public:
253 /// @name Constructors
254 /// @{
255
256 /// Construct from an empty string.
257 /*implicit*/ Twine() {
258 assert(isValid() && "Invalid twine!");
259 }
260
261 Twine(const Twine &) = default;
262
263 /// Construct from a C string.
264 ///
265 /// We take care here to optimize "" into the empty twine -- this will be
266 /// optimized out for string constants. This allows Twine arguments to have
267 /// default "" values, without introducing unnecessary string constants.
268 /*implicit*/ Twine(const char *Str) {
23. Calling implicit default constructor for 'Child'
24. Returning from default constructor for 'Child'
25. Calling implicit default constructor for 'Child'
26. Returning from default constructor for 'Child'
269 if (Str[0] != '\0') {
27. Taking true branch
270 LHS.cString = Str;
271 LHSKind = CStringKind;
272 } else
273 LHSKind = EmptyKind;
274
275 assert(isValid() && "Invalid twine!");
28. Within the expansion of the macro 'assert':
    a. Assuming the condition is true
276 }
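// Note on the path above: steps 23-26 show LHS and RHS being
// default-constructed. For the Child union that leaves the storage
// uninitialized by design; LHSKind/RHSKind (both EmptyKind by default)
// gate every subsequent access, and this is the pattern the analyzer
// walks through on the way to the report at AMDGPUAsmParser.cpp:2422.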
277
278 /// Construct from an std::string.
279 /*implicit*/ Twine(const std::string &Str) : LHSKind(StdStringKind) {
280 LHS.stdString = &Str;
281 assert(isValid() && "Invalid twine!");
282 }
283
284 /// Construct from a StringRef.
285 /*implicit*/ Twine(const StringRef &Str) : LHSKind(StringRefKind) {
286 LHS.stringRef = &Str;
287 assert(isValid() && "Invalid twine!");
288 }
289
290 /// Construct from a SmallString.
291 /*implicit*/ Twine(const SmallVectorImpl<char> &Str)
292 : LHSKind(SmallStringKind) {
293 LHS.smallString = &Str;
294 assert(isValid() && "Invalid twine!");
295 }
296
297 /// Construct from a formatv_object_base.
298 /*implicit*/ Twine(const formatv_object_base &Fmt)
299 : LHSKind(FormatvObjectKind) {
300 LHS.formatvObject = &Fmt;
301 assert(isValid() && "Invalid twine!");
302 }
303
304 /// Construct from a char.
305 explicit Twine(char Val) : LHSKind(CharKind) {
306 LHS.character = Val;
307 }
308
309 /// Construct from a signed char.
310 explicit Twine(signed char Val) : LHSKind(CharKind) {
311 LHS.character = static_cast<char>(Val);
312 }
313
314 /// Construct from an unsigned char.
315 explicit Twine(unsigned char Val) : LHSKind(CharKind) {
316 LHS.character = static_cast<char>(Val);
317 }
318
319 /// Construct a twine to print \p Val as an unsigned decimal integer.
320 explicit Twine(unsigned Val) : LHSKind(DecUIKind) {
321 LHS.decUI = Val;
322 }
323
324 /// Construct a twine to print \p Val as a signed decimal integer.
325 explicit Twine(int Val) : LHSKind(DecIKind) {
326 LHS.decI = Val;
327 }
328
329 /// Construct a twine to print \p Val as an unsigned decimal integer.
330 explicit Twine(const unsigned long &Val) : LHSKind(DecULKind) {
331 LHS.decUL = &Val;
332 }
333
334 /// Construct a twine to print \p Val as a signed decimal integer.
335 explicit Twine(const long &Val) : LHSKind(DecLKind) {
336 LHS.decL = &Val;
337 }
338
339 /// Construct a twine to print \p Val as an unsigned decimal integer.
340 explicit Twine(const unsigned long long &Val) : LHSKind(DecULLKind) {
341 LHS.decULL = &Val;
342 }
343
344 /// Construct a twine to print \p Val as a signed decimal integer.
345 explicit Twine(const long long &Val) : LHSKind(DecLLKind) {
346 LHS.decLL = &Val;
347 }
348
349 // FIXME: Unfortunately, to make sure this is as efficient as possible we
350 // need extra binary constructors from particular types. We can't rely on
351 // the compiler to be smart enough to fold operator+()/concat() down to the
352 // right thing. Yet.
353
354 /// Construct as the concatenation of a C string and a StringRef.
355 /*implicit*/ Twine(const char *LHS, const StringRef &RHS)
356 : LHSKind(CStringKind), RHSKind(StringRefKind) {
357 this->LHS.cString = LHS;
358 this->RHS.stringRef = &RHS;
359 assert(isValid() && "Invalid twine!");
360 }
361
362 /// Construct as the concatenation of a StringRef and a C string.
363 /*implicit*/ Twine(const StringRef &LHS, const char *RHS)
364 : LHSKind(StringRefKind), RHSKind(CStringKind) {
365 this->LHS.stringRef = &LHS;
366 this->RHS.cString = RHS;
367 assert(isValid() && "Invalid twine!");
368 }
369
370 /// Since the intended use of twines is as temporary objects, assignments
371 /// when concatenating might cause undefined behavior or stack corruption.
372 Twine &operator=(const Twine &) = delete;
373
374 /// Create a 'null' string, which is an empty string that always
375 /// concatenates to form another empty string.
376 static Twine createNull() {
377 return Twine(NullKind);
378 }
379
380 /// @}
381 /// @name Numeric Conversions
382 /// @{
383
384 // Construct a twine to print \p Val as an unsigned hexadecimal integer.
385 static Twine utohexstr(const uint64_t &Val) {
386 Child LHS, RHS;
387 LHS.uHex = &Val;
388 RHS.twine = nullptr;
389 return Twine(LHS, UHexKind, RHS, EmptyKind);
390 }
391
392 /// @}
393 /// @name Predicate Operations
394 /// @{
395
396 /// Check if this twine is trivially empty; a false return value does not
397 /// necessarily mean the twine is empty.
398 bool isTriviallyEmpty() const {
399 return isNullary();
400 }
401
402 /// Return true if this twine can be dynamically accessed as a single
403 /// StringRef value with getSingleStringRef().
404 bool isSingleStringRef() const {
405 if (getRHSKind() != EmptyKind) return false;
406
407 switch (getLHSKind()) {
408 case EmptyKind:
409 case CStringKind:
410 case StdStringKind:
411 case StringRefKind:
412 case SmallStringKind:
413 return true;
414 default:
415 return false;
416 }
417 }
418
419 /// @}
420 /// @name String Operations
421 /// @{
422
423 Twine concat(const Twine &Suffix) const;
424
425 /// @}
426 /// @name Output & Conversion.
427 /// @{
428
429 /// Return the twine contents as a std::string.
430 std::string str() const;
431
432 /// Append the concatenated string into the given SmallString or SmallVector.
433 void toVector(SmallVectorImpl<char> &Out) const;
434
435 /// This returns the twine as a single StringRef. This method is only valid
436 /// if isSingleStringRef() is true.
437 StringRef getSingleStringRef() const {
438 assert(isSingleStringRef() && "This cannot be had as a single stringref!");
439 switch (getLHSKind()) {
440 default: llvm_unreachable("Out of sync with isSingleStringRef");
441 case EmptyKind: return StringRef();
442 case CStringKind: return StringRef(LHS.cString);
443 case StdStringKind: return StringRef(*LHS.stdString);
444 case StringRefKind: return *LHS.stringRef;
445 case SmallStringKind:
446 return StringRef(LHS.smallString->data(), LHS.smallString->size());
447 }
448 }
449
450 /// This returns the twine as a single StringRef if it can be
451 /// represented as such. Otherwise the twine is written into the given
452 /// SmallVector and a StringRef to the SmallVector's data is returned.
453 StringRef toStringRef(SmallVectorImpl<char> &Out) const {
454 if (isSingleStringRef())
455 return getSingleStringRef();
456 toVector(Out);
457 return StringRef(Out.data(), Out.size());
458 }
459
460 /// This returns the twine as a single null terminated StringRef if it
461 /// can be represented as such. Otherwise the twine is written into the
462 /// given SmallVector and a StringRef to the SmallVector's data is returned.
463 ///
464 /// The returned StringRef's size does not include the null terminator.
465 StringRef toNullTerminatedStringRef(SmallVectorImpl<char> &Out) const;
466
467 /// Write the concatenated string represented by this twine to the
468 /// stream \p OS.
469 void print(raw_ostream &OS) const;
470
471 /// Dump the concatenated string represented by this twine to stderr.
472 void dump() const;
473
474 /// Write the representation of this twine to the stream \p OS.
475 void printRepr(raw_ostream &OS) const;
476
477 /// Dump the representation of this twine to stderr.
478 void dumpRepr() const;
479
480 /// @}
481 };
482
483 /// @name Twine Inline Implementations
484 /// @{
485
486 inline Twine Twine::concat(const Twine &Suffix) const {
487 // Concatenation with null is null.
488 if (isNull() || Suffix.isNull())
489 return Twine(NullKind);
490
491 // Concatenation with empty yields the other side.
492 if (isEmpty())
493 return Suffix;
494 if (Suffix.isEmpty())
495 return *this;
496
497 // Otherwise we need to create a new node, taking care to fold in unary
498 // twines.
499 Child NewLHS, NewRHS;
500 NewLHS.twine = this;
501 NewRHS.twine = &Suffix;
502 NodeKind NewLHSKind = TwineKind, NewRHSKind = TwineKind;
503 if (isUnary()) {
504 NewLHS = LHS;
505 NewLHSKind = getLHSKind();
506 }
507 if (Suffix.isUnary()) {
508 NewRHS = Suffix.LHS;
509 NewRHSKind = Suffix.getLHSKind();
510 }
511
512 return Twine(NewLHS, NewLHSKind, NewRHS, NewRHSKind);
513 }
514
515 inline Twine operator+(const Twine &LHS, const Twine &RHS) {
516 return LHS.concat(RHS);
517 }
518
519 /// Additional overload to guarantee simplified codegen; this is equivalent to
520 /// concat().
521
522 inline Twine operator+(const char *LHS, const StringRef &RHS) {
523 return Twine(LHS, RHS);
524 }
525
526 /// Additional overload to guarantee simplified codegen; this is equivalent to
527 /// concat().
528
529 inline Twine operator+(const StringRef &LHS, const char *RHS) {
530 return Twine(LHS, RHS);
531 }
532
533 inline raw_ostream &operator<<(raw_ostream &OS, const Twine &RHS) {
534 RHS.print(OS);
535 return OS;
536 }
537
538 /// @}
539
540} // end namespace llvm
541
542#endif // LLVM_ADT_TWINE_H
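
Given the invariants above, a Twine is safe only as a statement-lifetime temporary. A minimal usage sketch; the function and names are illustrative:

#include "llvm/ADT/SmallString.h"
#include "llvm/ADT/Twine.h"
#include <string>
using namespace llvm;

// Render the concatenation immediately: every leaf (the C strings, Name,
// and the Twine(unsigned) node) outlives the full expression, so the
// pointers a Twine stores never dangle.
std::string buildMessage(StringRef Name, unsigned Line) {
  SmallString<64> Buf;
  return (Twine("error in '") + Name + "' at line " + Twine(Line))
      .toStringRef(Buf)
      .str();
}

// By contrast, storing a Twine (e.g. `Twine T = "a" + Name;`) keeps
// pointers into temporaries that are gone at the end of the statement,
// which is exactly the misuse the class comment warns about.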