LLVM  10.0.0svn
MachineIRBuilder.cpp
Go to the documentation of this file.
1 //===-- llvm/CodeGen/GlobalISel/MachineIRBuilder.cpp - MIBuilder--*- C++ -*-==//
2 //
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 //
7 //===----------------------------------------------------------------------===//
8 /// \file
9 /// This file implements the MachineIRBuilder class.
10 //===----------------------------------------------------------------------===//
13 
22 #include "llvm/IR/DebugInfo.h"
23 
24 using namespace llvm;
25 
27  State.MF = &MF;
28  State.MBB = nullptr;
29  State.MRI = &MF.getRegInfo();
30  State.TII = MF.getSubtarget().getInstrInfo();
31  State.DL = DebugLoc();
33  State.Observer = nullptr;
34 }
35 
37  State.MBB = &MBB;
38  State.II = MBB.end();
39  assert(&getMF() == MBB.getParent() &&
40  "Basic block is in a different function");
41 }
42 
44  assert(MI.getParent() && "Instruction is not part of a basic block");
45  setMBB(*MI.getParent());
46  State.II = MI.getIterator();
47 }
48 
50 
53  assert(MBB.getParent() == &getMF() &&
54  "Basic block is in a different function");
55  State.MBB = &MBB;
56  State.II = II;
57 }
58 
59 void MachineIRBuilder::recordInsertion(MachineInstr *InsertedInstr) const {
60  if (State.Observer)
61  State.Observer->createdInstr(*InsertedInstr);
62 }
63 
65  State.Observer = &Observer;
66 }
67 
69 
70 //------------------------------------------------------------------------------
71 // Build instruction variants.
72 //------------------------------------------------------------------------------
73 
75  return insertInstr(buildInstrNoInsert(Opcode));
76 }
77 
79  MachineInstrBuilder MIB = BuildMI(getMF(), getDL(), getTII().get(Opcode));
80  return MIB;
81 }
82 
84  getMBB().insert(getInsertPt(), MIB);
85  recordInsertion(MIB);
86  return MIB;
87 }
88 
91  const MDNode *Expr) {
92  assert(isa<DILocalVariable>(Variable) && "not a variable");
93  assert(cast<DIExpression>(Expr)->isValid() && "not an expression");
94  assert(
95  cast<DILocalVariable>(Variable)->isValidLocationForIntrinsic(getDL()) &&
96  "Expected inlined-at fields to agree");
97  return insertInstr(BuildMI(getMF(), getDL(),
98  getTII().get(TargetOpcode::DBG_VALUE),
99  /*IsIndirect*/ false, Reg, Variable, Expr));
100 }
101 
104  const MDNode *Expr) {
105  assert(isa<DILocalVariable>(Variable) && "not a variable");
106  assert(cast<DIExpression>(Expr)->isValid() && "not an expression");
107  assert(
108  cast<DILocalVariable>(Variable)->isValidLocationForIntrinsic(getDL()) &&
109  "Expected inlined-at fields to agree");
110  // DBG_VALUE insts now carry IR-level indirection in their DIExpression
111  // rather than encoding it in the instruction itself.
112  const DIExpression *DIExpr = cast<DIExpression>(Expr);
113  DIExpr = DIExpression::append(DIExpr, {dwarf::DW_OP_deref});
114  return insertInstr(BuildMI(getMF(), getDL(),
115  getTII().get(TargetOpcode::DBG_VALUE),
116  /*IsIndirect*/ false, Reg, Variable, DIExpr));
117 }
118 
120  const MDNode *Variable,
121  const MDNode *Expr) {
122  assert(isa<DILocalVariable>(Variable) && "not a variable");
123  assert(cast<DIExpression>(Expr)->isValid() && "not an expression");
124  assert(
125  cast<DILocalVariable>(Variable)->isValidLocationForIntrinsic(getDL()) &&
126  "Expected inlined-at fields to agree");
127  // DBG_VALUE insts now carry IR-level indirection in their DIExpression
128  // rather than encoding it in the instruction itself.
129  const DIExpression *DIExpr = cast<DIExpression>(Expr);
130  DIExpr = DIExpression::append(DIExpr, {dwarf::DW_OP_deref});
131  return buildInstr(TargetOpcode::DBG_VALUE)
132  .addFrameIndex(FI)
133  .addReg(0)
134  .addMetadata(Variable)
135  .addMetadata(DIExpr);
136 }
137 
139  const MDNode *Variable,
140  const MDNode *Expr) {
141  assert(isa<DILocalVariable>(Variable) && "not a variable");
142  assert(cast<DIExpression>(Expr)->isValid() && "not an expression");
143  assert(
144  cast<DILocalVariable>(Variable)->isValidLocationForIntrinsic(getDL()) &&
145  "Expected inlined-at fields to agree");
146  auto MIB = buildInstr(TargetOpcode::DBG_VALUE);
147  if (auto *CI = dyn_cast<ConstantInt>(&C)) {
148  if (CI->getBitWidth() > 64)
149  MIB.addCImm(CI);
150  else
151  MIB.addImm(CI->getZExtValue());
152  } else if (auto *CFP = dyn_cast<ConstantFP>(&C)) {
153  MIB.addFPImm(CFP);
154  } else {
155  // Insert %noreg if we didn't find a usable constant and had to drop it.
156  MIB.addReg(0U);
157  }
158 
159  return MIB.addReg(0).addMetadata(Variable).addMetadata(Expr);
160 }
161 
163  assert(isa<DILabel>(Label) && "not a label");
164  assert(cast<DILabel>(Label)->isValidLocationForIntrinsic(State.DL) &&
165  "Expected inlined-at fields to agree");
166  auto MIB = buildInstr(TargetOpcode::DBG_LABEL);
167 
168  return MIB.addMetadata(Label);
169 }
170 
172  const SrcOp &Size,
173  unsigned Align) {
174  assert(Res.getLLTTy(*getMRI()).isPointer() && "expected ptr dst type");
175  auto MIB = buildInstr(TargetOpcode::G_DYN_STACKALLOC);
176  Res.addDefToMIB(*getMRI(), MIB);
177  Size.addSrcToMIB(MIB);
178  MIB.addImm(Align);
179  return MIB;
180 }
181 
183  int Idx) {
184  assert(Res.getLLTTy(*getMRI()).isPointer() && "invalid operand type");
185  auto MIB = buildInstr(TargetOpcode::G_FRAME_INDEX);
186  Res.addDefToMIB(*getMRI(), MIB);
187  MIB.addFrameIndex(Idx);
188  return MIB;
189 }
190 
192  const GlobalValue *GV) {
193  assert(Res.getLLTTy(*getMRI()).isPointer() && "invalid operand type");
195  GV->getType()->getAddressSpace() &&
196  "address space mismatch");
197 
198  auto MIB = buildInstr(TargetOpcode::G_GLOBAL_VALUE);
199  Res.addDefToMIB(*getMRI(), MIB);
200  MIB.addGlobalAddress(GV);
201  return MIB;
202 }
203 
205  unsigned JTI) {
206  return buildInstr(TargetOpcode::G_JUMP_TABLE, {PtrTy}, {})
207  .addJumpTableIndex(JTI);
208 }
209 
210 void MachineIRBuilder::validateBinaryOp(const LLT &Res, const LLT &Op0,
211  const LLT &Op1) {
212  assert((Res.isScalar() || Res.isVector()) && "invalid operand type");
213  assert((Res == Op0 && Res == Op1) && "type mismatch");
214 }
215 
216 void MachineIRBuilder::validateShiftOp(const LLT &Res, const LLT &Op0,
217  const LLT &Op1) {
218  assert((Res.isScalar() || Res.isVector()) && "invalid operand type");
219  assert((Res == Op0) && "type mismatch");
220 }
221 
223  const SrcOp &Op0,
224  const SrcOp &Op1) {
225  assert(Res.getLLTTy(*getMRI()).isPointer() &&
226  Res.getLLTTy(*getMRI()) == Op0.getLLTTy(*getMRI()) && "type mismatch");
227  assert(Op1.getLLTTy(*getMRI()).isScalar() && "invalid offset type");
228 
229  return buildInstr(TargetOpcode::G_GEP, {Res}, {Op0, Op1});
230 }
231 
234  const LLT &ValueTy, uint64_t Value) {
235  assert(Res == 0 && "Res is a result argument");
236  assert(ValueTy.isScalar() && "invalid offset type");
237 
238  if (Value == 0) {
239  Res = Op0;
240  return None;
241  }
242 
244  auto Cst = buildConstant(ValueTy, Value);
245  return buildGEP(Res, Op0, Cst.getReg(0));
246 }
247 
249  const SrcOp &Op0,
250  uint32_t NumBits) {
251  assert(Res.getLLTTy(*getMRI()).isPointer() &&
252  Res.getLLTTy(*getMRI()) == Op0.getLLTTy(*getMRI()) && "type mismatch");
253 
254  auto MIB = buildInstr(TargetOpcode::G_PTR_MASK);
255  Res.addDefToMIB(*getMRI(), MIB);
256  Op0.addSrcToMIB(MIB);
257  MIB.addImm(NumBits);
258  return MIB;
259 }
260 
262  return buildInstr(TargetOpcode::G_BR).addMBB(&Dest);
263 }
264 
266  assert(getMRI()->getType(Tgt).isPointer() && "invalid branch destination");
267  return buildInstr(TargetOpcode::G_BRINDIRECT).addUse(Tgt);
268 }
269 
271  unsigned JTI,
272  Register IndexReg) {
273  assert(getMRI()->getType(TablePtr).isPointer() &&
274  "Table reg must be a pointer");
275  return buildInstr(TargetOpcode::G_BRJT)
276  .addUse(TablePtr)
277  .addJumpTableIndex(JTI)
278  .addUse(IndexReg);
279 }
280 
282  const SrcOp &Op) {
283  return buildInstr(TargetOpcode::COPY, Res, Op);
284 }
285 
287  const ConstantInt &Val) {
288  LLT Ty = Res.getLLTTy(*getMRI());
289  LLT EltTy = Ty.getScalarType();
290  assert(EltTy.getScalarSizeInBits() == Val.getBitWidth() &&
291  "creating constant with the wrong size");
292 
293  if (Ty.isVector()) {
294  auto Const = buildInstr(TargetOpcode::G_CONSTANT)
295  .addDef(getMRI()->createGenericVirtualRegister(EltTy))
296  .addCImm(&Val);
297  return buildSplatVector(Res, Const);
298  }
299 
300  auto Const = buildInstr(TargetOpcode::G_CONSTANT);
301  Res.addDefToMIB(*getMRI(), Const);
302  Const.addCImm(&Val);
303  return Const;
304 }
305 
307  int64_t Val) {
308  auto IntN = IntegerType::get(getMF().getFunction().getContext(),
310  ConstantInt *CI = ConstantInt::get(IntN, Val, true);
311  return buildConstant(Res, *CI);
312 }
313 
315  const ConstantFP &Val) {
316  LLT Ty = Res.getLLTTy(*getMRI());
317  LLT EltTy = Ty.getScalarType();
318 
320  == EltTy.getSizeInBits() &&
321  "creating fconstant with the wrong size");
322 
323  assert(!Ty.isPointer() && "invalid operand type");
324 
325  if (Ty.isVector()) {
326  auto Const = buildInstr(TargetOpcode::G_FCONSTANT)
327  .addDef(getMRI()->createGenericVirtualRegister(EltTy))
328  .addFPImm(&Val);
329 
330  return buildSplatVector(Res, Const);
331  }
332 
333  auto Const = buildInstr(TargetOpcode::G_FCONSTANT);
334  Res.addDefToMIB(*getMRI(), Const);
335  Const.addFPImm(&Val);
336  return Const;
337 }
338 
340  const APInt &Val) {
341  ConstantInt *CI = ConstantInt::get(getMF().getFunction().getContext(), Val);
342  return buildConstant(Res, *CI);
343 }
344 
346  double Val) {
347  LLT DstTy = Res.getLLTTy(*getMRI());
348  auto &Ctx = getMF().getFunction().getContext();
349  auto *CFP =
351  return buildFConstant(Res, *CFP);
352 }
353 
355  const APFloat &Val) {
356  auto &Ctx = getMF().getFunction().getContext();
357  auto *CFP = ConstantFP::get(Ctx, Val);
358  return buildFConstant(Res, *CFP);
359 }
360 
362  MachineBasicBlock &Dest) {
363  assert(getMRI()->getType(Tst).isScalar() && "invalid operand type");
364 
365  return buildInstr(TargetOpcode::G_BRCOND).addUse(Tst).addMBB(&Dest);
366 }
367 
369  const SrcOp &Addr,
370  MachineMemOperand &MMO) {
371  return buildLoadInstr(TargetOpcode::G_LOAD, Res, Addr, MMO);
372 }
373 
375  const DstOp &Res,
376  const SrcOp &Addr,
377  MachineMemOperand &MMO) {
378  assert(Res.getLLTTy(*getMRI()).isValid() && "invalid operand type");
379  assert(Addr.getLLTTy(*getMRI()).isPointer() && "invalid operand type");
380 
381  auto MIB = buildInstr(Opcode);
382  Res.addDefToMIB(*getMRI(), MIB);
383  Addr.addSrcToMIB(MIB);
384  MIB.addMemOperand(&MMO);
385  return MIB;
386 }
387 
389  const SrcOp &Addr,
390  MachineMemOperand &MMO) {
391  assert(Val.getLLTTy(*getMRI()).isValid() && "invalid operand type");
392  assert(Addr.getLLTTy(*getMRI()).isPointer() && "invalid operand type");
393 
394  auto MIB = buildInstr(TargetOpcode::G_STORE);
395  Val.addSrcToMIB(MIB);
396  Addr.addSrcToMIB(MIB);
397  MIB.addMemOperand(&MMO);
398  return MIB;
399 }
400 
402  const DstOp &CarryOut,
403  const SrcOp &Op0,
404  const SrcOp &Op1) {
405  return buildInstr(TargetOpcode::G_UADDO, {Res, CarryOut}, {Op0, Op1});
406 }
407 
409  const DstOp &CarryOut,
410  const SrcOp &Op0,
411  const SrcOp &Op1,
412  const SrcOp &CarryIn) {
413  return buildInstr(TargetOpcode::G_UADDE, {Res, CarryOut},
414  {Op0, Op1, CarryIn});
415 }
416 
418  const SrcOp &Op) {
419  return buildInstr(TargetOpcode::G_ANYEXT, Res, Op);
420 }
421 
423  const SrcOp &Op) {
424  return buildInstr(TargetOpcode::G_SEXT, Res, Op);
425 }
426 
428  const SrcOp &Op) {
429  return buildInstr(TargetOpcode::G_ZEXT, Res, Op);
430 }
431 
432 unsigned MachineIRBuilder::getBoolExtOp(bool IsVec, bool IsFP) const {
433  const auto *TLI = getMF().getSubtarget().getTargetLowering();
434  switch (TLI->getBooleanContents(IsVec, IsFP)) {
436  return TargetOpcode::G_SEXT;
438  return TargetOpcode::G_ZEXT;
439  default:
440  return TargetOpcode::G_ANYEXT;
441  }
442 }
443 
445  const SrcOp &Op,
446  bool IsFP) {
447  unsigned ExtOp = getBoolExtOp(getMRI()->getType(Op.getReg()).isVector(), IsFP);
448  return buildInstr(ExtOp, Res, Op);
449 }
450 
452  const DstOp &Res,
453  const SrcOp &Op) {
454  assert((TargetOpcode::G_ANYEXT == ExtOpc || TargetOpcode::G_ZEXT == ExtOpc ||
455  TargetOpcode::G_SEXT == ExtOpc) &&
456  "Expecting Extending Opc");
457  assert(Res.getLLTTy(*getMRI()).isScalar() ||
458  Res.getLLTTy(*getMRI()).isVector());
459  assert(Res.getLLTTy(*getMRI()).isScalar() ==
460  Op.getLLTTy(*getMRI()).isScalar());
461 
462  unsigned Opcode = TargetOpcode::COPY;
463  if (Res.getLLTTy(*getMRI()).getSizeInBits() >
464  Op.getLLTTy(*getMRI()).getSizeInBits())
465  Opcode = ExtOpc;
466  else if (Res.getLLTTy(*getMRI()).getSizeInBits() <
467  Op.getLLTTy(*getMRI()).getSizeInBits())
468  Opcode = TargetOpcode::G_TRUNC;
469  else
470  assert(Res.getLLTTy(*getMRI()) == Op.getLLTTy(*getMRI()));
471 
472  return buildInstr(Opcode, Res, Op);
473 }
474 
476  const SrcOp &Op) {
477  return buildExtOrTrunc(TargetOpcode::G_SEXT, Res, Op);
478 }
479 
481  const SrcOp &Op) {
482  return buildExtOrTrunc(TargetOpcode::G_ZEXT, Res, Op);
483 }
484 
486  const SrcOp &Op) {
487  return buildExtOrTrunc(TargetOpcode::G_ANYEXT, Res, Op);
488 }
489 
491  const SrcOp &Src) {
492  LLT SrcTy = Src.getLLTTy(*getMRI());
493  LLT DstTy = Dst.getLLTTy(*getMRI());
494  if (SrcTy == DstTy)
495  return buildCopy(Dst, Src);
496 
497  unsigned Opcode;
498  if (SrcTy.isPointer() && DstTy.isScalar())
499  Opcode = TargetOpcode::G_PTRTOINT;
500  else if (DstTy.isPointer() && SrcTy.isScalar())
501  Opcode = TargetOpcode::G_INTTOPTR;
502  else {
503  assert(!SrcTy.isPointer() && !DstTy.isPointer() && "n G_ADDRCAST yet");
504  Opcode = TargetOpcode::G_BITCAST;
505  }
506 
507  return buildInstr(Opcode, Dst, Src);
508 }
509 
511  const SrcOp &Src,
512  uint64_t Index) {
513  LLT SrcTy = Src.getLLTTy(*getMRI());
514  LLT DstTy = Dst.getLLTTy(*getMRI());
515 
516 #ifndef NDEBUG
517  assert(SrcTy.isValid() && "invalid operand type");
518  assert(DstTy.isValid() && "invalid operand type");
519  assert(Index + DstTy.getSizeInBits() <= SrcTy.getSizeInBits() &&
520  "extracting off end of register");
521 #endif
522 
523  if (DstTy.getSizeInBits() == SrcTy.getSizeInBits()) {
524  assert(Index == 0 && "insertion past the end of a register");
525  return buildCast(Dst, Src);
526  }
527 
528  auto Extract = buildInstr(TargetOpcode::G_EXTRACT);
529  Dst.addDefToMIB(*getMRI(), Extract);
530  Src.addSrcToMIB(Extract);
531  Extract.addImm(Index);
532  return Extract;
533 }
534 
536  ArrayRef<uint64_t> Indices) {
537 #ifndef NDEBUG
538  assert(Ops.size() == Indices.size() && "incompatible args");
539  assert(!Ops.empty() && "invalid trivial sequence");
540  assert(std::is_sorted(Indices.begin(), Indices.end()) &&
541  "sequence offsets must be in ascending order");
542 
543  assert(getMRI()->getType(Res).isValid() && "invalid operand type");
544  for (auto Op : Ops)
545  assert(getMRI()->getType(Op).isValid() && "invalid operand type");
546 #endif
547 
548  LLT ResTy = getMRI()->getType(Res);
549  LLT OpTy = getMRI()->getType(Ops[0]);
550  unsigned OpSize = OpTy.getSizeInBits();
551  bool MaybeMerge = true;
552  for (unsigned i = 0; i < Ops.size(); ++i) {
553  if (getMRI()->getType(Ops[i]) != OpTy || Indices[i] != i * OpSize) {
554  MaybeMerge = false;
555  break;
556  }
557  }
558 
559  if (MaybeMerge && Ops.size() * OpSize == ResTy.getSizeInBits()) {
560  buildMerge(Res, Ops);
561  return;
562  }
563 
564  Register ResIn = getMRI()->createGenericVirtualRegister(ResTy);
565  buildUndef(ResIn);
566 
567  for (unsigned i = 0; i < Ops.size(); ++i) {
568  Register ResOut = i + 1 == Ops.size()
569  ? Res
571  buildInsert(ResOut, ResIn, Ops[i], Indices[i]);
572  ResIn = ResOut;
573  }
574 }
575 
577  return buildInstr(TargetOpcode::G_IMPLICIT_DEF, {Res}, {});
578 }
579 
581  ArrayRef<Register> Ops) {
582  // Unfortunately to convert from ArrayRef<LLT> to ArrayRef<SrcOp>,
583  // we need some temporary storage for the DstOp objects. Here we use a
584  // sufficiently large SmallVector to not go through the heap.
585  SmallVector<SrcOp, 8> TmpVec(Ops.begin(), Ops.end());
586  assert(TmpVec.size() > 1);
587  return buildInstr(TargetOpcode::G_MERGE_VALUES, Res, TmpVec);
588 }
589 
591  const SrcOp &Op) {
592  // Unfortunately to convert from ArrayRef<LLT> to ArrayRef<DstOp>,
593  // we need some temporary storage for the DstOp objects. Here we use a
594  // sufficiently large SmallVector to not go through the heap.
595  SmallVector<DstOp, 8> TmpVec(Res.begin(), Res.end());
596  assert(TmpVec.size() > 1);
597  return buildInstr(TargetOpcode::G_UNMERGE_VALUES, TmpVec, Op);
598 }
599 
601  const SrcOp &Op) {
602  unsigned NumReg = Op.getLLTTy(*getMRI()).getSizeInBits() / Res.getSizeInBits();
604  for (unsigned I = 0; I != NumReg; ++I)
605  TmpVec.push_back(getMRI()->createGenericVirtualRegister(Res));
606  return buildUnmerge(TmpVec, Op);
607 }
608 
610  const SrcOp &Op) {
611  // Unfortunately to convert from ArrayRef<Register> to ArrayRef<DstOp>,
612  // we need some temporary storage for the DstOp objects. Here we use a
613  // sufficiently large SmallVector to not go through the heap.
614  SmallVector<DstOp, 8> TmpVec(Res.begin(), Res.end());
615  assert(TmpVec.size() > 1);
616  return buildInstr(TargetOpcode::G_UNMERGE_VALUES, TmpVec, Op);
617 }
618 
620  ArrayRef<Register> Ops) {
621  // Unfortunately to convert from ArrayRef<Register> to ArrayRef<SrcOp>,
622  // we need some temporary storage for the DstOp objects. Here we use a
623  // sufficiently large SmallVector to not go through the heap.
624  SmallVector<SrcOp, 8> TmpVec(Ops.begin(), Ops.end());
625  return buildInstr(TargetOpcode::G_BUILD_VECTOR, Res, TmpVec);
626 }
627 
629  const SrcOp &Src) {
630  SmallVector<SrcOp, 8> TmpVec(Res.getLLTTy(*getMRI()).getNumElements(), Src);
631  return buildInstr(TargetOpcode::G_BUILD_VECTOR, Res, TmpVec);
632 }
633 
636  ArrayRef<Register> Ops) {
637  // Unfortunately to convert from ArrayRef<Register> to ArrayRef<SrcOp>,
638  // we need some temporary storage for the DstOp objects. Here we use a
639  // sufficiently large SmallVector to not go through the heap.
640  SmallVector<SrcOp, 8> TmpVec(Ops.begin(), Ops.end());
641  return buildInstr(TargetOpcode::G_BUILD_VECTOR_TRUNC, Res, TmpVec);
642 }
643 
646  // Unfortunately to convert from ArrayRef<Register> to ArrayRef<SrcOp>,
647  // we need some temporary storage for the DstOp objects. Here we use a
648  // sufficiently large SmallVector to not go through the heap.
649  SmallVector<SrcOp, 8> TmpVec(Ops.begin(), Ops.end());
650  return buildInstr(TargetOpcode::G_CONCAT_VECTORS, Res, TmpVec);
651 }
652 
654  Register Op, unsigned Index) {
655  assert(Index + getMRI()->getType(Op).getSizeInBits() <=
656  getMRI()->getType(Res).getSizeInBits() &&
657  "insertion past the end of a register");
658 
659  if (getMRI()->getType(Res).getSizeInBits() ==
660  getMRI()->getType(Op).getSizeInBits()) {
661  return buildCast(Res, Op);
662  }
663 
664  return buildInstr(TargetOpcode::G_INSERT)
665  .addDef(Res)
666  .addUse(Src)
667  .addUse(Op)
668  .addImm(Index);
669 }
670 
672  ArrayRef<Register> ResultRegs,
673  bool HasSideEffects) {
674  auto MIB =
675  buildInstr(HasSideEffects ? TargetOpcode::G_INTRINSIC_W_SIDE_EFFECTS
676  : TargetOpcode::G_INTRINSIC);
677  for (unsigned ResultReg : ResultRegs)
678  MIB.addDef(ResultReg);
679  MIB.addIntrinsicID(ID);
680  return MIB;
681 }
682 
685  bool HasSideEffects) {
686  auto MIB =
687  buildInstr(HasSideEffects ? TargetOpcode::G_INTRINSIC_W_SIDE_EFFECTS
688  : TargetOpcode::G_INTRINSIC);
689  for (DstOp Result : Results)
690  Result.addDefToMIB(*getMRI(), MIB);
691  MIB.addIntrinsicID(ID);
692  return MIB;
693 }
694 
696  const SrcOp &Op) {
697  return buildInstr(TargetOpcode::G_TRUNC, Res, Op);
698 }
699 
701  const SrcOp &Op) {
702  return buildInstr(TargetOpcode::G_FPTRUNC, Res, Op);
703 }
704 
706  const DstOp &Res,
707  const SrcOp &Op0,
708  const SrcOp &Op1) {
709  return buildInstr(TargetOpcode::G_ICMP, Res, {Pred, Op0, Op1});
710 }
711 
713  const DstOp &Res,
714  const SrcOp &Op0,
715  const SrcOp &Op1,
716  Optional<unsigned> Flags) {
717 
718  return buildInstr(TargetOpcode::G_FCMP, Res, {Pred, Op0, Op1}, Flags);
719 }
720 
722  const SrcOp &Tst,
723  const SrcOp &Op0,
724  const SrcOp &Op1,
725  Optional<unsigned> Flags) {
726 
727  return buildInstr(TargetOpcode::G_SELECT, {Res}, {Tst, Op0, Op1}, Flags);
728 }
729 
732  const SrcOp &Elt, const SrcOp &Idx) {
733  return buildInstr(TargetOpcode::G_INSERT_VECTOR_ELT, Res, {Val, Elt, Idx});
734 }
735 
738  const SrcOp &Idx) {
739  return buildInstr(TargetOpcode::G_EXTRACT_VECTOR_ELT, Res, {Val, Idx});
740 }
741 
743  Register OldValRes, Register SuccessRes, Register Addr, Register CmpVal,
744  Register NewVal, MachineMemOperand &MMO) {
745 #ifndef NDEBUG
746  LLT OldValResTy = getMRI()->getType(OldValRes);
747  LLT SuccessResTy = getMRI()->getType(SuccessRes);
748  LLT AddrTy = getMRI()->getType(Addr);
749  LLT CmpValTy = getMRI()->getType(CmpVal);
750  LLT NewValTy = getMRI()->getType(NewVal);
751  assert(OldValResTy.isScalar() && "invalid operand type");
752  assert(SuccessResTy.isScalar() && "invalid operand type");
753  assert(AddrTy.isPointer() && "invalid operand type");
754  assert(CmpValTy.isValid() && "invalid operand type");
755  assert(NewValTy.isValid() && "invalid operand type");
756  assert(OldValResTy == CmpValTy && "type mismatch");
757  assert(OldValResTy == NewValTy && "type mismatch");
758 #endif
759 
760  return buildInstr(TargetOpcode::G_ATOMIC_CMPXCHG_WITH_SUCCESS)
761  .addDef(OldValRes)
762  .addDef(SuccessRes)
763  .addUse(Addr)
764  .addUse(CmpVal)
765  .addUse(NewVal)
766  .addMemOperand(&MMO);
767 }
768 
771  Register CmpVal, Register NewVal,
772  MachineMemOperand &MMO) {
773 #ifndef NDEBUG
774  LLT OldValResTy = getMRI()->getType(OldValRes);
775  LLT AddrTy = getMRI()->getType(Addr);
776  LLT CmpValTy = getMRI()->getType(CmpVal);
777  LLT NewValTy = getMRI()->getType(NewVal);
778  assert(OldValResTy.isScalar() && "invalid operand type");
779  assert(AddrTy.isPointer() && "invalid operand type");
780  assert(CmpValTy.isValid() && "invalid operand type");
781  assert(NewValTy.isValid() && "invalid operand type");
782  assert(OldValResTy == CmpValTy && "type mismatch");
783  assert(OldValResTy == NewValTy && "type mismatch");
784 #endif
785 
786  return buildInstr(TargetOpcode::G_ATOMIC_CMPXCHG)
787  .addDef(OldValRes)
788  .addUse(Addr)
789  .addUse(CmpVal)
790  .addUse(NewVal)
791  .addMemOperand(&MMO);
792 }
793 
795  unsigned Opcode, const DstOp &OldValRes,
796  const SrcOp &Addr, const SrcOp &Val,
797  MachineMemOperand &MMO) {
798 
799 #ifndef NDEBUG
800  LLT OldValResTy = OldValRes.getLLTTy(*getMRI());
801  LLT AddrTy = Addr.getLLTTy(*getMRI());
802  LLT ValTy = Val.getLLTTy(*getMRI());
803  assert(OldValResTy.isScalar() && "invalid operand type");
804  assert(AddrTy.isPointer() && "invalid operand type");
805  assert(ValTy.isValid() && "invalid operand type");
806  assert(OldValResTy == ValTy && "type mismatch");
807  assert(MMO.isAtomic() && "not atomic mem operand");
808 #endif
809 
810  auto MIB = buildInstr(Opcode);
811  OldValRes.addDefToMIB(*getMRI(), MIB);
812  Addr.addSrcToMIB(MIB);
813  Val.addSrcToMIB(MIB);
814  MIB.addMemOperand(&MMO);
815  return MIB;
816 }
817 
820  Register Val, MachineMemOperand &MMO) {
821  return buildAtomicRMW(TargetOpcode::G_ATOMICRMW_XCHG, OldValRes, Addr, Val,
822  MMO);
823 }
826  Register Val, MachineMemOperand &MMO) {
827  return buildAtomicRMW(TargetOpcode::G_ATOMICRMW_ADD, OldValRes, Addr, Val,
828  MMO);
829 }
832  Register Val, MachineMemOperand &MMO) {
833  return buildAtomicRMW(TargetOpcode::G_ATOMICRMW_SUB, OldValRes, Addr, Val,
834  MMO);
835 }
838  Register Val, MachineMemOperand &MMO) {
839  return buildAtomicRMW(TargetOpcode::G_ATOMICRMW_AND, OldValRes, Addr, Val,
840  MMO);
841 }
844  Register Val, MachineMemOperand &MMO) {
845  return buildAtomicRMW(TargetOpcode::G_ATOMICRMW_NAND, OldValRes, Addr, Val,
846  MMO);
847 }
849  Register Addr,
850  Register Val,
851  MachineMemOperand &MMO) {
852  return buildAtomicRMW(TargetOpcode::G_ATOMICRMW_OR, OldValRes, Addr, Val,
853  MMO);
854 }
857  Register Val, MachineMemOperand &MMO) {
858  return buildAtomicRMW(TargetOpcode::G_ATOMICRMW_XOR, OldValRes, Addr, Val,
859  MMO);
860 }
863  Register Val, MachineMemOperand &MMO) {
864  return buildAtomicRMW(TargetOpcode::G_ATOMICRMW_MAX, OldValRes, Addr, Val,
865  MMO);
866 }
869  Register Val, MachineMemOperand &MMO) {
870  return buildAtomicRMW(TargetOpcode::G_ATOMICRMW_MIN, OldValRes, Addr, Val,
871  MMO);
872 }
875  Register Val, MachineMemOperand &MMO) {
876  return buildAtomicRMW(TargetOpcode::G_ATOMICRMW_UMAX, OldValRes, Addr, Val,
877  MMO);
878 }
881  Register Val, MachineMemOperand &MMO) {
882  return buildAtomicRMW(TargetOpcode::G_ATOMICRMW_UMIN, OldValRes, Addr, Val,
883  MMO);
884 }
885 
888  const DstOp &OldValRes, const SrcOp &Addr, const SrcOp &Val,
889  MachineMemOperand &MMO) {
890  return buildAtomicRMW(TargetOpcode::G_ATOMICRMW_FADD, OldValRes, Addr, Val,
891  MMO);
892 }
893 
895 MachineIRBuilder::buildAtomicRMWFSub(const DstOp &OldValRes, const SrcOp &Addr, const SrcOp &Val,
896  MachineMemOperand &MMO) {
897  return buildAtomicRMW(TargetOpcode::G_ATOMICRMW_FSUB, OldValRes, Addr, Val,
898  MMO);
899 }
900 
902 MachineIRBuilder::buildFence(unsigned Ordering, unsigned Scope) {
903  return buildInstr(TargetOpcode::G_FENCE)
904  .addImm(Ordering)
905  .addImm(Scope);
906 }
907 
910 #ifndef NDEBUG
911  assert(getMRI()->getType(Res).isPointer() && "invalid res type");
912 #endif
913 
914  return buildInstr(TargetOpcode::G_BLOCK_ADDR).addDef(Res).addBlockAddress(BA);
915 }
916 
917 void MachineIRBuilder::validateTruncExt(const LLT &DstTy, const LLT &SrcTy,
918  bool IsExtend) {
919 #ifndef NDEBUG
920  if (DstTy.isVector()) {
921  assert(SrcTy.isVector() && "mismatched cast between vector and non-vector");
922  assert(SrcTy.getNumElements() == DstTy.getNumElements() &&
923  "different number of elements in a trunc/ext");
924  } else
925  assert(DstTy.isScalar() && SrcTy.isScalar() && "invalid extend/trunc");
926 
927  if (IsExtend)
928  assert(DstTy.getSizeInBits() > SrcTy.getSizeInBits() &&
929  "invalid narrowing extend");
930  else
931  assert(DstTy.getSizeInBits() < SrcTy.getSizeInBits() &&
932  "invalid widening trunc");
933 #endif
934 }
935 
936 void MachineIRBuilder::validateSelectOp(const LLT &ResTy, const LLT &TstTy,
937  const LLT &Op0Ty, const LLT &Op1Ty) {
938 #ifndef NDEBUG
939  assert((ResTy.isScalar() || ResTy.isVector() || ResTy.isPointer()) &&
940  "invalid operand type");
941  assert((ResTy == Op0Ty && ResTy == Op1Ty) && "type mismatch");
942  if (ResTy.isScalar() || ResTy.isPointer())
943  assert(TstTy.isScalar() && "type mismatch");
944  else
945  assert((TstTy.isScalar() ||
946  (TstTy.isVector() &&
947  TstTy.getNumElements() == Op0Ty.getNumElements())) &&
948  "type mismatch");
949 #endif
950 }
951 
953  ArrayRef<DstOp> DstOps,
954  ArrayRef<SrcOp> SrcOps,
955  Optional<unsigned> Flags) {
956  switch (Opc) {
957  default:
958  break;
959  case TargetOpcode::G_SELECT: {
960  assert(DstOps.size() == 1 && "Invalid select");
961  assert(SrcOps.size() == 3 && "Invalid select");
963  DstOps[0].getLLTTy(*getMRI()), SrcOps[0].getLLTTy(*getMRI()),
964  SrcOps[1].getLLTTy(*getMRI()), SrcOps[2].getLLTTy(*getMRI()));
965  break;
966  }
967  case TargetOpcode::G_ADD:
968  case TargetOpcode::G_AND:
969  case TargetOpcode::G_MUL:
970  case TargetOpcode::G_OR:
971  case TargetOpcode::G_SUB:
972  case TargetOpcode::G_XOR:
973  case TargetOpcode::G_UDIV:
974  case TargetOpcode::G_SDIV:
975  case TargetOpcode::G_UREM:
976  case TargetOpcode::G_SREM:
977  case TargetOpcode::G_SMIN:
978  case TargetOpcode::G_SMAX:
979  case TargetOpcode::G_UMIN:
980  case TargetOpcode::G_UMAX: {
981  // All these are binary ops.
982  assert(DstOps.size() == 1 && "Invalid Dst");
983  assert(SrcOps.size() == 2 && "Invalid Srcs");
984  validateBinaryOp(DstOps[0].getLLTTy(*getMRI()),
985  SrcOps[0].getLLTTy(*getMRI()),
986  SrcOps[1].getLLTTy(*getMRI()));
987  break;
988  }
989  case TargetOpcode::G_SHL:
990  case TargetOpcode::G_ASHR:
991  case TargetOpcode::G_LSHR: {
992  assert(DstOps.size() == 1 && "Invalid Dst");
993  assert(SrcOps.size() == 2 && "Invalid Srcs");
994  validateShiftOp(DstOps[0].getLLTTy(*getMRI()),
995  SrcOps[0].getLLTTy(*getMRI()),
996  SrcOps[1].getLLTTy(*getMRI()));
997  break;
998  }
999  case TargetOpcode::G_SEXT:
1000  case TargetOpcode::G_ZEXT:
1001  case TargetOpcode::G_ANYEXT:
1002  assert(DstOps.size() == 1 && "Invalid Dst");
1003  assert(SrcOps.size() == 1 && "Invalid Srcs");
1004  validateTruncExt(DstOps[0].getLLTTy(*getMRI()),
1005  SrcOps[0].getLLTTy(*getMRI()), true);
1006  break;
1007  case TargetOpcode::G_TRUNC:
1008  case TargetOpcode::G_FPTRUNC: {
1009  assert(DstOps.size() == 1 && "Invalid Dst");
1010  assert(SrcOps.size() == 1 && "Invalid Srcs");
1011  validateTruncExt(DstOps[0].getLLTTy(*getMRI()),
1012  SrcOps[0].getLLTTy(*getMRI()), false);
1013  break;
1014  }
1015  case TargetOpcode::COPY:
1016  assert(DstOps.size() == 1 && "Invalid Dst");
1017  // If the caller wants to add a subreg source it has to be done separately
1018  // so we may not have any SrcOps at this point yet.
1019  break;
1020  case TargetOpcode::G_FCMP:
1021  case TargetOpcode::G_ICMP: {
1022  assert(DstOps.size() == 1 && "Invalid Dst Operands");
1023  assert(SrcOps.size() == 3 && "Invalid Src Operands");
1024  // For F/ICMP, the first src operand is the predicate, followed by
1025  // the two comparands.
1026  assert(SrcOps[0].getSrcOpKind() == SrcOp::SrcType::Ty_Predicate &&
1027  "Expecting predicate");
1028  assert([&]() -> bool {
1029  CmpInst::Predicate Pred = SrcOps[0].getPredicate();
1030  return Opc == TargetOpcode::G_ICMP ? CmpInst::isIntPredicate(Pred)
1031  : CmpInst::isFPPredicate(Pred);
1032  }() && "Invalid predicate");
1033  assert(SrcOps[1].getLLTTy(*getMRI()) == SrcOps[2].getLLTTy(*getMRI()) &&
1034  "Type mismatch");
1035  assert([&]() -> bool {
1036  LLT Op0Ty = SrcOps[1].getLLTTy(*getMRI());
1037  LLT DstTy = DstOps[0].getLLTTy(*getMRI());
1038  if (Op0Ty.isScalar() || Op0Ty.isPointer())
1039  return DstTy.isScalar();
1040  else
1041  return DstTy.isVector() &&
1042  DstTy.getNumElements() == Op0Ty.getNumElements();
1043  }() && "Type Mismatch");
1044  break;
1045  }
1046  case TargetOpcode::G_UNMERGE_VALUES: {
1047  assert(!DstOps.empty() && "Invalid trivial sequence");
1048  assert(SrcOps.size() == 1 && "Invalid src for Unmerge");
1049  assert(std::all_of(DstOps.begin(), DstOps.end(),
1050  [&, this](const DstOp &Op) {
1051  return Op.getLLTTy(*getMRI()) ==
1052  DstOps[0].getLLTTy(*getMRI());
1053  }) &&
1054  "type mismatch in output list");
1055  assert(DstOps.size() * DstOps[0].getLLTTy(*getMRI()).getSizeInBits() ==
1056  SrcOps[0].getLLTTy(*getMRI()).getSizeInBits() &&
1057  "input operands do not cover output register");
1058  break;
1059  }
1060  case TargetOpcode::G_MERGE_VALUES: {
1061  assert(!SrcOps.empty() && "invalid trivial sequence");
1062  assert(DstOps.size() == 1 && "Invalid Dst");
1063  assert(std::all_of(SrcOps.begin(), SrcOps.end(),
1064  [&, this](const SrcOp &Op) {
1065  return Op.getLLTTy(*getMRI()) ==
1066  SrcOps[0].getLLTTy(*getMRI());
1067  }) &&
1068  "type mismatch in input list");
1069  assert(SrcOps.size() * SrcOps[0].getLLTTy(*getMRI()).getSizeInBits() ==
1070  DstOps[0].getLLTTy(*getMRI()).getSizeInBits() &&
1071  "input operands do not cover output register");
1072  if (SrcOps.size() == 1)
1073  return buildCast(DstOps[0], SrcOps[0]);
1074  if (DstOps[0].getLLTTy(*getMRI()).isVector()) {
1075  if (SrcOps[0].getLLTTy(*getMRI()).isVector())
1076  return buildInstr(TargetOpcode::G_CONCAT_VECTORS, DstOps, SrcOps);
1077  return buildInstr(TargetOpcode::G_BUILD_VECTOR, DstOps, SrcOps);
1078  }
1079  break;
1080  }
1081  case TargetOpcode::G_EXTRACT_VECTOR_ELT: {
1082  assert(DstOps.size() == 1 && "Invalid Dst size");
1083  assert(SrcOps.size() == 2 && "Invalid Src size");
1084  assert(SrcOps[0].getLLTTy(*getMRI()).isVector() && "Invalid operand type");
1085  assert((DstOps[0].getLLTTy(*getMRI()).isScalar() ||
1086  DstOps[0].getLLTTy(*getMRI()).isPointer()) &&
1087  "Invalid operand type");
1088  assert(SrcOps[1].getLLTTy(*getMRI()).isScalar() && "Invalid operand type");
1089  assert(SrcOps[0].getLLTTy(*getMRI()).getElementType() ==
1090  DstOps[0].getLLTTy(*getMRI()) &&
1091  "Type mismatch");
1092  break;
1093  }
1094  case TargetOpcode::G_INSERT_VECTOR_ELT: {
1095  assert(DstOps.size() == 1 && "Invalid dst size");
1096  assert(SrcOps.size() == 3 && "Invalid src size");
1097  assert(DstOps[0].getLLTTy(*getMRI()).isVector() &&
1098  SrcOps[0].getLLTTy(*getMRI()).isVector() && "Invalid operand type");
1099  assert(DstOps[0].getLLTTy(*getMRI()).getElementType() ==
1100  SrcOps[1].getLLTTy(*getMRI()) &&
1101  "Type mismatch");
1102  assert(SrcOps[2].getLLTTy(*getMRI()).isScalar() && "Invalid index");
1103  assert(DstOps[0].getLLTTy(*getMRI()).getNumElements() ==
1104  SrcOps[0].getLLTTy(*getMRI()).getNumElements() &&
1105  "Type mismatch");
1106  break;
1107  }
1108  case TargetOpcode::G_BUILD_VECTOR: {
1109  assert((!SrcOps.empty() || SrcOps.size() < 2) &&
1110  "Must have at least 2 operands");
1111  assert(DstOps.size() == 1 && "Invalid DstOps");
1112  assert(DstOps[0].getLLTTy(*getMRI()).isVector() &&
1113  "Res type must be a vector");
1114  assert(std::all_of(SrcOps.begin(), SrcOps.end(),
1115  [&, this](const SrcOp &Op) {
1116  return Op.getLLTTy(*getMRI()) ==
1117  SrcOps[0].getLLTTy(*getMRI());
1118  }) &&
1119  "type mismatch in input list");
1120  assert(SrcOps.size() * SrcOps[0].getLLTTy(*getMRI()).getSizeInBits() ==
1121  DstOps[0].getLLTTy(*getMRI()).getSizeInBits() &&
1122  "input scalars do not exactly cover the output vector register");
1123  break;
1124  }
1125  case TargetOpcode::G_BUILD_VECTOR_TRUNC: {
1126  assert((!SrcOps.empty() || SrcOps.size() < 2) &&
1127  "Must have at least 2 operands");
1128  assert(DstOps.size() == 1 && "Invalid DstOps");
1129  assert(DstOps[0].getLLTTy(*getMRI()).isVector() &&
1130  "Res type must be a vector");
1131  assert(std::all_of(SrcOps.begin(), SrcOps.end(),
1132  [&, this](const SrcOp &Op) {
1133  return Op.getLLTTy(*getMRI()) ==
1134  SrcOps[0].getLLTTy(*getMRI());
1135  }) &&
1136  "type mismatch in input list");
1137  if (SrcOps[0].getLLTTy(*getMRI()).getSizeInBits() ==
1138  DstOps[0].getLLTTy(*getMRI()).getElementType().getSizeInBits())
1139  return buildInstr(TargetOpcode::G_BUILD_VECTOR, DstOps, SrcOps);
1140  break;
1141  }
1142  case TargetOpcode::G_CONCAT_VECTORS: {
1143  assert(DstOps.size() == 1 && "Invalid DstOps");
1144  assert((!SrcOps.empty() || SrcOps.size() < 2) &&
1145  "Must have at least 2 operands");
1146  assert(std::all_of(SrcOps.begin(), SrcOps.end(),
1147  [&, this](const SrcOp &Op) {
1148  return (Op.getLLTTy(*getMRI()).isVector() &&
1149  Op.getLLTTy(*getMRI()) ==
1150  SrcOps[0].getLLTTy(*getMRI()));
1151  }) &&
1152  "type mismatch in input list");
1153  assert(SrcOps.size() * SrcOps[0].getLLTTy(*getMRI()).getSizeInBits() ==
1154  DstOps[0].getLLTTy(*getMRI()).getSizeInBits() &&
1155  "input vectors do not exactly cover the output vector register");
1156  break;
1157  }
1158  case TargetOpcode::G_UADDE: {
1159  assert(DstOps.size() == 2 && "Invalid no of dst operands");
1160  assert(SrcOps.size() == 3 && "Invalid no of src operands");
1161  assert(DstOps[0].getLLTTy(*getMRI()).isScalar() && "Invalid operand");
1162  assert((DstOps[0].getLLTTy(*getMRI()) == SrcOps[0].getLLTTy(*getMRI())) &&
1163  (DstOps[0].getLLTTy(*getMRI()) == SrcOps[1].getLLTTy(*getMRI())) &&
1164  "Invalid operand");
1165  assert(DstOps[1].getLLTTy(*getMRI()).isScalar() && "Invalid operand");
1166  assert(DstOps[1].getLLTTy(*getMRI()) == SrcOps[2].getLLTTy(*getMRI()) &&
1167  "type mismatch");
1168  break;
1169  }
1170  }
1171 
1172  auto MIB = buildInstr(Opc);
1173  for (const DstOp &Op : DstOps)
1174  Op.addDefToMIB(*getMRI(), MIB);
1175  for (const SrcOp &Op : SrcOps)
1176  Op.addSrcToMIB(MIB);
1177  if (Flags)
1178  MIB->setFlags(*Flags);
1179  return MIB;
1180 }
MachineInstrBuilder buildDirectDbgValue(Register Reg, const MDNode *Variable, const MDNode *Expr)
Build and insert a DBG_VALUE instruction expressing the fact that the associated Variable lives in Re...
bool isFPPredicate() const
Definition: InstrTypes.h:824
uint64_t CallInst * C
const MachineInstrBuilder & addMetadata(const MDNode *MD) const
virtual MachineInstrBuilder buildConstant(const DstOp &Res, const ConstantInt &Val)
Build and insert Res = G_CONSTANT Val.
MachineInstrBuilder buildAtomicRMWFAdd(const DstOp &OldValRes, const SrcOp &Addr, const SrcOp &Val, MachineMemOperand &MMO)
Build and insert OldValRes<def> = G_ATOMICRMW_FADD Addr, Val, MMO.
MachineInstrBuilder buildJumpTable(const LLT PtrTy, unsigned JTI)
Build and insert Res = G_JUMP_TABLE JTI.
void addDefToMIB(MachineRegisterInfo &MRI, MachineInstrBuilder &MIB) const
The CSE Analysis object.
Definition: CSEInfo.h:71
MachineInstrBuilder buildZExtOrTrunc(const DstOp &Res, const SrcOp &Op)
Build and insert Res = G_ZEXT Op, Res = G_TRUNC Op, or Res = COPY Op depending on the differing sizes...
MachineInstrBuilder buildBlockAddress(Register Res, const BlockAddress *BA)
Build and insert Res = G_BLOCK_ADDR BA.
MachineInstrBuilder buildUnmerge(ArrayRef< LLT > Res, const SrcOp &Op)
Build and insert Res0, ...
MachineInstrBuilder buildBrCond(Register Tst, MachineBasicBlock &Dest)
Build and insert G_BRCOND Tst, Dest.
This class represents lattice values for constants.
Definition: AllocatorList.h:23
MachineInstrBuilder buildInsert(Register Res, Register Src, Register Op, unsigned Index)
MachineInstrBuilder buildSExtOrTrunc(const DstOp &Res, const SrcOp &Op)
Build and insert Res = G_SEXT Op, Res = G_TRUNC Op, or Res = COPY Op depending on the differing sizes...
iterator begin() const
Definition: ArrayRef.h:136
unsigned getScalarSizeInBits() const
unsigned getSizeInBits(Register Reg, const MachineRegisterInfo &MRI, const TargetRegisterInfo &TRI) const
Get the size in bits of Reg.
bool isScalar() const
GISelChangeObserver * Observer
MachineInstrBuilder buildCast(const DstOp &Dst, const SrcOp &Src)
Build and insert an appropriate cast between two registers of equal size.
unsigned Reg
const MachineInstrBuilder & addJumpTableIndex(unsigned Idx, unsigned TargetFlags=0) const
virtual const TargetLowering * getTargetLowering() const
LLT getScalarType() const
Function Alias Analysis Results
LLT getType(unsigned Reg) const
Get the low-level type of Reg or LLT{} if Reg is not a generic (target independent) virtual register...
void addSrcToMIB(MachineInstrBuilder &MIB) const
static unsigned getSizeInBits(const fltSemantics &Sem)
Returns the size of the floating point number (in bits) in the given semantics.
Definition: APFloat.cpp:205
bool all_of(R &&range, UnaryPredicate P)
Provide wrappers to std::all_of which take ranges instead of having to pass begin/end explicitly...
Definition: STLExtras.h:1165
A debug info location.
Definition: DebugLoc.h:33
Metadata node.
Definition: Metadata.h:863
void buildSequence(Register Res, ArrayRef< Register > Ops, ArrayRef< uint64_t > Indices)
Build and insert instructions to put Ops together at the specified p Indices to form a larger registe...
const fltSemantics & getSemantics() const
Definition: APFloat.h:1170
MachineInstrBuilder buildAtomicRMW(unsigned Opcode, const DstOp &OldValRes, const SrcOp &Addr, const SrcOp &Val, MachineMemOperand &MMO)
Build and insert OldValRes<def> = G_ATOMICRMW_<Opcode> Addr, Val, MMO.
MachineInstrBuilder buildDynStackAlloc(const DstOp &Res, const SrcOp &Size, unsigned Align)
Build and insert Res = G_DYN_STACKALLOC Size, Align.
void validateSelectOp(const LLT &ResTy, const LLT &TstTy, const LLT &Op0Ty, const LLT &Op1Ty)
MachineInstrBuilder buildUAddo(const DstOp &Res, const DstOp &CarryOut, const SrcOp &Op0, const SrcOp &Op1)
Build and insert Res, CarryOut = G_UADDO Op0, Op1.
const MachineInstrBuilder & addMBB(MachineBasicBlock *MBB, unsigned TargetFlags=0) const
unsigned getBitWidth() const
getBitWidth - Return the bitwidth of this constant.
Definition: Constants.h:142
LegalityPredicate isPointer(unsigned TypeIdx)
True iff the specified type index is a pointer (with any address space).
LegalityPredicate isVector(unsigned TypeIdx)
True iff the specified type index is a vector.
Optional< MachineInstrBuilder > materializeGEP(Register &Res, Register Op0, const LLT &ValueTy, uint64_t Value)
Materialize and insert Res = G_GEP Op0, (G_CONSTANT Value)
MachineInstrBuilder buildBrJT(Register TablePtr, unsigned JTI, Register IndexReg)
Build and insert G_BRJT TablePtr, JTI, IndexReg.
MachineInstrBuilder buildExtract(const DstOp &Res, const SrcOp &Src, uint64_t Index)
Build and insert `Res0, ...
bool isAtomic() const
Returns true if this operation has an atomic ordering requirement of unordered or higher...
MachineInstrBuilder buildAtomicRMWXor(Register OldValRes, Register Addr, Register Val, MachineMemOperand &MMO)
Build and insert OldValRes<def> = G_ATOMICRMW_XOR Addr, Val, MMO.
bool isVector() const
void setMF(MachineFunction &MF)
The address of a basic block.
Definition: Constants.h:839
A description of a memory reference used in the backend.
void setInsertPt(MachineBasicBlock &MBB, MachineBasicBlock::iterator II)
Set the insertion point before the specified position.
MachineInstrBuilder buildAnyExt(const DstOp &Res, const SrcOp &Op)
Build and insert Res = G_ANYEXT Op0.
MachineInstrBuilder buildExtOrTrunc(unsigned ExtOpc, const DstOp &Res, const SrcOp &Op)
Build and insert Res = ExtOpc, Res = G_TRUNC Op, or Res = COPY Op depending on the differing sizes of...
MachineInstrBuilder buildUAdde(const DstOp &Res, const DstOp &CarryOut, const SrcOp &Op0, const SrcOp &Op1, const SrcOp &CarryIn)
Build and insert Res, CarryOut = G_UADDE Op0, Op1, CarryIn.
void validateTruncExt(const LLT &Dst, const LLT &Src, bool IsExtend)
MachineInstrBuilder buildAnyExtOrTrunc(const DstOp &Res, const SrcOp &Op)
Build and insert Res = G_ANYEXT Op, Res = G_TRUNC Op, or Res = COPY Op depending on the differing sizes of Res and Op.
MachineBasicBlock::iterator II
const MachineInstrBuilder & addDef(Register RegNo, unsigned Flags=0, unsigned SubReg=0) const
Add a virtual register definition operand.
void recordInsertion(MachineInstr *MI) const
APFloat getAPFloatFromSize(double Val, unsigned Size)
Returns an APFloat from Val converted to the appropriate size.
Definition: Utils.cpp:325
MachineInstrBuilder buildLoadInstr(unsigned Opcode, const DstOp &Res, const SrcOp &Addr, MachineMemOperand &MMO)
Build and insert Res = <opcode> Addr, MMO.
MachineInstrBuilder buildFence(unsigned Ordering, unsigned Scope)
Build and insert G_FENCE Ordering, Scope.
MachineInstrBuilder buildBrIndirect(Register Tgt)
Build and insert G_BRINDIRECT Tgt.
MachineInstrBuilder buildSelect(const DstOp &Res, const SrcOp &Tst, const SrcOp &Op0, const SrcOp &Op1, Optional< unsigned > Flags=None)
Build and insert a Res = G_SELECT Tst, Op0, Op1.
static DIExpression * append(const DIExpression *Expr, ArrayRef< uint64_t > Ops)
Append the opcodes Ops to DIExpr.
MachineInstrBuilder buildInstrNoInsert(unsigned Opcode)
Build but don't insert <empty> = Opcode <empty>.
void validateBinaryOp(const LLT &Res, const LLT &Op0, const LLT &Op1)
MachineFunction & getMF()
Getter for the function we currently build.
const MachineInstrBuilder & addFPImm(const ConstantFP *Val) const
ArrayRef - Represent a constant reference to an array (0 or more elements consecutively in memory)...
Definition: APInt.h:32
virtual const TargetInstrInfo * getInstrInfo() const
MachineInstrBuilder buildAtomicRMWUmax(Register OldValRes, Register Addr, Register Val, MachineMemOperand &MMO)
Build and insert OldValRes<def> = G_ATOMICRMW_UMAX Addr, Val, MMO.
static Function * getFunction(Constant *C)
Definition: Evaluator.cpp:258
instr_iterator insert(instr_iterator I, MachineInstr *M)
Insert MI into the instruction list before I, possibly inside a bundle.
MachineInstrBuilder buildAtomicRMWFSub(const DstOp &OldValRes, const SrcOp &Addr, const SrcOp &Val, MachineMemOperand &MMO)
Build and insert OldValRes<def> = G_ATOMICRMW_FSUB Addr, Val, MMO.
MachineInstrBuilder buildExtractVectorElement(const DstOp &Res, const SrcOp &Val, const SrcOp &Idx)
Build and insert Res = G_EXTRACT_VECTOR_ELT Val, Idx.
Analysis containing CSE Info
Definition: CSEInfo.cpp:20
void setChangeObserver(GISelChangeObserver &Observer)
MachineBasicBlock::iterator getInsertPt()
Current insertion point for new instructions.
MachineInstrBuilder buildDbgLabel(const MDNode *Label)
Build and insert a DBG_LABEL instructions specifying that Label is given.
MachineInstrBuilder BuildMI(MachineFunction &MF, const DebugLoc &DL, const MCInstrDesc &MCID)
Builder interface. Specify how to create the initial instruction itself.
const MachineInstrBuilder & addUse(Register RegNo, unsigned Flags=0, unsigned SubReg=0) const
Add a virtual register use operand.
MachineInstrBundleIterator< MachineInstr > iterator
MachineInstrBuilder buildSExt(const DstOp &Res, const SrcOp &Op)
Build and insert Res = G_SEXT Op.
void validateShiftOp(const LLT &Res, const LLT &Op0, const LLT &Op1)
MachineRegisterInfo * getMRI()
Getter for MRI.
Abstract class that contains various methods for clients to notify about changes. ...
MachineInstrBuilder buildFPTrunc(const DstOp &Res, const SrcOp &Op)
Build and insert Res = G_FPTRUNC Op.
const TargetInstrInfo * TII
Information used to access the description of the opcodes.
const MachineInstrBuilder & addCImm(const ConstantInt *Val) const
MachineInstrBuilder buildAtomicRMWMax(Register OldValRes, Register Addr, Register Val, MachineMemOperand &MMO)
Build and insert OldValRes<def> = G_ATOMICRMW_MAX Addr, Val, MMO.
const TargetSubtargetInfo & getSubtarget() const
getSubtarget - Return the subtarget for which this machine code is being compiled.
MachineInstrBuilder buildInstr(unsigned Opcode)
Build and insert <empty> = Opcode <empty>.
size_t size() const
size - Get the array size.
Definition: ArrayRef.h:148
MachineInstrBuilder buildZExt(const DstOp &Res, const SrcOp &Op)
Build and insert Res = G_ZEXT Op.
This is an important base class in LLVM.
Definition: Constant.h:41
MachineInstrBuilder buildBuildVector(const DstOp &Res, ArrayRef< Register > Ops)
Build and insert Res = G_BUILD_VECTOR Op0, ...
MachineInstrBuilder buildAtomicRMWXchg(Register OldValRes, Register Addr, Register Val, MachineMemOperand &MMO)
Build and insert OldValRes<def> = G_ATOMICRMW_XCHG Addr, Val, MMO.
virtual void createdInstr(MachineInstr &MI)=0
An instruction has been created and inserted into the function.
ConstantFP - Floating Point Values [float, double].
Definition: Constants.h:263
void setInstr(MachineInstr &MI)
Set the insertion point to before MI.
bool isValid() const
Predicate
This enumeration lists the possible predicates for CmpInst subclasses.
Definition: InstrTypes.h:732
MachineInstrBuilder buildGlobalValue(const DstOp &Res, const GlobalValue *GV)
Build and insert Res = G_GLOBAL_VALUE GV.
MachineInstrBuilder buildFIDbgValue(int FI, const MDNode *Variable, const MDNode *Expr)
Build and insert a DBG_VALUE instruction expressing the fact that the associated Variable lives in th...
unsigned getAddressSpace() const
Return the address space of the Pointer type.
Definition: DerivedTypes.h:607
DebugLoc DL
Debug location to be set to any instruction we create.
self_iterator getIterator()
Definition: ilist_node.h:81
unsigned getAddressSpace() const
MachineInstrBuilder buildGEP(const DstOp &Res, const SrcOp &Op0, const SrcOp &Op1)
Build and insert Res = G_GEP Op0, Op1.
const MachineInstrBuilder & addFrameIndex(int Idx) const
LLVMContext & getContext() const
getContext - Return a reference to the LLVMContext associated with this function. ...
Definition: Function.cpp:205
MachineInstrBuilder buildCopy(const DstOp &Res, const SrcOp &Op)
Build and insert Res = COPY Op.
MachineInstrBuilder buildTrunc(const DstOp &Res, const SrcOp &Op)
Build and insert Res = G_TRUNC Op.
static wasm::ValType getType(const TargetRegisterClass *RC)
MachineInstrBuilder buildAtomicRMWAdd(Register OldValRes, Register Addr, Register Val, MachineMemOperand &MMO)
Build and insert OldValRes<def> = G_ATOMICRMW_ADD Addr, Val, MMO.
This struct is a compact representation of a valid (non-zero power of two) alignment.
Definition: Alignment.h:40
MachineInstrBuilder buildBuildVectorTrunc(const DstOp &Res, ArrayRef< Register > Ops)
Build and insert Res = G_BUILD_VECTOR_TRUNC Op0, ...
MachineInstrBuilder buildIntrinsic(Intrinsic::ID ID, ArrayRef< Register > Res, bool HasSideEffects)
Build and insert either a G_INTRINSIC (if HasSideEffects is false) or G_INTRINSIC_W_SIDE_EFFECTS inst...
const APFloat & getValueAPF() const
Definition: Constants.h:302
MachineInstrBuilder buildBr(MachineBasicBlock &Dest)
Build and insert G_BR Dest.
MachineInstrBuilder buildConstDbgValue(const Constant &C, const MDNode *Variable, const MDNode *Expr)
Build and insert a DBG_VALUE instructions specifying that Variable is given by C (suitably modified b...
static IntegerType * get(LLVMContext &C, unsigned NumBits)
This static method is the primary way of constructing an IntegerType.
Definition: Type.cpp:244
MachineInstrBuilder buildLoad(const DstOp &Res, const SrcOp &Addr, MachineMemOperand &MMO)
Build and insert Res = G_LOAD Addr, MMO.
MachineInstrBuilder buildICmp(CmpInst::Predicate Pred, const DstOp &Res, const SrcOp &Op0, const SrcOp &Op1)
Build and insert a Res = G_ICMP Pred, Op0, Op1.
This is the shared class of boolean and integer constants.
Definition: Constants.h:83
MachineInstrBuilder buildAtomicRMWSub(Register OldValRes, Register Addr, Register Val, MachineMemOperand &MMO)
Build and insert OldValRes<def> = G_ATOMICRMW_SUB Addr, Val, MMO.
virtual MachineInstrBuilder buildFConstant(const DstOp &Res, const ConstantFP &Val)
Build and insert Res = G_FCONSTANT Val.
This is a 'vector' (really, a variable-sized array), optimized for the case when the array is small...
Definition: SmallVector.h:837
MachineInstrBuilder buildFrameIndex(const DstOp &Res, int Idx)
Build and insert Res = G_FRAME_INDEX Idx.
iterator end() const
Definition: ArrayRef.h:137
unsigned getSizeInBits() const
Returns the total size of the type. Must only be called on sized types.
const MachineInstrBuilder & addMemOperand(MachineMemOperand *MMO) const
MachineInstrBuilder buildAtomicRMWOr(Register OldValRes, Register Addr, Register Val, MachineMemOperand &MMO)
Build and insert OldValRes<def> = G_ATOMICRMW_OR Addr, Val, MMO.
MachineInstrBuilder buildAtomicRMWUmin(Register OldValRes, Register Addr, Register Val, MachineMemOperand &MMO)
Build and insert OldValRes<def> = G_ATOMICRMW_UMIN Addr, Val, MMO.
const TargetInstrInfo & getTII()
static Constant * get(Type *Ty, uint64_t V, bool isSigned=false)
If Ty is a vector type, return a Constant with a splat of the given value.
Definition: Constants.cpp:653
LegalityPredicate isScalar(unsigned TypeIdx)
True iff the specified type index is a scalar.
static Constant * get(Type *Ty, double V)
This returns a ConstantFP, or a vector containing a splat of a ConstantFP, for the specified value in...
Definition: Constants.cpp:716
MachineInstrBuilder buildPtrMask(const DstOp &Res, const SrcOp &Op0, uint32_t NumBits)
Build and insert Res = G_PTR_MASK Op0, NumBits.
LLT getLLTTy(const MachineRegisterInfo &MRI) const
DWARF expression.
const Function & getFunction() const
Return the LLVM function that this machine code represents.
void setCSEInfo(GISelCSEInfo *Info)
This file declares the MachineIRBuilder class.
MachineInstrBuilder buildFCmp(CmpInst::Predicate Pred, const DstOp &Res, const SrcOp &Op0, const SrcOp &Op1, Optional< unsigned > Flags=None)
Build and insert a Res = G_FCMP Pred, Op0, Op1.
MachineInstrBuilder buildInsertVectorElement(const DstOp &Res, const SrcOp &Val, const SrcOp &Elt, const SrcOp &Idx)
Build and insert Res = G_INSERT_VECTOR_ELT Val, Elt, Idx.
bool isIntPredicate() const
Definition: InstrTypes.h:825
Class for arbitrary precision integers.
Definition: APInt.h:69
Register getReg() const
MachineInstrBuilder buildAtomicCmpXchgWithSuccess(Register OldValRes, Register SuccessRes, Register Addr, Register CmpVal, Register NewVal, MachineMemOperand &MMO)
Build and insert OldValRes<def>, SuccessRes<def> = G_ATOMIC_CMPXCHG_WITH_SUCCESS Addr, CmpVal, NewVal, MMO.
unsigned getBoolExtOp(bool IsVec, bool IsFP) const
LLT getLLTTy(const MachineRegisterInfo &MRI) const
bool isPointer() const
MachineInstrBuilder buildAtomicRMWAnd(Register OldValRes, Register Addr, Register Val, MachineMemOperand &MMO)
Build and insert OldValRes<def> = G_ATOMICRMW_AND Addr, Val, MMO.
const MachineBasicBlock * getParent() const
Definition: MachineInstr.h:255
MachineInstrBuilder buildAtomicRMWNand(Register OldValRes, Register Addr, Register Val, MachineMemOperand &MMO)
Build and insert OldValRes<def> = G_ATOMICRMW_NAND Addr, Val, MMO.
Representation of each machine instruction.
Definition: MachineInstr.h:63
const MachineFunction * getParent() const
Return the MachineFunction containing this basic block.
const MachineInstrBuilder & addImm(int64_t Val) const
Add a new immediate operand.
MachineRegisterInfo & getRegInfo()
getRegInfo - Return information about the registers currently in use.
MachineInstrBuilder buildIndirectDbgValue(Register Reg, const MDNode *Variable, const MDNode *Expr)
Build and insert a DBG_VALUE instruction expressing the fact that the associated Variable lives in me...
const MachineBasicBlock & getMBB() const
Getter for the basic block we currently build.
MachineInstrBuilder buildBoolExt(const DstOp &Res, const SrcOp &Op, bool IsFP)
void setMBB(MachineBasicBlock &MBB)
Set the insertion point to the end of MBB.
#define I(x, y, z)
Definition: MD5.cpp:58
MachineInstrBuilder buildStore(const SrcOp &Val, const SrcOp &Addr, MachineMemOperand &MMO)
Build and insert G_STORE Val, Addr, MMO.
uint32_t Size
Definition: Profile.cpp:46
MachineInstrBuilder buildAtomicRMWMin(Register OldValRes, Register Addr, Register Val, MachineMemOperand &MMO)
Build and insert OldValRes<def> = G_ATOMICRMW_MIN Addr, Val, MMO.
assert(ImpDefSCC.getReg()==AMDGPU::SCC &&ImpDefSCC.isDef())
MachineInstrBuilder insertInstr(MachineInstrBuilder MIB)
Insert an existing instruction at the insertion point.
LLVM Value Representation.
Definition: Value.h:74
uint16_t getNumElements() const
Returns the number of elements in a vector LLT.
MachineInstrBuilder buildMerge(const DstOp &Res, ArrayRef< Register > Ops)
Build and insert Res = G_MERGE_VALUES Op0, ...
IRTranslator LLVM IR MI
MachineInstrBuilder buildUndef(const DstOp &Res)
Build and insert Res = IMPLICIT_DEF.
Register createGenericVirtualRegister(LLT Ty, StringRef Name="")
Create and return a new generic virtual register with low-level type Ty.
MachineFunction * MF
MachineFunction under construction.
MachineInstrBuilder buildConcatVectors(const DstOp &Res, ArrayRef< Register > Ops)
Build and insert Res = G_CONCAT_VECTORS Op0, ...
MachineInstrBuilder buildSplatVector(const DstOp &Res, const SrcOp &Src)
Build and insert Res = G_BUILD_VECTOR with Src replicated to fill the number of elements.
const DebugLoc & getDL()
Getter for DebugLoc.
MachineInstrBuilder buildAtomicCmpXchg(Register OldValRes, Register Addr, Register CmpVal, Register NewVal, MachineMemOperand &MMO)
Build and insert OldValRes<def> = G_ATOMIC_CMPXCHG Addr, CmpVal, NewVal, MMO.
const MachineInstrBuilder & addReg(Register RegNo, unsigned flags=0, unsigned SubReg=0) const
Add a new virtual register operand.
PointerType * getType() const
Global values are always pointers.
Definition: GlobalValue.h:277
Wrapper class representing virtual and physical registers.
Definition: Register.h:19
bool empty() const
empty - Check if the array is empty.
Definition: ArrayRef.h:143
This file describes how to lower LLVM code to machine code.
const MachineInstrBuilder & addBlockAddress(const BlockAddress *BA, int64_t Offset=0, unsigned TargetFlags=0) const
MachineRegisterInfo * MRI
Information used to verify types are consistent and to create virtual registers.