LLVM  9.0.0svn
MachineIRBuilder.cpp
Go to the documentation of this file.
1 //===-- llvm/CodeGen/GlobalISel/MachineIRBuilder.cpp - MIBuilder--*- C++ -*-==//
2 //
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 //
7 //===----------------------------------------------------------------------===//
8 /// \file
9 /// This file implements the MachineIRBuilder class.
10 //===----------------------------------------------------------------------===//
13 
22 #include "llvm/IR/DebugInfo.h"
23 
24 using namespace llvm;
25 
27  State.MF = &MF;
28  State.MBB = nullptr;
29  State.MRI = &MF.getRegInfo();
30  State.TII = MF.getSubtarget().getInstrInfo();
31  State.DL = DebugLoc();
33  State.Observer = nullptr;
34 }
35 
37  State.MBB = &MBB;
38  State.II = MBB.end();
39  assert(&getMF() == MBB.getParent() &&
40  "Basic block is in a different function");
41 }
42 
44  assert(MI.getParent() && "Instruction is not part of a basic block");
45  setMBB(*MI.getParent());
46  State.II = MI.getIterator();
47 }
48 
50 
53  assert(MBB.getParent() == &getMF() &&
54  "Basic block is in a different function");
55  State.MBB = &MBB;
56  State.II = II;
57 }
58 
59 void MachineIRBuilder::recordInsertion(MachineInstr *InsertedInstr) const {
60  if (State.Observer)
61  State.Observer->createdInstr(*InsertedInstr);
62 }
63 
65  State.Observer = &Observer;
66 }
67 
69 
70 //------------------------------------------------------------------------------
71 // Build instruction variants.
72 //------------------------------------------------------------------------------
73 
75  return insertInstr(buildInstrNoInsert(Opcode));
76 }
77 
79  MachineInstrBuilder MIB = BuildMI(getMF(), getDL(), getTII().get(Opcode));
80  return MIB;
81 }
82 
84  getMBB().insert(getInsertPt(), MIB);
85  recordInsertion(MIB);
86  return MIB;
87 }
88 
91  const MDNode *Expr) {
92  assert(isa<DILocalVariable>(Variable) && "not a variable");
93  assert(cast<DIExpression>(Expr)->isValid() && "not an expression");
94  assert(
95  cast<DILocalVariable>(Variable)->isValidLocationForIntrinsic(getDL()) &&
96  "Expected inlined-at fields to agree");
97  return insertInstr(BuildMI(getMF(), getDL(),
98  getTII().get(TargetOpcode::DBG_VALUE),
99  /*IsIndirect*/ false, Reg, Variable, Expr));
100 }
101 
104  const MDNode *Expr) {
105  assert(isa<DILocalVariable>(Variable) && "not a variable");
106  assert(cast<DIExpression>(Expr)->isValid() && "not an expression");
107  assert(
108  cast<DILocalVariable>(Variable)->isValidLocationForIntrinsic(getDL()) &&
109  "Expected inlined-at fields to agree");
110  return insertInstr(BuildMI(getMF(), getDL(),
111  getTII().get(TargetOpcode::DBG_VALUE),
112  /*IsIndirect*/ true, Reg, Variable, Expr));
113 }
114 
116  const MDNode *Variable,
117  const MDNode *Expr) {
118  assert(isa<DILocalVariable>(Variable) && "not a variable");
119  assert(cast<DIExpression>(Expr)->isValid() && "not an expression");
120  assert(
121  cast<DILocalVariable>(Variable)->isValidLocationForIntrinsic(getDL()) &&
122  "Expected inlined-at fields to agree");
123  return buildInstr(TargetOpcode::DBG_VALUE)
124  .addFrameIndex(FI)
125  .addImm(0)
126  .addMetadata(Variable)
127  .addMetadata(Expr);
128 }
129 
131  const MDNode *Variable,
132  const MDNode *Expr) {
133  assert(isa<DILocalVariable>(Variable) && "not a variable");
134  assert(cast<DIExpression>(Expr)->isValid() && "not an expression");
135  assert(
136  cast<DILocalVariable>(Variable)->isValidLocationForIntrinsic(getDL()) &&
137  "Expected inlined-at fields to agree");
138  auto MIB = buildInstr(TargetOpcode::DBG_VALUE);
139  if (auto *CI = dyn_cast<ConstantInt>(&C)) {
140  if (CI->getBitWidth() > 64)
141  MIB.addCImm(CI);
142  else
143  MIB.addImm(CI->getZExtValue());
144  } else if (auto *CFP = dyn_cast<ConstantFP>(&C)) {
145  MIB.addFPImm(CFP);
146  } else {
147  // Insert %noreg if we didn't find a usable constant and had to drop it.
148  MIB.addReg(0U);
149  }
150 
151  return MIB.addImm(0).addMetadata(Variable).addMetadata(Expr);
152 }
153 
155  assert(isa<DILabel>(Label) && "not a label");
156  assert(cast<DILabel>(Label)->isValidLocationForIntrinsic(State.DL) &&
157  "Expected inlined-at fields to agree");
158  auto MIB = buildInstr(TargetOpcode::DBG_LABEL);
159 
160  return MIB.addMetadata(Label);
161 }
162 
164  assert(getMRI()->getType(Res).isPointer() && "invalid operand type");
165  return buildInstr(TargetOpcode::G_FRAME_INDEX)
166  .addDef(Res)
167  .addFrameIndex(Idx);
168 }
169 
171  const GlobalValue *GV) {
172  assert(getMRI()->getType(Res).isPointer() && "invalid operand type");
173  assert(getMRI()->getType(Res).getAddressSpace() ==
174  GV->getType()->getAddressSpace() &&
175  "address space mismatch");
176 
177  return buildInstr(TargetOpcode::G_GLOBAL_VALUE)
178  .addDef(Res)
179  .addGlobalAddress(GV);
180 }
181 
183  unsigned JTI) {
184  return buildInstr(TargetOpcode::G_JUMP_TABLE, {PtrTy}, {})
185  .addJumpTableIndex(JTI);
186 }
187 
/// Debug-build sanity check for a generic binary operation: the result type
/// must be a scalar or a vector, and both source operands must have exactly
/// the same LLT as the result.
void MachineIRBuilder::validateBinaryOp(const LLT &Res, const LLT &Op0,
                                        const LLT &Op1) {
  // Pointers are not valid results of a plain binary op (use G_GEP etc.).
  assert((Res.isScalar() || Res.isVector()) && "invalid operand type");
  assert((Res == Op0 && Res == Op1) && "type mismatch");
}
193 
/// Debug-build sanity check for a generic shift (G_SHL/G_ASHR/G_LSHR): the
/// result must be a scalar or vector and must match the type of the value
/// being shifted (Op0).
/// NOTE(review): the shift-amount type (Op1) is deliberately not checked
/// here — presumably because targets may use a different scalar type for
/// the amount; confirm against the G_SHL legalization rules.
void MachineIRBuilder::validateShiftOp(const LLT &Res, const LLT &Op0,
                                       const LLT &Op1) {
  assert((Res.isScalar() || Res.isVector()) && "invalid operand type");
  assert((Res == Op0) && "type mismatch");
}
199 
201  unsigned Op1) {
202  assert(getMRI()->getType(Res).isPointer() &&
203  getMRI()->getType(Res) == getMRI()->getType(Op0) && "type mismatch");
204  assert(getMRI()->getType(Op1).isScalar() && "invalid offset type");
205 
206  return buildInstr(TargetOpcode::G_GEP)
207  .addDef(Res)
208  .addUse(Op0)
209  .addUse(Op1);
210 }
211 
213 MachineIRBuilder::materializeGEP(unsigned &Res, unsigned Op0,
214  const LLT &ValueTy, uint64_t Value) {
215  assert(Res == 0 && "Res is a result argument");
216  assert(ValueTy.isScalar() && "invalid offset type");
217 
218  if (Value == 0) {
219  Res = Op0;
220  return None;
221  }
222 
224  auto Cst = buildConstant(ValueTy, Value);
225  return buildGEP(Res, Op0, Cst.getReg(0));
226 }
227 
229  uint32_t NumBits) {
230  assert(getMRI()->getType(Res).isPointer() &&
231  getMRI()->getType(Res) == getMRI()->getType(Op0) && "type mismatch");
232 
233  return buildInstr(TargetOpcode::G_PTR_MASK)
234  .addDef(Res)
235  .addUse(Op0)
236  .addImm(NumBits);
237 }
238 
240  return buildInstr(TargetOpcode::G_BR).addMBB(&Dest);
241 }
242 
244  assert(getMRI()->getType(Tgt).isPointer() && "invalid branch destination");
245  return buildInstr(TargetOpcode::G_BRINDIRECT).addUse(Tgt);
246 }
247 
249  unsigned JTI,
250  unsigned IndexReg) {
251  assert(getMRI()->getType(TablePtr).isPointer() &&
252  "Table reg must be a pointer");
253  return buildInstr(TargetOpcode::G_BRJT)
254  .addUse(TablePtr)
255  .addJumpTableIndex(JTI)
256  .addUse(IndexReg);
257 }
258 
260  const SrcOp &Op) {
261  return buildInstr(TargetOpcode::COPY, Res, Op);
262 }
263 
265  const ConstantInt &Val) {
266  LLT Ty = Res.getLLTTy(*getMRI());
267  LLT EltTy = Ty.getScalarType();
268  assert(EltTy.getScalarSizeInBits() == Val.getBitWidth() &&
269  "creating constant with the wrong size");
270 
271  if (Ty.isVector()) {
272  auto Const = buildInstr(TargetOpcode::G_CONSTANT)
273  .addDef(getMRI()->createGenericVirtualRegister(EltTy))
274  .addCImm(&Val);
275  return buildSplatVector(Res, Const);
276  }
277 
278  auto Const = buildInstr(TargetOpcode::G_CONSTANT);
279  Res.addDefToMIB(*getMRI(), Const);
280  Const.addCImm(&Val);
281  return Const;
282 }
283 
285  int64_t Val) {
286  auto IntN = IntegerType::get(getMF().getFunction().getContext(),
288  ConstantInt *CI = ConstantInt::get(IntN, Val, true);
289  return buildConstant(Res, *CI);
290 }
291 
293  const ConstantFP &Val) {
294  LLT Ty = Res.getLLTTy(*getMRI());
295  LLT EltTy = Ty.getScalarType();
296 
298  == EltTy.getSizeInBits() &&
299  "creating fconstant with the wrong size");
300 
301  assert(!Ty.isPointer() && "invalid operand type");
302 
303  if (Ty.isVector()) {
304  auto Const = buildInstr(TargetOpcode::G_FCONSTANT)
305  .addDef(getMRI()->createGenericVirtualRegister(EltTy))
306  .addFPImm(&Val);
307 
308  return buildSplatVector(Res, Const);
309  }
310 
311  auto Const = buildInstr(TargetOpcode::G_FCONSTANT);
312  Res.addDefToMIB(*getMRI(), Const);
313  Const.addFPImm(&Val);
314  return Const;
315 }
316 
318  const APInt &Val) {
319  ConstantInt *CI = ConstantInt::get(getMF().getFunction().getContext(), Val);
320  return buildConstant(Res, *CI);
321 }
322 
324  double Val) {
325  LLT DstTy = Res.getLLTTy(*getMRI());
326  auto &Ctx = getMF().getFunction().getContext();
327  auto *CFP =
329  return buildFConstant(Res, *CFP);
330 }
331 
333  const APFloat &Val) {
334  auto &Ctx = getMF().getFunction().getContext();
335  auto *CFP = ConstantFP::get(Ctx, Val);
336  return buildFConstant(Res, *CFP);
337 }
338 
340  MachineBasicBlock &Dest) {
341  assert(getMRI()->getType(Tst).isScalar() && "invalid operand type");
342 
343  return buildInstr(TargetOpcode::G_BRCOND).addUse(Tst).addMBB(&Dest);
344 }
345 
347  MachineMemOperand &MMO) {
348  return buildLoadInstr(TargetOpcode::G_LOAD, Res, Addr, MMO);
349 }
350 
352  unsigned Res,
353  unsigned Addr,
354  MachineMemOperand &MMO) {
355  assert(getMRI()->getType(Res).isValid() && "invalid operand type");
356  assert(getMRI()->getType(Addr).isPointer() && "invalid operand type");
357 
358  return buildInstr(Opcode)
359  .addDef(Res)
360  .addUse(Addr)
361  .addMemOperand(&MMO);
362 }
363 
365  MachineMemOperand &MMO) {
366  assert(getMRI()->getType(Val).isValid() && "invalid operand type");
367  assert(getMRI()->getType(Addr).isPointer() && "invalid operand type");
368 
369  return buildInstr(TargetOpcode::G_STORE)
370  .addUse(Val)
371  .addUse(Addr)
372  .addMemOperand(&MMO);
373 }
374 
376  const DstOp &CarryOut,
377  const SrcOp &Op0,
378  const SrcOp &Op1) {
379  return buildInstr(TargetOpcode::G_UADDO, {Res, CarryOut}, {Op0, Op1});
380 }
381 
383  const DstOp &CarryOut,
384  const SrcOp &Op0,
385  const SrcOp &Op1,
386  const SrcOp &CarryIn) {
387  return buildInstr(TargetOpcode::G_UADDE, {Res, CarryOut},
388  {Op0, Op1, CarryIn});
389 }
390 
392  const SrcOp &Op) {
393  return buildInstr(TargetOpcode::G_ANYEXT, Res, Op);
394 }
395 
397  const SrcOp &Op) {
398  return buildInstr(TargetOpcode::G_SEXT, Res, Op);
399 }
400 
402  const SrcOp &Op) {
403  return buildInstr(TargetOpcode::G_ZEXT, Res, Op);
404 }
405 
406 unsigned MachineIRBuilder::getBoolExtOp(bool IsVec, bool IsFP) const {
407  const auto *TLI = getMF().getSubtarget().getTargetLowering();
408  switch (TLI->getBooleanContents(IsVec, IsFP)) {
410  return TargetOpcode::G_SEXT;
412  return TargetOpcode::G_ZEXT;
413  default:
414  return TargetOpcode::G_ANYEXT;
415  }
416 }
417 
419  const SrcOp &Op,
420  bool IsFP) {
421  unsigned ExtOp = getBoolExtOp(getMRI()->getType(Op.getReg()).isVector(), IsFP);
422  return buildInstr(ExtOp, Res, Op);
423 }
424 
426  const DstOp &Res,
427  const SrcOp &Op) {
428  assert((TargetOpcode::G_ANYEXT == ExtOpc || TargetOpcode::G_ZEXT == ExtOpc ||
429  TargetOpcode::G_SEXT == ExtOpc) &&
430  "Expecting Extending Opc");
431  assert(Res.getLLTTy(*getMRI()).isScalar() ||
432  Res.getLLTTy(*getMRI()).isVector());
433  assert(Res.getLLTTy(*getMRI()).isScalar() ==
434  Op.getLLTTy(*getMRI()).isScalar());
435 
436  unsigned Opcode = TargetOpcode::COPY;
437  if (Res.getLLTTy(*getMRI()).getSizeInBits() >
438  Op.getLLTTy(*getMRI()).getSizeInBits())
439  Opcode = ExtOpc;
440  else if (Res.getLLTTy(*getMRI()).getSizeInBits() <
441  Op.getLLTTy(*getMRI()).getSizeInBits())
442  Opcode = TargetOpcode::G_TRUNC;
443  else
444  assert(Res.getLLTTy(*getMRI()) == Op.getLLTTy(*getMRI()));
445 
446  return buildInstr(Opcode, Res, Op);
447 }
448 
450  const SrcOp &Op) {
451  return buildExtOrTrunc(TargetOpcode::G_SEXT, Res, Op);
452 }
453 
455  const SrcOp &Op) {
456  return buildExtOrTrunc(TargetOpcode::G_ZEXT, Res, Op);
457 }
458 
460  const SrcOp &Op) {
461  return buildExtOrTrunc(TargetOpcode::G_ANYEXT, Res, Op);
462 }
463 
465  const SrcOp &Src) {
466  LLT SrcTy = Src.getLLTTy(*getMRI());
467  LLT DstTy = Dst.getLLTTy(*getMRI());
468  if (SrcTy == DstTy)
469  return buildCopy(Dst, Src);
470 
471  unsigned Opcode;
472  if (SrcTy.isPointer() && DstTy.isScalar())
473  Opcode = TargetOpcode::G_PTRTOINT;
474  else if (DstTy.isPointer() && SrcTy.isScalar())
475  Opcode = TargetOpcode::G_INTTOPTR;
476  else {
477  assert(!SrcTy.isPointer() && !DstTy.isPointer() && "n G_ADDRCAST yet");
478  Opcode = TargetOpcode::G_BITCAST;
479  }
480 
481  return buildInstr(Opcode, Dst, Src);
482 }
483 
485  const SrcOp &Src,
486  uint64_t Index) {
487  LLT SrcTy = Src.getLLTTy(*getMRI());
488  LLT DstTy = Dst.getLLTTy(*getMRI());
489 
490 #ifndef NDEBUG
491  assert(SrcTy.isValid() && "invalid operand type");
492  assert(DstTy.isValid() && "invalid operand type");
493  assert(Index + DstTy.getSizeInBits() <= SrcTy.getSizeInBits() &&
494  "extracting off end of register");
495 #endif
496 
497  if (DstTy.getSizeInBits() == SrcTy.getSizeInBits()) {
498  assert(Index == 0 && "insertion past the end of a register");
499  return buildCast(Dst, Src);
500  }
501 
502  auto Extract = buildInstr(TargetOpcode::G_EXTRACT);
503  Dst.addDefToMIB(*getMRI(), Extract);
504  Src.addSrcToMIB(Extract);
505  Extract.addImm(Index);
506  return Extract;
507 }
508 
510  ArrayRef<uint64_t> Indices) {
511 #ifndef NDEBUG
512  assert(Ops.size() == Indices.size() && "incompatible args");
513  assert(!Ops.empty() && "invalid trivial sequence");
514  assert(std::is_sorted(Indices.begin(), Indices.end()) &&
515  "sequence offsets must be in ascending order");
516 
517  assert(getMRI()->getType(Res).isValid() && "invalid operand type");
518  for (auto Op : Ops)
519  assert(getMRI()->getType(Op).isValid() && "invalid operand type");
520 #endif
521 
522  LLT ResTy = getMRI()->getType(Res);
523  LLT OpTy = getMRI()->getType(Ops[0]);
524  unsigned OpSize = OpTy.getSizeInBits();
525  bool MaybeMerge = true;
526  for (unsigned i = 0; i < Ops.size(); ++i) {
527  if (getMRI()->getType(Ops[i]) != OpTy || Indices[i] != i * OpSize) {
528  MaybeMerge = false;
529  break;
530  }
531  }
532 
533  if (MaybeMerge && Ops.size() * OpSize == ResTy.getSizeInBits()) {
534  buildMerge(Res, Ops);
535  return;
536  }
537 
538  unsigned ResIn = getMRI()->createGenericVirtualRegister(ResTy);
539  buildUndef(ResIn);
540 
541  for (unsigned i = 0; i < Ops.size(); ++i) {
542  unsigned ResOut = i + 1 == Ops.size()
543  ? Res
545  buildInsert(ResOut, ResIn, Ops[i], Indices[i]);
546  ResIn = ResOut;
547  }
548 }
549 
551  return buildInstr(TargetOpcode::G_IMPLICIT_DEF, {Res}, {});
552 }
553 
555  ArrayRef<unsigned> Ops) {
 556  // Unfortunately to convert from ArrayRef<unsigned> to ArrayRef<SrcOp>,
 557  // we need some temporary storage for the SrcOp objects. Here we use a
558  // sufficiently large SmallVector to not go through the heap.
559  SmallVector<SrcOp, 8> TmpVec(Ops.begin(), Ops.end());
560  return buildInstr(TargetOpcode::G_MERGE_VALUES, Res, TmpVec);
561 }
562 
564  const SrcOp &Op) {
565  // Unfortunately to convert from ArrayRef<LLT> to ArrayRef<DstOp>,
566  // we need some temporary storage for the DstOp objects. Here we use a
567  // sufficiently large SmallVector to not go through the heap.
568  SmallVector<DstOp, 8> TmpVec(Res.begin(), Res.end());
569  return buildInstr(TargetOpcode::G_UNMERGE_VALUES, TmpVec, Op);
570 }
571 
573  const SrcOp &Op) {
574  unsigned NumReg = Op.getLLTTy(*getMRI()).getSizeInBits() / Res.getSizeInBits();
576  for (unsigned I = 0; I != NumReg; ++I)
577  TmpVec.push_back(getMRI()->createGenericVirtualRegister(Res));
578  return buildUnmerge(TmpVec, Op);
579 }
580 
582  const SrcOp &Op) {
583  // Unfortunately to convert from ArrayRef<unsigned> to ArrayRef<DstOp>,
584  // we need some temporary storage for the DstOp objects. Here we use a
585  // sufficiently large SmallVector to not go through the heap.
586  SmallVector<DstOp, 8> TmpVec(Res.begin(), Res.end());
587  return buildInstr(TargetOpcode::G_UNMERGE_VALUES, TmpVec, Op);
588 }
589 
591  ArrayRef<unsigned> Ops) {
592  // Unfortunately to convert from ArrayRef<unsigned> to ArrayRef<SrcOp>,
 593  // we need some temporary storage for the SrcOp objects. Here we use a
594  // sufficiently large SmallVector to not go through the heap.
595  SmallVector<SrcOp, 8> TmpVec(Ops.begin(), Ops.end());
596  return buildInstr(TargetOpcode::G_BUILD_VECTOR, Res, TmpVec);
597 }
598 
600  const SrcOp &Src) {
601  SmallVector<SrcOp, 8> TmpVec(Res.getLLTTy(*getMRI()).getNumElements(), Src);
602  return buildInstr(TargetOpcode::G_BUILD_VECTOR, Res, TmpVec);
603 }
604 
607  ArrayRef<unsigned> Ops) {
608  // Unfortunately to convert from ArrayRef<unsigned> to ArrayRef<SrcOp>,
 609  // we need some temporary storage for the SrcOp objects. Here we use a
610  // sufficiently large SmallVector to not go through the heap.
611  SmallVector<SrcOp, 8> TmpVec(Ops.begin(), Ops.end());
612  return buildInstr(TargetOpcode::G_BUILD_VECTOR_TRUNC, Res, TmpVec);
613 }
614 
617  // Unfortunately to convert from ArrayRef<unsigned> to ArrayRef<SrcOp>,
 618  // we need some temporary storage for the SrcOp objects. Here we use a
619  // sufficiently large SmallVector to not go through the heap.
620  SmallVector<SrcOp, 8> TmpVec(Ops.begin(), Ops.end());
621  return buildInstr(TargetOpcode::G_CONCAT_VECTORS, Res, TmpVec);
622 }
623 
625  unsigned Op, unsigned Index) {
626  assert(Index + getMRI()->getType(Op).getSizeInBits() <=
627  getMRI()->getType(Res).getSizeInBits() &&
628  "insertion past the end of a register");
629 
630  if (getMRI()->getType(Res).getSizeInBits() ==
631  getMRI()->getType(Op).getSizeInBits()) {
632  return buildCast(Res, Op);
633  }
634 
635  return buildInstr(TargetOpcode::G_INSERT)
636  .addDef(Res)
637  .addUse(Src)
638  .addUse(Op)
639  .addImm(Index);
640 }
641 
643  ArrayRef<unsigned> ResultRegs,
644  bool HasSideEffects) {
645  auto MIB =
646  buildInstr(HasSideEffects ? TargetOpcode::G_INTRINSIC_W_SIDE_EFFECTS
647  : TargetOpcode::G_INTRINSIC);
648  for (unsigned ResultReg : ResultRegs)
649  MIB.addDef(ResultReg);
650  MIB.addIntrinsicID(ID);
651  return MIB;
652 }
653 
656  bool HasSideEffects) {
657  auto MIB =
658  buildInstr(HasSideEffects ? TargetOpcode::G_INTRINSIC_W_SIDE_EFFECTS
659  : TargetOpcode::G_INTRINSIC);
660  for (DstOp Result : Results)
661  Result.addDefToMIB(*getMRI(), MIB);
662  MIB.addIntrinsicID(ID);
663  return MIB;
664 }
665 
667  const SrcOp &Op) {
668  return buildInstr(TargetOpcode::G_TRUNC, Res, Op);
669 }
670 
672  const SrcOp &Op) {
673  return buildInstr(TargetOpcode::G_FPTRUNC, Res, Op);
674 }
675 
677  const DstOp &Res,
678  const SrcOp &Op0,
679  const SrcOp &Op1) {
680  return buildInstr(TargetOpcode::G_ICMP, Res, {Pred, Op0, Op1});
681 }
682 
684  const DstOp &Res,
685  const SrcOp &Op0,
686  const SrcOp &Op1) {
687 
688  return buildInstr(TargetOpcode::G_FCMP, Res, {Pred, Op0, Op1});
689 }
690 
692  const SrcOp &Tst,
693  const SrcOp &Op0,
694  const SrcOp &Op1) {
695 
696  return buildInstr(TargetOpcode::G_SELECT, {Res}, {Tst, Op0, Op1});
697 }
698 
701  const SrcOp &Elt, const SrcOp &Idx) {
702  return buildInstr(TargetOpcode::G_INSERT_VECTOR_ELT, Res, {Val, Elt, Idx});
703 }
704 
707  const SrcOp &Idx) {
708  return buildInstr(TargetOpcode::G_EXTRACT_VECTOR_ELT, Res, {Val, Idx});
709 }
710 
712  unsigned OldValRes, unsigned SuccessRes, unsigned Addr, unsigned CmpVal,
713  unsigned NewVal, MachineMemOperand &MMO) {
714 #ifndef NDEBUG
715  LLT OldValResTy = getMRI()->getType(OldValRes);
716  LLT SuccessResTy = getMRI()->getType(SuccessRes);
717  LLT AddrTy = getMRI()->getType(Addr);
718  LLT CmpValTy = getMRI()->getType(CmpVal);
719  LLT NewValTy = getMRI()->getType(NewVal);
720  assert(OldValResTy.isScalar() && "invalid operand type");
721  assert(SuccessResTy.isScalar() && "invalid operand type");
722  assert(AddrTy.isPointer() && "invalid operand type");
723  assert(CmpValTy.isValid() && "invalid operand type");
724  assert(NewValTy.isValid() && "invalid operand type");
725  assert(OldValResTy == CmpValTy && "type mismatch");
726  assert(OldValResTy == NewValTy && "type mismatch");
727 #endif
728 
729  return buildInstr(TargetOpcode::G_ATOMIC_CMPXCHG_WITH_SUCCESS)
730  .addDef(OldValRes)
731  .addDef(SuccessRes)
732  .addUse(Addr)
733  .addUse(CmpVal)
734  .addUse(NewVal)
735  .addMemOperand(&MMO);
736 }
737 
739 MachineIRBuilder::buildAtomicCmpXchg(unsigned OldValRes, unsigned Addr,
740  unsigned CmpVal, unsigned NewVal,
741  MachineMemOperand &MMO) {
742 #ifndef NDEBUG
743  LLT OldValResTy = getMRI()->getType(OldValRes);
744  LLT AddrTy = getMRI()->getType(Addr);
745  LLT CmpValTy = getMRI()->getType(CmpVal);
746  LLT NewValTy = getMRI()->getType(NewVal);
747  assert(OldValResTy.isScalar() && "invalid operand type");
748  assert(AddrTy.isPointer() && "invalid operand type");
749  assert(CmpValTy.isValid() && "invalid operand type");
750  assert(NewValTy.isValid() && "invalid operand type");
751  assert(OldValResTy == CmpValTy && "type mismatch");
752  assert(OldValResTy == NewValTy && "type mismatch");
753 #endif
754 
755  return buildInstr(TargetOpcode::G_ATOMIC_CMPXCHG)
756  .addDef(OldValRes)
757  .addUse(Addr)
758  .addUse(CmpVal)
759  .addUse(NewVal)
760  .addMemOperand(&MMO);
761 }
762 
764  unsigned OldValRes,
765  unsigned Addr,
766  unsigned Val,
767  MachineMemOperand &MMO) {
768 #ifndef NDEBUG
769  LLT OldValResTy = getMRI()->getType(OldValRes);
770  LLT AddrTy = getMRI()->getType(Addr);
771  LLT ValTy = getMRI()->getType(Val);
772  assert(OldValResTy.isScalar() && "invalid operand type");
773  assert(AddrTy.isPointer() && "invalid operand type");
774  assert(ValTy.isValid() && "invalid operand type");
775  assert(OldValResTy == ValTy && "type mismatch");
776 #endif
777 
778  return buildInstr(Opcode)
779  .addDef(OldValRes)
780  .addUse(Addr)
781  .addUse(Val)
782  .addMemOperand(&MMO);
783 }
784 
786 MachineIRBuilder::buildAtomicRMWXchg(unsigned OldValRes, unsigned Addr,
787  unsigned Val, MachineMemOperand &MMO) {
788  return buildAtomicRMW(TargetOpcode::G_ATOMICRMW_XCHG, OldValRes, Addr, Val,
789  MMO);
790 }
792 MachineIRBuilder::buildAtomicRMWAdd(unsigned OldValRes, unsigned Addr,
793  unsigned Val, MachineMemOperand &MMO) {
794  return buildAtomicRMW(TargetOpcode::G_ATOMICRMW_ADD, OldValRes, Addr, Val,
795  MMO);
796 }
798 MachineIRBuilder::buildAtomicRMWSub(unsigned OldValRes, unsigned Addr,
799  unsigned Val, MachineMemOperand &MMO) {
800  return buildAtomicRMW(TargetOpcode::G_ATOMICRMW_SUB, OldValRes, Addr, Val,
801  MMO);
802 }
804 MachineIRBuilder::buildAtomicRMWAnd(unsigned OldValRes, unsigned Addr,
805  unsigned Val, MachineMemOperand &MMO) {
806  return buildAtomicRMW(TargetOpcode::G_ATOMICRMW_AND, OldValRes, Addr, Val,
807  MMO);
808 }
810 MachineIRBuilder::buildAtomicRMWNand(unsigned OldValRes, unsigned Addr,
811  unsigned Val, MachineMemOperand &MMO) {
812  return buildAtomicRMW(TargetOpcode::G_ATOMICRMW_NAND, OldValRes, Addr, Val,
813  MMO);
814 }
816  unsigned Addr,
817  unsigned Val,
818  MachineMemOperand &MMO) {
819  return buildAtomicRMW(TargetOpcode::G_ATOMICRMW_OR, OldValRes, Addr, Val,
820  MMO);
821 }
823 MachineIRBuilder::buildAtomicRMWXor(unsigned OldValRes, unsigned Addr,
824  unsigned Val, MachineMemOperand &MMO) {
825  return buildAtomicRMW(TargetOpcode::G_ATOMICRMW_XOR, OldValRes, Addr, Val,
826  MMO);
827 }
829 MachineIRBuilder::buildAtomicRMWMax(unsigned OldValRes, unsigned Addr,
830  unsigned Val, MachineMemOperand &MMO) {
831  return buildAtomicRMW(TargetOpcode::G_ATOMICRMW_MAX, OldValRes, Addr, Val,
832  MMO);
833 }
835 MachineIRBuilder::buildAtomicRMWMin(unsigned OldValRes, unsigned Addr,
836  unsigned Val, MachineMemOperand &MMO) {
837  return buildAtomicRMW(TargetOpcode::G_ATOMICRMW_MIN, OldValRes, Addr, Val,
838  MMO);
839 }
841 MachineIRBuilder::buildAtomicRMWUmax(unsigned OldValRes, unsigned Addr,
842  unsigned Val, MachineMemOperand &MMO) {
843  return buildAtomicRMW(TargetOpcode::G_ATOMICRMW_UMAX, OldValRes, Addr, Val,
844  MMO);
845 }
847 MachineIRBuilder::buildAtomicRMWUmin(unsigned OldValRes, unsigned Addr,
848  unsigned Val, MachineMemOperand &MMO) {
849  return buildAtomicRMW(TargetOpcode::G_ATOMICRMW_UMIN, OldValRes, Addr, Val,
850  MMO);
851 }
852 
855 #ifndef NDEBUG
856  assert(getMRI()->getType(Res).isPointer() && "invalid res type");
857 #endif
858 
859  return buildInstr(TargetOpcode::G_BLOCK_ADDR).addDef(Res).addBlockAddress(BA);
860 }
861 
/// Debug-build sanity check for truncations and extensions.
/// \param DstTy    Type of the result register.
/// \param SrcTy    Type of the source register.
/// \param IsExtend True when validating an extend (G_SEXT/G_ZEXT/G_ANYEXT),
///                 false when validating a trunc (G_TRUNC/G_FPTRUNC).
/// Compiles to nothing in release (NDEBUG) builds.
void MachineIRBuilder::validateTruncExt(const LLT &DstTy, const LLT &SrcTy,
                                        bool IsExtend) {
#ifndef NDEBUG
  if (DstTy.isVector()) {
    // Vector <-> vector only, and element counts must agree; only the
    // element width may change.
    assert(SrcTy.isVector() && "mismatched cast between vector and non-vector");
    assert(SrcTy.getNumElements() == DstTy.getNumElements() &&
           "different number of elements in a trunc/ext");
  } else
    assert(DstTy.isScalar() && SrcTy.isScalar() && "invalid extend/trunc");

  // An extend must strictly grow the bit width, a trunc must strictly
  // shrink it; equal sizes should have been emitted as a COPY instead.
  if (IsExtend)
    assert(DstTy.getSizeInBits() > SrcTy.getSizeInBits() &&
           "invalid narrowing extend");
  else
    assert(DstTy.getSizeInBits() < SrcTy.getSizeInBits() &&
           "invalid widening trunc");
#endif
}
880 
/// Debug-build sanity check for G_SELECT operand types.
/// \param ResTy  Type of the result.
/// \param TstTy  Type of the condition operand.
/// \param Op0Ty  Type of the true-value operand.
/// \param Op1Ty  Type of the false-value operand.
/// Both selected values must match the result type exactly. For a scalar or
/// pointer result the condition must be scalar; for a vector result the
/// condition may be a scalar (one bit selects the whole vector) or a vector
/// with the same element count (per-lane select).
/// Compiles to nothing in release (NDEBUG) builds.
void MachineIRBuilder::validateSelectOp(const LLT &ResTy, const LLT &TstTy,
                                        const LLT &Op0Ty, const LLT &Op1Ty) {
#ifndef NDEBUG
  assert((ResTy.isScalar() || ResTy.isVector() || ResTy.isPointer()) &&
         "invalid operand type");
  assert((ResTy == Op0Ty && ResTy == Op1Ty) && "type mismatch");
  if (ResTy.isScalar() || ResTy.isPointer())
    assert(TstTy.isScalar() && "type mismatch");
  else
    assert((TstTy.isScalar() ||
            (TstTy.isVector() &&
             TstTy.getNumElements() == Op0Ty.getNumElements())) &&
           "type mismatch");
#endif
}
896 
898  ArrayRef<DstOp> DstOps,
899  ArrayRef<SrcOp> SrcOps,
900  Optional<unsigned> Flags) {
901  switch (Opc) {
902  default:
903  break;
904  case TargetOpcode::G_SELECT: {
905  assert(DstOps.size() == 1 && "Invalid select");
906  assert(SrcOps.size() == 3 && "Invalid select");
908  DstOps[0].getLLTTy(*getMRI()), SrcOps[0].getLLTTy(*getMRI()),
909  SrcOps[1].getLLTTy(*getMRI()), SrcOps[2].getLLTTy(*getMRI()));
910  break;
911  }
912  case TargetOpcode::G_ADD:
913  case TargetOpcode::G_AND:
914  case TargetOpcode::G_MUL:
915  case TargetOpcode::G_OR:
916  case TargetOpcode::G_SUB:
917  case TargetOpcode::G_XOR:
918  case TargetOpcode::G_UDIV:
919  case TargetOpcode::G_SDIV:
920  case TargetOpcode::G_UREM:
921  case TargetOpcode::G_SREM:
922  case TargetOpcode::G_SMIN:
923  case TargetOpcode::G_SMAX:
924  case TargetOpcode::G_UMIN:
925  case TargetOpcode::G_UMAX: {
926  // All these are binary ops.
927  assert(DstOps.size() == 1 && "Invalid Dst");
928  assert(SrcOps.size() == 2 && "Invalid Srcs");
929  validateBinaryOp(DstOps[0].getLLTTy(*getMRI()),
930  SrcOps[0].getLLTTy(*getMRI()),
931  SrcOps[1].getLLTTy(*getMRI()));
932  break;
933  }
934  case TargetOpcode::G_SHL:
935  case TargetOpcode::G_ASHR:
936  case TargetOpcode::G_LSHR: {
937  assert(DstOps.size() == 1 && "Invalid Dst");
938  assert(SrcOps.size() == 2 && "Invalid Srcs");
939  validateShiftOp(DstOps[0].getLLTTy(*getMRI()),
940  SrcOps[0].getLLTTy(*getMRI()),
941  SrcOps[1].getLLTTy(*getMRI()));
942  break;
943  }
944  case TargetOpcode::G_SEXT:
945  case TargetOpcode::G_ZEXT:
946  case TargetOpcode::G_ANYEXT:
947  assert(DstOps.size() == 1 && "Invalid Dst");
948  assert(SrcOps.size() == 1 && "Invalid Srcs");
949  validateTruncExt(DstOps[0].getLLTTy(*getMRI()),
950  SrcOps[0].getLLTTy(*getMRI()), true);
951  break;
952  case TargetOpcode::G_TRUNC:
953  case TargetOpcode::G_FPTRUNC: {
954  assert(DstOps.size() == 1 && "Invalid Dst");
955  assert(SrcOps.size() == 1 && "Invalid Srcs");
956  validateTruncExt(DstOps[0].getLLTTy(*getMRI()),
957  SrcOps[0].getLLTTy(*getMRI()), false);
958  break;
959  }
960  case TargetOpcode::COPY:
961  assert(DstOps.size() == 1 && "Invalid Dst");
962  // If the caller wants to add a subreg source it has to be done separately
963  // so we may not have any SrcOps at this point yet.
964  break;
965  case TargetOpcode::G_FCMP:
966  case TargetOpcode::G_ICMP: {
967  assert(DstOps.size() == 1 && "Invalid Dst Operands");
968  assert(SrcOps.size() == 3 && "Invalid Src Operands");
969  // For F/ICMP, the first src operand is the predicate, followed by
970  // the two comparands.
971  assert(SrcOps[0].getSrcOpKind() == SrcOp::SrcType::Ty_Predicate &&
972  "Expecting predicate");
973  assert([&]() -> bool {
974  CmpInst::Predicate Pred = SrcOps[0].getPredicate();
975  return Opc == TargetOpcode::G_ICMP ? CmpInst::isIntPredicate(Pred)
976  : CmpInst::isFPPredicate(Pred);
977  }() && "Invalid predicate");
978  assert(SrcOps[1].getLLTTy(*getMRI()) == SrcOps[2].getLLTTy(*getMRI()) &&
979  "Type mismatch");
980  assert([&]() -> bool {
981  LLT Op0Ty = SrcOps[1].getLLTTy(*getMRI());
982  LLT DstTy = DstOps[0].getLLTTy(*getMRI());
983  if (Op0Ty.isScalar() || Op0Ty.isPointer())
984  return DstTy.isScalar();
985  else
986  return DstTy.isVector() &&
987  DstTy.getNumElements() == Op0Ty.getNumElements();
988  }() && "Type Mismatch");
989  break;
990  }
991  case TargetOpcode::G_UNMERGE_VALUES: {
992  assert(!DstOps.empty() && "Invalid trivial sequence");
993  assert(SrcOps.size() == 1 && "Invalid src for Unmerge");
994  assert(std::all_of(DstOps.begin(), DstOps.end(),
995  [&, this](const DstOp &Op) {
996  return Op.getLLTTy(*getMRI()) ==
997  DstOps[0].getLLTTy(*getMRI());
998  }) &&
999  "type mismatch in output list");
1000  assert(DstOps.size() * DstOps[0].getLLTTy(*getMRI()).getSizeInBits() ==
1001  SrcOps[0].getLLTTy(*getMRI()).getSizeInBits() &&
1002  "input operands do not cover output register");
1003  break;
1004  }
1005  case TargetOpcode::G_MERGE_VALUES: {
1006  assert(!SrcOps.empty() && "invalid trivial sequence");
1007  assert(DstOps.size() == 1 && "Invalid Dst");
1008  assert(std::all_of(SrcOps.begin(), SrcOps.end(),
1009  [&, this](const SrcOp &Op) {
1010  return Op.getLLTTy(*getMRI()) ==
1011  SrcOps[0].getLLTTy(*getMRI());
1012  }) &&
1013  "type mismatch in input list");
1014  assert(SrcOps.size() * SrcOps[0].getLLTTy(*getMRI()).getSizeInBits() ==
1015  DstOps[0].getLLTTy(*getMRI()).getSizeInBits() &&
1016  "input operands do not cover output register");
1017  if (SrcOps.size() == 1)
1018  return buildCast(DstOps[0], SrcOps[0]);
1019  if (DstOps[0].getLLTTy(*getMRI()).isVector())
1020  return buildInstr(TargetOpcode::G_CONCAT_VECTORS, DstOps, SrcOps);
1021  break;
1022  }
1023  case TargetOpcode::G_EXTRACT_VECTOR_ELT: {
1024  assert(DstOps.size() == 1 && "Invalid Dst size");
1025  assert(SrcOps.size() == 2 && "Invalid Src size");
1026  assert(SrcOps[0].getLLTTy(*getMRI()).isVector() && "Invalid operand type");
1027  assert((DstOps[0].getLLTTy(*getMRI()).isScalar() ||
1028  DstOps[0].getLLTTy(*getMRI()).isPointer()) &&
1029  "Invalid operand type");
1030  assert(SrcOps[1].getLLTTy(*getMRI()).isScalar() && "Invalid operand type");
1031  assert(SrcOps[0].getLLTTy(*getMRI()).getElementType() ==
1032  DstOps[0].getLLTTy(*getMRI()) &&
1033  "Type mismatch");
1034  break;
1035  }
1036  case TargetOpcode::G_INSERT_VECTOR_ELT: {
1037  assert(DstOps.size() == 1 && "Invalid dst size");
1038  assert(SrcOps.size() == 3 && "Invalid src size");
1039  assert(DstOps[0].getLLTTy(*getMRI()).isVector() &&
1040  SrcOps[0].getLLTTy(*getMRI()).isVector() && "Invalid operand type");
1041  assert(DstOps[0].getLLTTy(*getMRI()).getElementType() ==
1042  SrcOps[1].getLLTTy(*getMRI()) &&
1043  "Type mismatch");
1044  assert(SrcOps[2].getLLTTy(*getMRI()).isScalar() && "Invalid index");
1045  assert(DstOps[0].getLLTTy(*getMRI()).getNumElements() ==
1046  SrcOps[0].getLLTTy(*getMRI()).getNumElements() &&
1047  "Type mismatch");
1048  break;
1049  }
1050  case TargetOpcode::G_BUILD_VECTOR: {
1051  assert((!SrcOps.empty() || SrcOps.size() < 2) &&
1052  "Must have at least 2 operands");
1053  assert(DstOps.size() == 1 && "Invalid DstOps");
1054  assert(DstOps[0].getLLTTy(*getMRI()).isVector() &&
1055  "Res type must be a vector");
1056  assert(std::all_of(SrcOps.begin(), SrcOps.end(),
1057  [&, this](const SrcOp &Op) {
1058  return Op.getLLTTy(*getMRI()) ==
1059  SrcOps[0].getLLTTy(*getMRI());
1060  }) &&
1061  "type mismatch in input list");
1062  assert(SrcOps.size() * SrcOps[0].getLLTTy(*getMRI()).getSizeInBits() ==
1063  DstOps[0].getLLTTy(*getMRI()).getSizeInBits() &&
1064  "input scalars do not exactly cover the output vector register");
1065  break;
1066  }
1067  case TargetOpcode::G_BUILD_VECTOR_TRUNC: {
1068  assert((!SrcOps.empty() || SrcOps.size() < 2) &&
1069  "Must have at least 2 operands");
1070  assert(DstOps.size() == 1 && "Invalid DstOps");
1071  assert(DstOps[0].getLLTTy(*getMRI()).isVector() &&
1072  "Res type must be a vector");
1073  assert(std::all_of(SrcOps.begin(), SrcOps.end(),
1074  [&, this](const SrcOp &Op) {
1075  return Op.getLLTTy(*getMRI()) ==
1076  SrcOps[0].getLLTTy(*getMRI());
1077  }) &&
1078  "type mismatch in input list");
1079  if (SrcOps[0].getLLTTy(*getMRI()).getSizeInBits() ==
1080  DstOps[0].getLLTTy(*getMRI()).getElementType().getSizeInBits())
1081  return buildInstr(TargetOpcode::G_BUILD_VECTOR, DstOps, SrcOps);
1082  break;
1083  }
1084  case TargetOpcode::G_CONCAT_VECTORS: {
1085  assert(DstOps.size() == 1 && "Invalid DstOps");
1086  assert((!SrcOps.empty() || SrcOps.size() < 2) &&
1087  "Must have at least 2 operands");
1088  assert(std::all_of(SrcOps.begin(), SrcOps.end(),
1089  [&, this](const SrcOp &Op) {
1090  return (Op.getLLTTy(*getMRI()).isVector() &&
1091  Op.getLLTTy(*getMRI()) ==
1092  SrcOps[0].getLLTTy(*getMRI()));
1093  }) &&
1094  "type mismatch in input list");
1095  assert(SrcOps.size() * SrcOps[0].getLLTTy(*getMRI()).getSizeInBits() ==
1096  DstOps[0].getLLTTy(*getMRI()).getSizeInBits() &&
1097  "input vectors do not exactly cover the output vector register");
1098  break;
1099  }
1100  case TargetOpcode::G_UADDE: {
1101  assert(DstOps.size() == 2 && "Invalid no of dst operands");
1102  assert(SrcOps.size() == 3 && "Invalid no of src operands");
1103  assert(DstOps[0].getLLTTy(*getMRI()).isScalar() && "Invalid operand");
1104  assert((DstOps[0].getLLTTy(*getMRI()) == SrcOps[0].getLLTTy(*getMRI())) &&
1105  (DstOps[0].getLLTTy(*getMRI()) == SrcOps[1].getLLTTy(*getMRI())) &&
1106  "Invalid operand");
1107  assert(DstOps[1].getLLTTy(*getMRI()).isScalar() && "Invalid operand");
1108  assert(DstOps[1].getLLTTy(*getMRI()) == SrcOps[2].getLLTTy(*getMRI()) &&
1109  "type mismatch");
1110  break;
1111  }
1112  }
1113 
1114  auto MIB = buildInstr(Opc);
1115  for (const DstOp &Op : DstOps)
1116  Op.addDefToMIB(*getMRI(), MIB);
1117  for (const SrcOp &Op : SrcOps)
1118  Op.addSrcToMIB(MIB);
1119  if (Flags)
1120  MIB->setFlags(*Flags);
1121  return MIB;
1122 }
bool isFPPredicate() const
Definition: InstrTypes.h:824
uint64_t CallInst * C
const MachineInstrBuilder & addMetadata(const MDNode *MD) const
virtual MachineInstrBuilder buildConstant(const DstOp &Res, const ConstantInt &Val)
Build and insert Res = G_CONSTANT Val.
MachineInstrBuilder buildJumpTable(const LLT PtrTy, unsigned JTI)
Build and insert Res = G_JUMP_TABLE JTI.
void addDefToMIB(MachineRegisterInfo &MRI, MachineInstrBuilder &MIB) const
The CSE Analysis object.
Definition: CSEInfo.h:71
MachineInstrBuilder buildZExtOrTrunc(const DstOp &Res, const SrcOp &Op)
Build and insert Res = G_ZEXT Op, Res = G_TRUNC Op, or Res = COPY Op depending on the differing sizes...
MachineInstrBuilder buildUnmerge(ArrayRef< LLT > Res, const SrcOp &Op)
Build and insert Res0, ...
MachineInstrBuilder buildGEP(unsigned Res, unsigned Op0, unsigned Op1)
Build and insert Res = G_GEP Op0, Op1.
This class represents lattice values for constants.
Definition: AllocatorList.h:23
MachineInstrBuilder buildIndirectDbgValue(unsigned Reg, const MDNode *Variable, const MDNode *Expr)
Build and insert a DBG_VALUE instruction expressing the fact that the associated Variable lives in me...
MachineInstrBuilder buildSExtOrTrunc(const DstOp &Res, const SrcOp &Op)
Build and insert Res = G_SEXT Op, Res = G_TRUNC Op, or Res = COPY Op depending on the differing sizes...
iterator begin() const
Definition: ArrayRef.h:136
unsigned getScalarSizeInBits() const
void push_back(const T &Elt)
Definition: SmallVector.h:211
MachineInstrBuilder buildIntrinsic(Intrinsic::ID ID, ArrayRef< unsigned > Res, bool HasSideEffects)
Build and insert either a G_INTRINSIC (if HasSideEffects is false) or G_INTRINSIC_W_SIDE_EFFECTS inst...
bool isScalar() const
MachineInstrBuilder buildAtomicRMWSub(unsigned OldValRes, unsigned Addr, unsigned Val, MachineMemOperand &MMO)
Build and insert OldValRes<def> = G_ATOMICRMW_SUB Addr, Val, MMO.
GISelChangeObserver * Observer
MachineInstrBuilder buildCast(const DstOp &Dst, const SrcOp &Src)
Build and insert an appropriate cast between two registers of equal size.
unsigned Reg
virtual const TargetLowering * getTargetLowering() const
MachineInstrBuilder buildAtomicRMWXor(unsigned OldValRes, unsigned Addr, unsigned Val, MachineMemOperand &MMO)
Build and insert OldValRes<def> = G_ATOMICRMW_XOR Addr, Val, MMO.
LLT getScalarType() const
Function Alias Analysis Results
LLT getType(unsigned Reg) const
Get the low-level type of Reg or LLT{} if Reg is not a generic (target independent) virtual register...
void addSrcToMIB(MachineInstrBuilder &MIB) const
static unsigned getSizeInBits(const fltSemantics &Sem)
Returns the size of the floating point number (in bits) in the given semantics.
Definition: APFloat.cpp:205
MachineInstrBuilder buildConcatVectors(const DstOp &Res, ArrayRef< unsigned > Ops)
Build and insert Res = G_CONCAT_VECTORS Op0, ...
bool all_of(R &&range, UnaryPredicate P)
Provide wrappers to std::all_of which take ranges instead of having to pass begin/end explicitly...
Definition: STLExtras.h:1192
A debug info location.
Definition: DebugLoc.h:33
Metadata node.
Definition: Metadata.h:863
const fltSemantics & getSemantics() const
Definition: APFloat.h:1165
const MachineInstrBuilder & addGlobalAddress(const GlobalValue *GV, int64_t Offset=0, unsigned char TargetFlags=0) const
void validateSelectOp(const LLT &ResTy, const LLT &TstTy, const LLT &Op0Ty, const LLT &Op1Ty)
MachineInstrBuilder buildUAddo(const DstOp &Res, const DstOp &CarryOut, const SrcOp &Op0, const SrcOp &Op1)
Build and insert Res, CarryOut = G_UADDO Op0, Op1.
unsigned getBitWidth() const
getBitWidth - Return the bitwidth of this constant.
Definition: Constants.h:142
LegalityPredicate isPointer(unsigned TypeIdx)
True iff the specified type index is a pointer (with any address space).
LegalityPredicate isVector(unsigned TypeIdx)
True iff the specified type index is a vector.
MachineInstrBuilder buildExtract(const DstOp &Res, const SrcOp &Src, uint64_t Index)
Build and insert `Res0, ...
MachineInstrBuilder buildAtomicRMWNand(unsigned OldValRes, unsigned Addr, unsigned Val, MachineMemOperand &MMO)
Build and insert OldValRes<def> = G_ATOMICRMW_NAND Addr, Val, MMO.
MachineInstrBuilder buildStore(unsigned Val, unsigned Addr, MachineMemOperand &MMO)
Build and insert G_STORE Val, Addr, MMO.
bool isVector() const
void setMF(MachineFunction &MF)
The address of a basic block.
Definition: Constants.h:839
MachineInstrBuilder buildBlockAddress(unsigned Res, const BlockAddress *BA)
Build and insert Res = G_BLOCK_ADDR BA.
A description of a memory reference used in the backend.
void setInsertPt(MachineBasicBlock &MBB, MachineBasicBlock::iterator II)
Set the insertion point before the specified position.
MachineInstrBuilder buildAnyExt(const DstOp &Res, const SrcOp &Op)
Build and insert Res = G_ANYEXT Op0.
MachineInstrBuilder buildExtOrTrunc(unsigned ExtOpc, const DstOp &Res, const SrcOp &Op)
Build and insert Res = ExtOpc, Res = G_TRUNC Op, or Res = COPY Op depending on the differing sizes of...
MachineInstrBuilder buildUAdde(const DstOp &Res, const DstOp &CarryOut, const SrcOp &Op0, const SrcOp &Op1, const SrcOp &CarryIn)
Build and insert Res, CarryOut = G_UADDE Op0, Op1, CarryIn.
const MachineInstrBuilder & addUse(unsigned RegNo, unsigned Flags=0, unsigned SubReg=0) const
Add a virtual register use operand.
void validateTruncExt(const LLT &Dst, const LLT &Src, bool IsExtend)
MachineInstrBuilder buildAnyExtOrTrunc(const DstOp &Res, const SrcOp &Op)
Res = COPY Op depending on the differing sizes of Res and Op.
MachineBasicBlock::iterator II
MachineInstrBuilder buildBrJT(unsigned TablePtr, unsigned JTI, unsigned IndexReg)
Build and insert G_BRJT TablePtr, JTI, IndexReg.
void recordInsertion(MachineInstr *MI) const
APFloat getAPFloatFromSize(double Val, unsigned Size)
Returns an APFloat from Val converted to the appropriate size.
Definition: Utils.cpp:300
MachineInstrBuilder buildInstrNoInsert(unsigned Opcode)
Build but don't insert <empty> = Opcode <empty>.
MachineInstrBuilder buildAtomicRMW(unsigned Opcode, unsigned OldValRes, unsigned Addr, unsigned Val, MachineMemOperand &MMO)
Build and insert OldValRes<def> = G_ATOMICRMW_<Opcode> Addr, Val, MMO.
void validateBinaryOp(const LLT &Res, const LLT &Op0, const LLT &Op1)
MachineFunction & getMF()
Getter for the function we currently build.
const MachineInstrBuilder & addFPImm(const ConstantFP *Val) const
MachineInstrBuilder buildAtomicRMWUmin(unsigned OldValRes, unsigned Addr, unsigned Val, MachineMemOperand &MMO)
Build and insert OldValRes<def> = G_ATOMICRMW_UMIN Addr, Val, MMO.
virtual const TargetInstrInfo * getInstrInfo() const
MachineInstrBuilder buildAtomicRMWUmax(unsigned OldValRes, unsigned Addr, unsigned Val, MachineMemOperand &MMO)
Build and insert OldValRes<def> = G_ATOMICRMW_UMAX Addr, Val, MMO.
static Function * getFunction(Constant *C)
Definition: Evaluator.cpp:258
instr_iterator insert(instr_iterator I, MachineInstr *M)
Insert MI into the instruction list before I, possibly inside a bundle.
MachineInstrBuilder buildExtractVectorElement(const DstOp &Res, const SrcOp &Val, const SrcOp &Idx)
Build and insert Res = G_EXTRACT_VECTOR_ELT Val, Idx.
Analysis containing CSE Info
Definition: CSEInfo.cpp:20
void setChangeObserver(GISelChangeObserver &Observer)
MachineBasicBlock::iterator getInsertPt()
Current insertion point for new instructions.
MachineInstrBuilder buildDbgLabel(const MDNode *Label)
Build and insert a DBG_LABEL instructions specifying that Label is given.
MachineInstrBuilder BuildMI(MachineFunction &MF, const DebugLoc &DL, const MCInstrDesc &MCID)
Builder interface. Specify how to create the initial instruction itself.
MachineInstrBundleIterator< MachineInstr > iterator
MachineInstrBuilder buildSExt(const DstOp &Res, const SrcOp &Op)
Build and insert Res = G_SEXT Op.
void validateShiftOp(const LLT &Res, const LLT &Op0, const LLT &Op1)
MachineRegisterInfo * getMRI()
Getter for MRI.
Abstract class that contains various methods for clients to notify about changes. ...
MachineInstrBuilder buildFPTrunc(const DstOp &Res, const SrcOp &Op)
Build and insert Res = G_FPTRUNC Op.
const MachineInstrBuilder & addBlockAddress(const BlockAddress *BA, int64_t Offset=0, unsigned char TargetFlags=0) const
const TargetInstrInfo * TII
Information used to access the description of the opcodes.
const MachineInstrBuilder & addCImm(const ConstantInt *Val) const
const TargetSubtargetInfo & getSubtarget() const
getSubtarget - Return the subtarget for which this machine code is being compiled.
MachineInstrBuilder buildInstr(unsigned Opcode)
Build and insert <empty> = Opcode <empty>.
unsigned getReg() const
size_t size() const
size - Get the array size.
Definition: ArrayRef.h:148
MachineInstrBuilder buildZExt(const DstOp &Res, const SrcOp &Op)
Build and insert Res = G_ZEXT Op.
This is an important base class in LLVM.
Definition: Constant.h:41
MachineInstrBuilder buildPtrMask(unsigned Res, unsigned Op0, uint32_t NumBits)
Build and insert Res = G_PTR_MASK Op0, NumBits.
virtual void createdInstr(MachineInstr &MI)=0
An instruction has been created and inserted into the function.
ConstantFP - Floating Point Values [float, double].
Definition: Constants.h:263
MachineInstrBuilder buildAtomicRMWAdd(unsigned OldValRes, unsigned Addr, unsigned Val, MachineMemOperand &MMO)
Build and insert OldValRes<def> = G_ATOMICRMW_ADD Addr, Val, MMO.
void setInstr(MachineInstr &MI)
Set the insertion point to before MI.
bool isValid() const
Predicate
This enumeration lists the possible predicates for CmpInst subclasses.
Definition: InstrTypes.h:732
MachineInstrBuilder buildInsert(unsigned Res, unsigned Src, unsigned Op, unsigned Index)
MachineInstrBuilder buildFIDbgValue(int FI, const MDNode *Variable, const MDNode *Expr)
Build and insert a DBG_VALUE instruction expressing the fact that the associated Variable lives in th...
unsigned getAddressSpace() const
Return the address space of the Pointer type.
Definition: DerivedTypes.h:572
DebugLoc DL
Debug location to be set to any instruction we create.
self_iterator getIterator()
Definition: ilist_node.h:81
const MachineInstrBuilder & addFrameIndex(int Idx) const
LLVMContext & getContext() const
getContext - Return a reference to the LLVMContext associated with this function. ...
Definition: Function.cpp:205
MachineInstrBuilder buildBrIndirect(unsigned Tgt)
Build and insert G_BRINDIRECT Tgt.
MachineInstrBuilder buildCopy(const DstOp &Res, const SrcOp &Op)
Build and insert Res = COPY Op.
MachineInstrBuilder buildTrunc(const DstOp &Res, const SrcOp &Op)
Build and insert Res = G_TRUNC Op.
static wasm::ValType getType(const TargetRegisterClass *RC)
MachineInstrBuilder buildLoadInstr(unsigned Opcode, unsigned Res, unsigned Addr, MachineMemOperand &MMO)
Build and insert Res = <opcode> Addr, MMO.
MachineInstrBuilder buildFrameIndex(unsigned Res, int Idx)
Build and insert Res = G_FRAME_INDEX Idx.
MachineInstrBuilder buildAtomicRMWXchg(unsigned OldValRes, unsigned Addr, unsigned Val, MachineMemOperand &MMO)
Build and insert OldValRes<def> = G_ATOMICRMW_XCHG Addr, Val, MMO.
const APFloat & getValueAPF() const
Definition: Constants.h:302
MachineInstrBuilder buildBr(MachineBasicBlock &Dest)
Build and insert G_BR Dest.
unsigned createGenericVirtualRegister(LLT Ty, StringRef Name="")
Create and return a new generic virtual register with low-level type Ty.
MachineInstrBuilder buildConstDbgValue(const Constant &C, const MDNode *Variable, const MDNode *Expr)
Build and insert a DBG_VALUE instructions specifying that Variable is given by C (suitably modified b...
static IntegerType * get(LLVMContext &C, unsigned NumBits)
This static method is the primary way of constructing an IntegerType.
Definition: Type.cpp:239
MachineInstrBuilder buildMerge(const DstOp &Res, ArrayRef< unsigned > Ops)
Build and insert Res = G_MERGE_VALUES Op0, ...
MachineInstrBuilder buildICmp(CmpInst::Predicate Pred, const DstOp &Res, const SrcOp &Op0, const SrcOp &Op1)
Build and insert a Res = G_ICMP Pred, Op0, Op1.
This is the shared class of boolean and integer constants.
Definition: Constants.h:83
virtual MachineInstrBuilder buildFConstant(const DstOp &Res, const ConstantFP &Val)
Build and insert Res = G_FCONSTANT Val.
void buildSequence(unsigned Res, ArrayRef< unsigned > Ops, ArrayRef< uint64_t > Indices)
Build and insert instructions to put Ops together at the specified p Indices to form a larger registe...
This is a 'vector' (really, a variable-sized array), optimized for the case when the array is small...
Definition: SmallVector.h:837
iterator end() const
Definition: ArrayRef.h:137
unsigned getSizeInBits() const
Returns the total size of the type. Must only be called on sized types.
const MachineInstrBuilder & addMemOperand(MachineMemOperand *MMO) const
MachineInstrBuilder buildBrCond(unsigned Tst, MachineBasicBlock &Dest)
Build and insert G_BRCOND Tst, Dest.
const TargetInstrInfo & getTII()
static Constant * get(Type *Ty, uint64_t V, bool isSigned=false)
If Ty is a vector type, return a Constant with a splat of the given value.
Definition: Constants.cpp:631
MachineInstrBuilder buildSelect(const DstOp &Res, const SrcOp &Tst, const SrcOp &Op0, const SrcOp &Op1)
Build and insert a Res = G_SELECT Tst, Op0, Op1.
LegalityPredicate isScalar(unsigned TypeIdx)
True iff the specified type index is a scalar.
static Constant * get(Type *Ty, double V)
This returns a ConstantFP, or a vector containing a splat of a ConstantFP, for the specified value in...
Definition: Constants.cpp:694
LLT getLLTTy(const MachineRegisterInfo &MRI) const
const Function & getFunction() const
Return the LLVM function that this machine code represents.
MachineInstrBuilder buildAtomicRMWAnd(unsigned OldValRes, unsigned Addr, unsigned Val, MachineMemOperand &MMO)
Build and insert OldValRes<def> = G_ATOMICRMW_AND Addr, Val, MMO.
void setCSEInfo(GISelCSEInfo *Info)
This file declares the MachineIRBuilder class.
MachineInstrBuilder buildInsertVectorElement(const DstOp &Res, const SrcOp &Val, const SrcOp &Elt, const SrcOp &Idx)
Build and insert Res = G_INSERT_VECTOR_ELT Val, Elt, Idx.
bool isIntPredicate() const
Definition: InstrTypes.h:825
Class for arbitrary precision integers.
Definition: APInt.h:69
MachineInstrBuilder buildAtomicCmpXchgWithSuccess(unsigned OldValRes, unsigned SuccessRes, unsigned Addr, unsigned CmpVal, unsigned NewVal, MachineMemOperand &MMO)
Build and insert OldValRes<def>, SuccessRes<def> = G_ATOMIC_CMPXCHG_WITH_SUCCESS Addr, CmpVal, NewVal, MMO.
unsigned getBoolExtOp(bool IsVec, bool IsFP) const
LLT getLLTTy(const MachineRegisterInfo &MRI) const
bool isPointer() const
const MachineBasicBlock * getParent() const
Definition: MachineInstr.h:255
Representation of each machine instruction.
Definition: MachineInstr.h:63
const MachineFunction * getParent() const
Return the MachineFunction containing this basic block.
MachineInstrBuilder buildDirectDbgValue(unsigned Reg, const MDNode *Variable, const MDNode *Expr)
Build and insert a DBG_VALUE instruction expressing the fact that the associated Variable lives in Re...
const MachineInstrBuilder & addImm(int64_t Val) const
Add a new immediate operand.
MachineInstrBuilder buildFCmp(CmpInst::Predicate Pred, const DstOp &Res, const SrcOp &Op0, const SrcOp &Op1)
Build and insert a Res = G_FCMP PredOp0, Op1.
MachineRegisterInfo & getRegInfo()
getRegInfo - Return information about the registers currently in use.
MachineInstrBuilder buildBuildVector(const DstOp &Res, ArrayRef< unsigned > Ops)
Build and insert Res = G_BUILD_VECTOR Op0, ...
const MachineBasicBlock & getMBB() const
Getter for the basic block we currently build.
MachineInstrBuilder buildBoolExt(const DstOp &Res, const SrcOp &Op, bool IsFP)
void setMBB(MachineBasicBlock &MBB)
Set the insertion point to the end of MBB.
#define I(x, y, z)
Definition: MD5.cpp:58
MachineInstrBuilder buildBuildVectorTrunc(const DstOp &Res, ArrayRef< unsigned > Ops)
Build and insert Res = G_BUILD_VECTOR_TRUNC Op0, ...
Optional< MachineInstrBuilder > materializeGEP(unsigned &Res, unsigned Op0, const LLT &ValueTy, uint64_t Value)
Materialize and insert Res = G_GEP Op0, (G_CONSTANT Value)
const MachineInstrBuilder & addJumpTableIndex(unsigned Idx, unsigned char TargetFlags=0) const
assert(ImpDefSCC.getReg()==AMDGPU::SCC &&ImpDefSCC.isDef())
MachineInstrBuilder insertInstr(MachineInstrBuilder MIB)
Insert an existing instruction at the insertion point.
LLVM Value Representation.
Definition: Value.h:72
unsigned getSizeInBits(unsigned Reg, const MachineRegisterInfo &MRI, const TargetRegisterInfo &TRI) const
Get the size in bits of Reg.
uint16_t getNumElements() const
Returns the number of elements in a vector LLT.
MachineInstrBuilder buildAtomicRMWMin(unsigned OldValRes, unsigned Addr, unsigned Val, MachineMemOperand &MMO)
Build and insert OldValRes<def> = G_ATOMICRMW_MIN Addr, Val, MMO.
MachineInstrBuilder buildLoad(unsigned Res, unsigned Addr, MachineMemOperand &MMO)
Build and insert Res = G_LOAD Addr, MMO.
MachineInstrBuilder buildAtomicCmpXchg(unsigned OldValRes, unsigned Addr, unsigned CmpVal, unsigned NewVal, MachineMemOperand &MMO)
Build and insert OldValRes<def> = G_ATOMIC_CMPXCHG Addr, CmpVal, NewVal, MMO.
IRTranslator LLVM IR MI
const MachineInstrBuilder & addDef(unsigned RegNo, unsigned Flags=0, unsigned SubReg=0) const
Add a virtual register definition operand.
MachineInstrBuilder buildUndef(const DstOp &Res)
Build and insert Res = IMPLICIT_DEF.
MachineFunction * MF
MachineFunction under construction.
MachineInstrBuilder buildAtomicRMWOr(unsigned OldValRes, unsigned Addr, unsigned Val, MachineMemOperand &MMO)
Build and insert OldValRes<def> = G_ATOMICRMW_OR Addr, Val, MMO.
MachineInstrBuilder buildSplatVector(const DstOp &Res, const SrcOp &Src)
Build and insert Res = G_BUILD_VECTOR with Src replicated to fill the number of elements.
const MachineInstrBuilder & addMBB(MachineBasicBlock *MBB, unsigned char TargetFlags=0) const
MachineInstrBuilder buildGlobalValue(unsigned Res, const GlobalValue *GV)
Build and insert Res = G_GLOBAL_VALUE GV.
const DebugLoc & getDL()
Getter for DebugLoc.
MachineInstrBuilder buildAtomicRMWMax(unsigned OldValRes, unsigned Addr, unsigned Val, MachineMemOperand &MMO)
Build and insert OldValRes<def> = G_ATOMICRMW_MAX Addr, Val, MMO.
PointerType * getType() const
Global values are always pointers.
Definition: GlobalValue.h:277
bool empty() const
empty - Check if the array is empty.
Definition: ArrayRef.h:143
This file describes how to lower LLVM code to machine code.
MachineRegisterInfo * MRI
Information used to verify types are consistent and to create virtual registers.