LLVM  9.0.0svn
MachineIRBuilder.cpp
Go to the documentation of this file.
1 //===-- llvm/CodeGen/GlobalISel/MachineIRBuilder.cpp - MIBuilder--*- C++ -*-==//
2 //
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 //
7 //===----------------------------------------------------------------------===//
8 /// \file
9 /// This file implements the MachineIRBuilder class.
10 //===----------------------------------------------------------------------===//
13 
22 #include "llvm/IR/DebugInfo.h"
23 
24 using namespace llvm;
25 
27  State.MF = &MF;
28  State.MBB = nullptr;
29  State.MRI = &MF.getRegInfo();
30  State.TII = MF.getSubtarget().getInstrInfo();
31  State.DL = DebugLoc();
33  State.Observer = nullptr;
34 }
35 
37  State.MBB = &MBB;
38  State.II = MBB.end();
39  assert(&getMF() == MBB.getParent() &&
40  "Basic block is in a different function");
41 }
42 
44  assert(MI.getParent() && "Instruction is not part of a basic block");
45  setMBB(*MI.getParent());
46  State.II = MI.getIterator();
47 }
48 
50 
53  assert(MBB.getParent() == &getMF() &&
54  "Basic block is in a different function");
55  State.MBB = &MBB;
56  State.II = II;
57 }
58 
59 void MachineIRBuilder::recordInsertion(MachineInstr *InsertedInstr) const {
60  if (State.Observer)
61  State.Observer->createdInstr(*InsertedInstr);
62 }
63 
65  State.Observer = &Observer;
66 }
67 
69 
70 //------------------------------------------------------------------------------
71 // Build instruction variants.
72 //------------------------------------------------------------------------------
73 
75  return insertInstr(buildInstrNoInsert(Opcode));
76 }
77 
79  MachineInstrBuilder MIB = BuildMI(getMF(), getDL(), getTII().get(Opcode));
80  return MIB;
81 }
82 
84  getMBB().insert(getInsertPt(), MIB);
85  recordInsertion(MIB);
86  return MIB;
87 }
88 
91  const MDNode *Expr) {
92  assert(isa<DILocalVariable>(Variable) && "not a variable");
93  assert(cast<DIExpression>(Expr)->isValid() && "not an expression");
94  assert(
95  cast<DILocalVariable>(Variable)->isValidLocationForIntrinsic(getDL()) &&
96  "Expected inlined-at fields to agree");
97  return insertInstr(BuildMI(getMF(), getDL(),
98  getTII().get(TargetOpcode::DBG_VALUE),
99  /*IsIndirect*/ false, Reg, Variable, Expr));
100 }
101 
104  const MDNode *Expr) {
105  assert(isa<DILocalVariable>(Variable) && "not a variable");
106  assert(cast<DIExpression>(Expr)->isValid() && "not an expression");
107  assert(
108  cast<DILocalVariable>(Variable)->isValidLocationForIntrinsic(getDL()) &&
109  "Expected inlined-at fields to agree");
110  return insertInstr(BuildMI(getMF(), getDL(),
111  getTII().get(TargetOpcode::DBG_VALUE),
112  /*IsIndirect*/ true, Reg, Variable, Expr));
113 }
114 
116  const MDNode *Variable,
117  const MDNode *Expr) {
118  assert(isa<DILocalVariable>(Variable) && "not a variable");
119  assert(cast<DIExpression>(Expr)->isValid() && "not an expression");
120  assert(
121  cast<DILocalVariable>(Variable)->isValidLocationForIntrinsic(getDL()) &&
122  "Expected inlined-at fields to agree");
123  return buildInstr(TargetOpcode::DBG_VALUE)
124  .addFrameIndex(FI)
125  .addImm(0)
126  .addMetadata(Variable)
127  .addMetadata(Expr);
128 }
129 
131  const MDNode *Variable,
132  const MDNode *Expr) {
133  assert(isa<DILocalVariable>(Variable) && "not a variable");
134  assert(cast<DIExpression>(Expr)->isValid() && "not an expression");
135  assert(
136  cast<DILocalVariable>(Variable)->isValidLocationForIntrinsic(getDL()) &&
137  "Expected inlined-at fields to agree");
138  auto MIB = buildInstr(TargetOpcode::DBG_VALUE);
139  if (auto *CI = dyn_cast<ConstantInt>(&C)) {
140  if (CI->getBitWidth() > 64)
141  MIB.addCImm(CI);
142  else
143  MIB.addImm(CI->getZExtValue());
144  } else if (auto *CFP = dyn_cast<ConstantFP>(&C)) {
145  MIB.addFPImm(CFP);
146  } else {
147  // Insert %noreg if we didn't find a usable constant and had to drop it.
148  MIB.addReg(0U);
149  }
150 
151  return MIB.addImm(0).addMetadata(Variable).addMetadata(Expr);
152 }
153 
155  assert(isa<DILabel>(Label) && "not a label");
156  assert(cast<DILabel>(Label)->isValidLocationForIntrinsic(State.DL) &&
157  "Expected inlined-at fields to agree");
158  auto MIB = buildInstr(TargetOpcode::DBG_LABEL);
159 
160  return MIB.addMetadata(Label);
161 }
162 
164  int Idx) {
165  assert(Res.getLLTTy(*getMRI()).isPointer() && "invalid operand type");
166  auto MIB = buildInstr(TargetOpcode::G_FRAME_INDEX);
167  Res.addDefToMIB(*getMRI(), MIB);
168  MIB.addFrameIndex(Idx);
169  return MIB;
170 }
171 
173  const GlobalValue *GV) {
174  assert(Res.getLLTTy(*getMRI()).isPointer() && "invalid operand type");
176  GV->getType()->getAddressSpace() &&
177  "address space mismatch");
178 
179  auto MIB = buildInstr(TargetOpcode::G_GLOBAL_VALUE);
180  Res.addDefToMIB(*getMRI(), MIB);
181  MIB.addGlobalAddress(GV);
182  return MIB;
183 }
184 
186  unsigned JTI) {
187  return buildInstr(TargetOpcode::G_JUMP_TABLE, {PtrTy}, {})
188  .addJumpTableIndex(JTI);
189 }
190 
191 void MachineIRBuilder::validateBinaryOp(const LLT &Res, const LLT &Op0,
192  const LLT &Op1) {
193  assert((Res.isScalar() || Res.isVector()) && "invalid operand type");
194  assert((Res == Op0 && Res == Op1) && "type mismatch");
195 }
196 
197 void MachineIRBuilder::validateShiftOp(const LLT &Res, const LLT &Op0,
198  const LLT &Op1) {
199  assert((Res.isScalar() || Res.isVector()) && "invalid operand type");
200  assert((Res == Op0) && "type mismatch");
201 }
202 
204  const SrcOp &Op0,
205  const SrcOp &Op1) {
206  assert(Res.getLLTTy(*getMRI()).isPointer() &&
207  Res.getLLTTy(*getMRI()) == Op0.getLLTTy(*getMRI()) && "type mismatch");
208  assert(Op1.getLLTTy(*getMRI()).isScalar() && "invalid offset type");
209 
210  auto MIB = buildInstr(TargetOpcode::G_GEP);
211  Res.addDefToMIB(*getMRI(), MIB);
212  Op0.addSrcToMIB(MIB);
213  Op1.addSrcToMIB(MIB);
214  return MIB;
215 }
216 
219  const LLT &ValueTy, uint64_t Value) {
220  assert(Res == 0 && "Res is a result argument");
221  assert(ValueTy.isScalar() && "invalid offset type");
222 
223  if (Value == 0) {
224  Res = Op0;
225  return None;
226  }
227 
229  auto Cst = buildConstant(ValueTy, Value);
230  return buildGEP(Res, Op0, Cst.getReg(0));
231 }
232 
234  const SrcOp &Op0,
235  uint32_t NumBits) {
236  assert(Res.getLLTTy(*getMRI()).isPointer() &&
237  Res.getLLTTy(*getMRI()) == Op0.getLLTTy(*getMRI()) && "type mismatch");
238 
239  auto MIB = buildInstr(TargetOpcode::G_PTR_MASK);
240  Res.addDefToMIB(*getMRI(), MIB);
241  Op0.addSrcToMIB(MIB);
242  MIB.addImm(NumBits);
243  return MIB;
244 }
245 
247  return buildInstr(TargetOpcode::G_BR).addMBB(&Dest);
248 }
249 
251  assert(getMRI()->getType(Tgt).isPointer() && "invalid branch destination");
252  return buildInstr(TargetOpcode::G_BRINDIRECT).addUse(Tgt);
253 }
254 
256  unsigned JTI,
257  Register IndexReg) {
258  assert(getMRI()->getType(TablePtr).isPointer() &&
259  "Table reg must be a pointer");
260  return buildInstr(TargetOpcode::G_BRJT)
261  .addUse(TablePtr)
262  .addJumpTableIndex(JTI)
263  .addUse(IndexReg);
264 }
265 
267  const SrcOp &Op) {
268  return buildInstr(TargetOpcode::COPY, Res, Op);
269 }
270 
272  const ConstantInt &Val) {
273  LLT Ty = Res.getLLTTy(*getMRI());
274  LLT EltTy = Ty.getScalarType();
275  assert(EltTy.getScalarSizeInBits() == Val.getBitWidth() &&
276  "creating constant with the wrong size");
277 
278  if (Ty.isVector()) {
279  auto Const = buildInstr(TargetOpcode::G_CONSTANT)
280  .addDef(getMRI()->createGenericVirtualRegister(EltTy))
281  .addCImm(&Val);
282  return buildSplatVector(Res, Const);
283  }
284 
285  auto Const = buildInstr(TargetOpcode::G_CONSTANT);
286  Res.addDefToMIB(*getMRI(), Const);
287  Const.addCImm(&Val);
288  return Const;
289 }
290 
292  int64_t Val) {
293  auto IntN = IntegerType::get(getMF().getFunction().getContext(),
295  ConstantInt *CI = ConstantInt::get(IntN, Val, true);
296  return buildConstant(Res, *CI);
297 }
298 
300  const ConstantFP &Val) {
301  LLT Ty = Res.getLLTTy(*getMRI());
302  LLT EltTy = Ty.getScalarType();
303 
305  == EltTy.getSizeInBits() &&
306  "creating fconstant with the wrong size");
307 
308  assert(!Ty.isPointer() && "invalid operand type");
309 
310  if (Ty.isVector()) {
311  auto Const = buildInstr(TargetOpcode::G_FCONSTANT)
312  .addDef(getMRI()->createGenericVirtualRegister(EltTy))
313  .addFPImm(&Val);
314 
315  return buildSplatVector(Res, Const);
316  }
317 
318  auto Const = buildInstr(TargetOpcode::G_FCONSTANT);
319  Res.addDefToMIB(*getMRI(), Const);
320  Const.addFPImm(&Val);
321  return Const;
322 }
323 
325  const APInt &Val) {
326  ConstantInt *CI = ConstantInt::get(getMF().getFunction().getContext(), Val);
327  return buildConstant(Res, *CI);
328 }
329 
331  double Val) {
332  LLT DstTy = Res.getLLTTy(*getMRI());
333  auto &Ctx = getMF().getFunction().getContext();
334  auto *CFP =
336  return buildFConstant(Res, *CFP);
337 }
338 
340  const APFloat &Val) {
341  auto &Ctx = getMF().getFunction().getContext();
342  auto *CFP = ConstantFP::get(Ctx, Val);
343  return buildFConstant(Res, *CFP);
344 }
345 
347  MachineBasicBlock &Dest) {
348  assert(getMRI()->getType(Tst).isScalar() && "invalid operand type");
349 
350  return buildInstr(TargetOpcode::G_BRCOND).addUse(Tst).addMBB(&Dest);
351 }
352 
354  const SrcOp &Addr,
355  MachineMemOperand &MMO) {
356  return buildLoadInstr(TargetOpcode::G_LOAD, Res, Addr, MMO);
357 }
358 
360  const DstOp &Res,
361  const SrcOp &Addr,
362  MachineMemOperand &MMO) {
363  assert(Res.getLLTTy(*getMRI()).isValid() && "invalid operand type");
364  assert(Addr.getLLTTy(*getMRI()).isPointer() && "invalid operand type");
365 
366  auto MIB = buildInstr(Opcode);
367  Res.addDefToMIB(*getMRI(), MIB);
368  Addr.addSrcToMIB(MIB);
369  MIB.addMemOperand(&MMO);
370  return MIB;
371 }
372 
374  const SrcOp &Addr,
375  MachineMemOperand &MMO) {
376  assert(Val.getLLTTy(*getMRI()).isValid() && "invalid operand type");
377  assert(Addr.getLLTTy(*getMRI()).isPointer() && "invalid operand type");
378 
379  auto MIB = buildInstr(TargetOpcode::G_STORE);
380  Val.addSrcToMIB(MIB);
381  Addr.addSrcToMIB(MIB);
382  MIB.addMemOperand(&MMO);
383  return MIB;
384 }
385 
387  const DstOp &CarryOut,
388  const SrcOp &Op0,
389  const SrcOp &Op1) {
390  return buildInstr(TargetOpcode::G_UADDO, {Res, CarryOut}, {Op0, Op1});
391 }
392 
394  const DstOp &CarryOut,
395  const SrcOp &Op0,
396  const SrcOp &Op1,
397  const SrcOp &CarryIn) {
398  return buildInstr(TargetOpcode::G_UADDE, {Res, CarryOut},
399  {Op0, Op1, CarryIn});
400 }
401 
403  const SrcOp &Op) {
404  return buildInstr(TargetOpcode::G_ANYEXT, Res, Op);
405 }
406 
408  const SrcOp &Op) {
409  return buildInstr(TargetOpcode::G_SEXT, Res, Op);
410 }
411 
413  const SrcOp &Op) {
414  return buildInstr(TargetOpcode::G_ZEXT, Res, Op);
415 }
416 
417 unsigned MachineIRBuilder::getBoolExtOp(bool IsVec, bool IsFP) const {
418  const auto *TLI = getMF().getSubtarget().getTargetLowering();
419  switch (TLI->getBooleanContents(IsVec, IsFP)) {
421  return TargetOpcode::G_SEXT;
423  return TargetOpcode::G_ZEXT;
424  default:
425  return TargetOpcode::G_ANYEXT;
426  }
427 }
428 
430  const SrcOp &Op,
431  bool IsFP) {
432  unsigned ExtOp = getBoolExtOp(getMRI()->getType(Op.getReg()).isVector(), IsFP);
433  return buildInstr(ExtOp, Res, Op);
434 }
435 
437  const DstOp &Res,
438  const SrcOp &Op) {
439  assert((TargetOpcode::G_ANYEXT == ExtOpc || TargetOpcode::G_ZEXT == ExtOpc ||
440  TargetOpcode::G_SEXT == ExtOpc) &&
441  "Expecting Extending Opc");
442  assert(Res.getLLTTy(*getMRI()).isScalar() ||
443  Res.getLLTTy(*getMRI()).isVector());
444  assert(Res.getLLTTy(*getMRI()).isScalar() ==
445  Op.getLLTTy(*getMRI()).isScalar());
446 
447  unsigned Opcode = TargetOpcode::COPY;
448  if (Res.getLLTTy(*getMRI()).getSizeInBits() >
449  Op.getLLTTy(*getMRI()).getSizeInBits())
450  Opcode = ExtOpc;
451  else if (Res.getLLTTy(*getMRI()).getSizeInBits() <
452  Op.getLLTTy(*getMRI()).getSizeInBits())
453  Opcode = TargetOpcode::G_TRUNC;
454  else
455  assert(Res.getLLTTy(*getMRI()) == Op.getLLTTy(*getMRI()));
456 
457  return buildInstr(Opcode, Res, Op);
458 }
459 
461  const SrcOp &Op) {
462  return buildExtOrTrunc(TargetOpcode::G_SEXT, Res, Op);
463 }
464 
466  const SrcOp &Op) {
467  return buildExtOrTrunc(TargetOpcode::G_ZEXT, Res, Op);
468 }
469 
471  const SrcOp &Op) {
472  return buildExtOrTrunc(TargetOpcode::G_ANYEXT, Res, Op);
473 }
474 
476  const SrcOp &Src) {
477  LLT SrcTy = Src.getLLTTy(*getMRI());
478  LLT DstTy = Dst.getLLTTy(*getMRI());
479  if (SrcTy == DstTy)
480  return buildCopy(Dst, Src);
481 
482  unsigned Opcode;
483  if (SrcTy.isPointer() && DstTy.isScalar())
484  Opcode = TargetOpcode::G_PTRTOINT;
485  else if (DstTy.isPointer() && SrcTy.isScalar())
486  Opcode = TargetOpcode::G_INTTOPTR;
487  else {
488  assert(!SrcTy.isPointer() && !DstTy.isPointer() && "n G_ADDRCAST yet");
489  Opcode = TargetOpcode::G_BITCAST;
490  }
491 
492  return buildInstr(Opcode, Dst, Src);
493 }
494 
496  const SrcOp &Src,
497  uint64_t Index) {
498  LLT SrcTy = Src.getLLTTy(*getMRI());
499  LLT DstTy = Dst.getLLTTy(*getMRI());
500 
501 #ifndef NDEBUG
502  assert(SrcTy.isValid() && "invalid operand type");
503  assert(DstTy.isValid() && "invalid operand type");
504  assert(Index + DstTy.getSizeInBits() <= SrcTy.getSizeInBits() &&
505  "extracting off end of register");
506 #endif
507 
508  if (DstTy.getSizeInBits() == SrcTy.getSizeInBits()) {
509  assert(Index == 0 && "insertion past the end of a register");
510  return buildCast(Dst, Src);
511  }
512 
513  auto Extract = buildInstr(TargetOpcode::G_EXTRACT);
514  Dst.addDefToMIB(*getMRI(), Extract);
515  Src.addSrcToMIB(Extract);
516  Extract.addImm(Index);
517  return Extract;
518 }
519 
521  ArrayRef<uint64_t> Indices) {
522 #ifndef NDEBUG
523  assert(Ops.size() == Indices.size() && "incompatible args");
524  assert(!Ops.empty() && "invalid trivial sequence");
525  assert(std::is_sorted(Indices.begin(), Indices.end()) &&
526  "sequence offsets must be in ascending order");
527 
528  assert(getMRI()->getType(Res).isValid() && "invalid operand type");
529  for (auto Op : Ops)
530  assert(getMRI()->getType(Op).isValid() && "invalid operand type");
531 #endif
532 
533  LLT ResTy = getMRI()->getType(Res);
534  LLT OpTy = getMRI()->getType(Ops[0]);
535  unsigned OpSize = OpTy.getSizeInBits();
536  bool MaybeMerge = true;
537  for (unsigned i = 0; i < Ops.size(); ++i) {
538  if (getMRI()->getType(Ops[i]) != OpTy || Indices[i] != i * OpSize) {
539  MaybeMerge = false;
540  break;
541  }
542  }
543 
544  if (MaybeMerge && Ops.size() * OpSize == ResTy.getSizeInBits()) {
545  buildMerge(Res, Ops);
546  return;
547  }
548 
549  Register ResIn = getMRI()->createGenericVirtualRegister(ResTy);
550  buildUndef(ResIn);
551 
552  for (unsigned i = 0; i < Ops.size(); ++i) {
553  Register ResOut = i + 1 == Ops.size()
554  ? Res
556  buildInsert(ResOut, ResIn, Ops[i], Indices[i]);
557  ResIn = ResOut;
558  }
559 }
560 
562  return buildInstr(TargetOpcode::G_IMPLICIT_DEF, {Res}, {});
563 }
564 
566  ArrayRef<Register> Ops) {
567  // Unfortunately to convert from ArrayRef<LLT> to ArrayRef<SrcOp>,
568  // we need some temporary storage for the DstOp objects. Here we use a
569  // sufficiently large SmallVector to not go through the heap.
570  SmallVector<SrcOp, 8> TmpVec(Ops.begin(), Ops.end());
571  return buildInstr(TargetOpcode::G_MERGE_VALUES, Res, TmpVec);
572 }
573 
575  const SrcOp &Op) {
576  // Unfortunately to convert from ArrayRef<LLT> to ArrayRef<DstOp>,
577  // we need some temporary storage for the DstOp objects. Here we use a
578  // sufficiently large SmallVector to not go through the heap.
579  SmallVector<DstOp, 8> TmpVec(Res.begin(), Res.end());
580  return buildInstr(TargetOpcode::G_UNMERGE_VALUES, TmpVec, Op);
581 }
582 
584  const SrcOp &Op) {
585  unsigned NumReg = Op.getLLTTy(*getMRI()).getSizeInBits() / Res.getSizeInBits();
587  for (unsigned I = 0; I != NumReg; ++I)
588  TmpVec.push_back(getMRI()->createGenericVirtualRegister(Res));
589  return buildUnmerge(TmpVec, Op);
590 }
591 
593  const SrcOp &Op) {
594  // Unfortunately to convert from ArrayRef<Register> to ArrayRef<DstOp>,
595  // we need some temporary storage for the DstOp objects. Here we use a
596  // sufficiently large SmallVector to not go through the heap.
597  SmallVector<DstOp, 8> TmpVec(Res.begin(), Res.end());
598  return buildInstr(TargetOpcode::G_UNMERGE_VALUES, TmpVec, Op);
599 }
600 
602  ArrayRef<Register> Ops) {
603  // Unfortunately to convert from ArrayRef<Register> to ArrayRef<SrcOp>,
604  // we need some temporary storage for the DstOp objects. Here we use a
605  // sufficiently large SmallVector to not go through the heap.
606  SmallVector<SrcOp, 8> TmpVec(Ops.begin(), Ops.end());
607  return buildInstr(TargetOpcode::G_BUILD_VECTOR, Res, TmpVec);
608 }
609 
611  const SrcOp &Src) {
612  SmallVector<SrcOp, 8> TmpVec(Res.getLLTTy(*getMRI()).getNumElements(), Src);
613  return buildInstr(TargetOpcode::G_BUILD_VECTOR, Res, TmpVec);
614 }
615 
618  ArrayRef<Register> Ops) {
619  // Unfortunately to convert from ArrayRef<Register> to ArrayRef<SrcOp>,
620  // we need some temporary storage for the DstOp objects. Here we use a
621  // sufficiently large SmallVector to not go through the heap.
622  SmallVector<SrcOp, 8> TmpVec(Ops.begin(), Ops.end());
623  return buildInstr(TargetOpcode::G_BUILD_VECTOR_TRUNC, Res, TmpVec);
624 }
625 
628  // Unfortunately to convert from ArrayRef<Register> to ArrayRef<SrcOp>,
629  // we need some temporary storage for the DstOp objects. Here we use a
630  // sufficiently large SmallVector to not go through the heap.
631  SmallVector<SrcOp, 8> TmpVec(Ops.begin(), Ops.end());
632  return buildInstr(TargetOpcode::G_CONCAT_VECTORS, Res, TmpVec);
633 }
634 
636  Register Op, unsigned Index) {
637  assert(Index + getMRI()->getType(Op).getSizeInBits() <=
638  getMRI()->getType(Res).getSizeInBits() &&
639  "insertion past the end of a register");
640 
641  if (getMRI()->getType(Res).getSizeInBits() ==
642  getMRI()->getType(Op).getSizeInBits()) {
643  return buildCast(Res, Op);
644  }
645 
646  return buildInstr(TargetOpcode::G_INSERT)
647  .addDef(Res)
648  .addUse(Src)
649  .addUse(Op)
650  .addImm(Index);
651 }
652 
654  ArrayRef<Register> ResultRegs,
655  bool HasSideEffects) {
656  auto MIB =
657  buildInstr(HasSideEffects ? TargetOpcode::G_INTRINSIC_W_SIDE_EFFECTS
658  : TargetOpcode::G_INTRINSIC);
659  for (unsigned ResultReg : ResultRegs)
660  MIB.addDef(ResultReg);
661  MIB.addIntrinsicID(ID);
662  return MIB;
663 }
664 
667  bool HasSideEffects) {
668  auto MIB =
669  buildInstr(HasSideEffects ? TargetOpcode::G_INTRINSIC_W_SIDE_EFFECTS
670  : TargetOpcode::G_INTRINSIC);
671  for (DstOp Result : Results)
672  Result.addDefToMIB(*getMRI(), MIB);
673  MIB.addIntrinsicID(ID);
674  return MIB;
675 }
676 
678  const SrcOp &Op) {
679  return buildInstr(TargetOpcode::G_TRUNC, Res, Op);
680 }
681 
683  const SrcOp &Op) {
684  return buildInstr(TargetOpcode::G_FPTRUNC, Res, Op);
685 }
686 
688  const DstOp &Res,
689  const SrcOp &Op0,
690  const SrcOp &Op1) {
691  return buildInstr(TargetOpcode::G_ICMP, Res, {Pred, Op0, Op1});
692 }
693 
695  const DstOp &Res,
696  const SrcOp &Op0,
697  const SrcOp &Op1) {
698 
699  return buildInstr(TargetOpcode::G_FCMP, Res, {Pred, Op0, Op1});
700 }
701 
703  const SrcOp &Tst,
704  const SrcOp &Op0,
705  const SrcOp &Op1) {
706 
707  return buildInstr(TargetOpcode::G_SELECT, {Res}, {Tst, Op0, Op1});
708 }
709 
712  const SrcOp &Elt, const SrcOp &Idx) {
713  return buildInstr(TargetOpcode::G_INSERT_VECTOR_ELT, Res, {Val, Elt, Idx});
714 }
715 
718  const SrcOp &Idx) {
719  return buildInstr(TargetOpcode::G_EXTRACT_VECTOR_ELT, Res, {Val, Idx});
720 }
721 
723  Register OldValRes, Register SuccessRes, Register Addr, Register CmpVal,
724  Register NewVal, MachineMemOperand &MMO) {
725 #ifndef NDEBUG
726  LLT OldValResTy = getMRI()->getType(OldValRes);
727  LLT SuccessResTy = getMRI()->getType(SuccessRes);
728  LLT AddrTy = getMRI()->getType(Addr);
729  LLT CmpValTy = getMRI()->getType(CmpVal);
730  LLT NewValTy = getMRI()->getType(NewVal);
731  assert(OldValResTy.isScalar() && "invalid operand type");
732  assert(SuccessResTy.isScalar() && "invalid operand type");
733  assert(AddrTy.isPointer() && "invalid operand type");
734  assert(CmpValTy.isValid() && "invalid operand type");
735  assert(NewValTy.isValid() && "invalid operand type");
736  assert(OldValResTy == CmpValTy && "type mismatch");
737  assert(OldValResTy == NewValTy && "type mismatch");
738 #endif
739 
740  return buildInstr(TargetOpcode::G_ATOMIC_CMPXCHG_WITH_SUCCESS)
741  .addDef(OldValRes)
742  .addDef(SuccessRes)
743  .addUse(Addr)
744  .addUse(CmpVal)
745  .addUse(NewVal)
746  .addMemOperand(&MMO);
747 }
748 
751  Register CmpVal, Register NewVal,
752  MachineMemOperand &MMO) {
753 #ifndef NDEBUG
754  LLT OldValResTy = getMRI()->getType(OldValRes);
755  LLT AddrTy = getMRI()->getType(Addr);
756  LLT CmpValTy = getMRI()->getType(CmpVal);
757  LLT NewValTy = getMRI()->getType(NewVal);
758  assert(OldValResTy.isScalar() && "invalid operand type");
759  assert(AddrTy.isPointer() && "invalid operand type");
760  assert(CmpValTy.isValid() && "invalid operand type");
761  assert(NewValTy.isValid() && "invalid operand type");
762  assert(OldValResTy == CmpValTy && "type mismatch");
763  assert(OldValResTy == NewValTy && "type mismatch");
764 #endif
765 
766  return buildInstr(TargetOpcode::G_ATOMIC_CMPXCHG)
767  .addDef(OldValRes)
768  .addUse(Addr)
769  .addUse(CmpVal)
770  .addUse(NewVal)
771  .addMemOperand(&MMO);
772 }
773 
775  Register OldValRes,
776  Register Addr,
777  Register Val,
778  MachineMemOperand &MMO) {
779 #ifndef NDEBUG
780  LLT OldValResTy = getMRI()->getType(OldValRes);
781  LLT AddrTy = getMRI()->getType(Addr);
782  LLT ValTy = getMRI()->getType(Val);
783  assert(OldValResTy.isScalar() && "invalid operand type");
784  assert(AddrTy.isPointer() && "invalid operand type");
785  assert(ValTy.isValid() && "invalid operand type");
786  assert(OldValResTy == ValTy && "type mismatch");
787 #endif
788 
789  return buildInstr(Opcode)
790  .addDef(OldValRes)
791  .addUse(Addr)
792  .addUse(Val)
793  .addMemOperand(&MMO);
794 }
795 
798  Register Val, MachineMemOperand &MMO) {
799  return buildAtomicRMW(TargetOpcode::G_ATOMICRMW_XCHG, OldValRes, Addr, Val,
800  MMO);
801 }
804  Register Val, MachineMemOperand &MMO) {
805  return buildAtomicRMW(TargetOpcode::G_ATOMICRMW_ADD, OldValRes, Addr, Val,
806  MMO);
807 }
810  Register Val, MachineMemOperand &MMO) {
811  return buildAtomicRMW(TargetOpcode::G_ATOMICRMW_SUB, OldValRes, Addr, Val,
812  MMO);
813 }
816  Register Val, MachineMemOperand &MMO) {
817  return buildAtomicRMW(TargetOpcode::G_ATOMICRMW_AND, OldValRes, Addr, Val,
818  MMO);
819 }
822  Register Val, MachineMemOperand &MMO) {
823  return buildAtomicRMW(TargetOpcode::G_ATOMICRMW_NAND, OldValRes, Addr, Val,
824  MMO);
825 }
827  Register Addr,
828  Register Val,
829  MachineMemOperand &MMO) {
830  return buildAtomicRMW(TargetOpcode::G_ATOMICRMW_OR, OldValRes, Addr, Val,
831  MMO);
832 }
835  Register Val, MachineMemOperand &MMO) {
836  return buildAtomicRMW(TargetOpcode::G_ATOMICRMW_XOR, OldValRes, Addr, Val,
837  MMO);
838 }
841  Register Val, MachineMemOperand &MMO) {
842  return buildAtomicRMW(TargetOpcode::G_ATOMICRMW_MAX, OldValRes, Addr, Val,
843  MMO);
844 }
847  Register Val, MachineMemOperand &MMO) {
848  return buildAtomicRMW(TargetOpcode::G_ATOMICRMW_MIN, OldValRes, Addr, Val,
849  MMO);
850 }
853  Register Val, MachineMemOperand &MMO) {
854  return buildAtomicRMW(TargetOpcode::G_ATOMICRMW_UMAX, OldValRes, Addr, Val,
855  MMO);
856 }
859  Register Val, MachineMemOperand &MMO) {
860  return buildAtomicRMW(TargetOpcode::G_ATOMICRMW_UMIN, OldValRes, Addr, Val,
861  MMO);
862 }
863 
865 MachineIRBuilder::buildFence(unsigned Ordering, unsigned Scope) {
866  return buildInstr(TargetOpcode::G_FENCE)
867  .addImm(Ordering)
868  .addImm(Scope);
869 }
870 
873 #ifndef NDEBUG
874  assert(getMRI()->getType(Res).isPointer() && "invalid res type");
875 #endif
876 
877  return buildInstr(TargetOpcode::G_BLOCK_ADDR).addDef(Res).addBlockAddress(BA);
878 }
879 
880 void MachineIRBuilder::validateTruncExt(const LLT &DstTy, const LLT &SrcTy,
881  bool IsExtend) {
882 #ifndef NDEBUG
883  if (DstTy.isVector()) {
884  assert(SrcTy.isVector() && "mismatched cast between vector and non-vector");
885  assert(SrcTy.getNumElements() == DstTy.getNumElements() &&
886  "different number of elements in a trunc/ext");
887  } else
888  assert(DstTy.isScalar() && SrcTy.isScalar() && "invalid extend/trunc");
889 
890  if (IsExtend)
891  assert(DstTy.getSizeInBits() > SrcTy.getSizeInBits() &&
892  "invalid narrowing extend");
893  else
894  assert(DstTy.getSizeInBits() < SrcTy.getSizeInBits() &&
895  "invalid widening trunc");
896 #endif
897 }
898 
899 void MachineIRBuilder::validateSelectOp(const LLT &ResTy, const LLT &TstTy,
900  const LLT &Op0Ty, const LLT &Op1Ty) {
901 #ifndef NDEBUG
902  assert((ResTy.isScalar() || ResTy.isVector() || ResTy.isPointer()) &&
903  "invalid operand type");
904  assert((ResTy == Op0Ty && ResTy == Op1Ty) && "type mismatch");
905  if (ResTy.isScalar() || ResTy.isPointer())
906  assert(TstTy.isScalar() && "type mismatch");
907  else
908  assert((TstTy.isScalar() ||
909  (TstTy.isVector() &&
910  TstTy.getNumElements() == Op0Ty.getNumElements())) &&
911  "type mismatch");
912 #endif
913 }
914 
916  ArrayRef<DstOp> DstOps,
917  ArrayRef<SrcOp> SrcOps,
918  Optional<unsigned> Flags) {
919  switch (Opc) {
920  default:
921  break;
922  case TargetOpcode::G_SELECT: {
923  assert(DstOps.size() == 1 && "Invalid select");
924  assert(SrcOps.size() == 3 && "Invalid select");
926  DstOps[0].getLLTTy(*getMRI()), SrcOps[0].getLLTTy(*getMRI()),
927  SrcOps[1].getLLTTy(*getMRI()), SrcOps[2].getLLTTy(*getMRI()));
928  break;
929  }
930  case TargetOpcode::G_ADD:
931  case TargetOpcode::G_AND:
932  case TargetOpcode::G_MUL:
933  case TargetOpcode::G_OR:
934  case TargetOpcode::G_SUB:
935  case TargetOpcode::G_XOR:
936  case TargetOpcode::G_UDIV:
937  case TargetOpcode::G_SDIV:
938  case TargetOpcode::G_UREM:
939  case TargetOpcode::G_SREM:
940  case TargetOpcode::G_SMIN:
941  case TargetOpcode::G_SMAX:
942  case TargetOpcode::G_UMIN:
943  case TargetOpcode::G_UMAX: {
944  // All these are binary ops.
945  assert(DstOps.size() == 1 && "Invalid Dst");
946  assert(SrcOps.size() == 2 && "Invalid Srcs");
947  validateBinaryOp(DstOps[0].getLLTTy(*getMRI()),
948  SrcOps[0].getLLTTy(*getMRI()),
949  SrcOps[1].getLLTTy(*getMRI()));
950  break;
951  }
952  case TargetOpcode::G_SHL:
953  case TargetOpcode::G_ASHR:
954  case TargetOpcode::G_LSHR: {
955  assert(DstOps.size() == 1 && "Invalid Dst");
956  assert(SrcOps.size() == 2 && "Invalid Srcs");
957  validateShiftOp(DstOps[0].getLLTTy(*getMRI()),
958  SrcOps[0].getLLTTy(*getMRI()),
959  SrcOps[1].getLLTTy(*getMRI()));
960  break;
961  }
962  case TargetOpcode::G_SEXT:
963  case TargetOpcode::G_ZEXT:
964  case TargetOpcode::G_ANYEXT:
965  assert(DstOps.size() == 1 && "Invalid Dst");
966  assert(SrcOps.size() == 1 && "Invalid Srcs");
967  validateTruncExt(DstOps[0].getLLTTy(*getMRI()),
968  SrcOps[0].getLLTTy(*getMRI()), true);
969  break;
970  case TargetOpcode::G_TRUNC:
971  case TargetOpcode::G_FPTRUNC: {
972  assert(DstOps.size() == 1 && "Invalid Dst");
973  assert(SrcOps.size() == 1 && "Invalid Srcs");
974  validateTruncExt(DstOps[0].getLLTTy(*getMRI()),
975  SrcOps[0].getLLTTy(*getMRI()), false);
976  break;
977  }
978  case TargetOpcode::COPY:
979  assert(DstOps.size() == 1 && "Invalid Dst");
980  // If the caller wants to add a subreg source it has to be done separately
981  // so we may not have any SrcOps at this point yet.
982  break;
983  case TargetOpcode::G_FCMP:
984  case TargetOpcode::G_ICMP: {
985  assert(DstOps.size() == 1 && "Invalid Dst Operands");
986  assert(SrcOps.size() == 3 && "Invalid Src Operands");
987  // For F/ICMP, the first src operand is the predicate, followed by
988  // the two comparands.
989  assert(SrcOps[0].getSrcOpKind() == SrcOp::SrcType::Ty_Predicate &&
990  "Expecting predicate");
991  assert([&]() -> bool {
992  CmpInst::Predicate Pred = SrcOps[0].getPredicate();
993  return Opc == TargetOpcode::G_ICMP ? CmpInst::isIntPredicate(Pred)
994  : CmpInst::isFPPredicate(Pred);
995  }() && "Invalid predicate");
996  assert(SrcOps[1].getLLTTy(*getMRI()) == SrcOps[2].getLLTTy(*getMRI()) &&
997  "Type mismatch");
998  assert([&]() -> bool {
999  LLT Op0Ty = SrcOps[1].getLLTTy(*getMRI());
1000  LLT DstTy = DstOps[0].getLLTTy(*getMRI());
1001  if (Op0Ty.isScalar() || Op0Ty.isPointer())
1002  return DstTy.isScalar();
1003  else
1004  return DstTy.isVector() &&
1005  DstTy.getNumElements() == Op0Ty.getNumElements();
1006  }() && "Type Mismatch");
1007  break;
1008  }
1009  case TargetOpcode::G_UNMERGE_VALUES: {
1010  assert(!DstOps.empty() && "Invalid trivial sequence");
1011  assert(SrcOps.size() == 1 && "Invalid src for Unmerge");
1012  assert(std::all_of(DstOps.begin(), DstOps.end(),
1013  [&, this](const DstOp &Op) {
1014  return Op.getLLTTy(*getMRI()) ==
1015  DstOps[0].getLLTTy(*getMRI());
1016  }) &&
1017  "type mismatch in output list");
1018  assert(DstOps.size() * DstOps[0].getLLTTy(*getMRI()).getSizeInBits() ==
1019  SrcOps[0].getLLTTy(*getMRI()).getSizeInBits() &&
1020  "input operands do not cover output register");
1021  break;
1022  }
1023  case TargetOpcode::G_MERGE_VALUES: {
1024  assert(!SrcOps.empty() && "invalid trivial sequence");
1025  assert(DstOps.size() == 1 && "Invalid Dst");
1026  assert(std::all_of(SrcOps.begin(), SrcOps.end(),
1027  [&, this](const SrcOp &Op) {
1028  return Op.getLLTTy(*getMRI()) ==
1029  SrcOps[0].getLLTTy(*getMRI());
1030  }) &&
1031  "type mismatch in input list");
1032  assert(SrcOps.size() * SrcOps[0].getLLTTy(*getMRI()).getSizeInBits() ==
1033  DstOps[0].getLLTTy(*getMRI()).getSizeInBits() &&
1034  "input operands do not cover output register");
1035  if (SrcOps.size() == 1)
1036  return buildCast(DstOps[0], SrcOps[0]);
1037  if (DstOps[0].getLLTTy(*getMRI()).isVector())
1038  return buildInstr(TargetOpcode::G_CONCAT_VECTORS, DstOps, SrcOps);
1039  break;
1040  }
1041  case TargetOpcode::G_EXTRACT_VECTOR_ELT: {
1042  assert(DstOps.size() == 1 && "Invalid Dst size");
1043  assert(SrcOps.size() == 2 && "Invalid Src size");
1044  assert(SrcOps[0].getLLTTy(*getMRI()).isVector() && "Invalid operand type");
1045  assert((DstOps[0].getLLTTy(*getMRI()).isScalar() ||
1046  DstOps[0].getLLTTy(*getMRI()).isPointer()) &&
1047  "Invalid operand type");
1048  assert(SrcOps[1].getLLTTy(*getMRI()).isScalar() && "Invalid operand type");
1049  assert(SrcOps[0].getLLTTy(*getMRI()).getElementType() ==
1050  DstOps[0].getLLTTy(*getMRI()) &&
1051  "Type mismatch");
1052  break;
1053  }
1054  case TargetOpcode::G_INSERT_VECTOR_ELT: {
1055  assert(DstOps.size() == 1 && "Invalid dst size");
1056  assert(SrcOps.size() == 3 && "Invalid src size");
1057  assert(DstOps[0].getLLTTy(*getMRI()).isVector() &&
1058  SrcOps[0].getLLTTy(*getMRI()).isVector() && "Invalid operand type");
1059  assert(DstOps[0].getLLTTy(*getMRI()).getElementType() ==
1060  SrcOps[1].getLLTTy(*getMRI()) &&
1061  "Type mismatch");
1062  assert(SrcOps[2].getLLTTy(*getMRI()).isScalar() && "Invalid index");
1063  assert(DstOps[0].getLLTTy(*getMRI()).getNumElements() ==
1064  SrcOps[0].getLLTTy(*getMRI()).getNumElements() &&
1065  "Type mismatch");
1066  break;
1067  }
1068  case TargetOpcode::G_BUILD_VECTOR: {
1069  assert((!SrcOps.empty() || SrcOps.size() < 2) &&
1070  "Must have at least 2 operands");
1071  assert(DstOps.size() == 1 && "Invalid DstOps");
1072  assert(DstOps[0].getLLTTy(*getMRI()).isVector() &&
1073  "Res type must be a vector");
1074  assert(std::all_of(SrcOps.begin(), SrcOps.end(),
1075  [&, this](const SrcOp &Op) {
1076  return Op.getLLTTy(*getMRI()) ==
1077  SrcOps[0].getLLTTy(*getMRI());
1078  }) &&
1079  "type mismatch in input list");
1080  assert(SrcOps.size() * SrcOps[0].getLLTTy(*getMRI()).getSizeInBits() ==
1081  DstOps[0].getLLTTy(*getMRI()).getSizeInBits() &&
1082  "input scalars do not exactly cover the output vector register");
1083  break;
1084  }
1085  case TargetOpcode::G_BUILD_VECTOR_TRUNC: {
1086  assert((!SrcOps.empty() || SrcOps.size() < 2) &&
1087  "Must have at least 2 operands");
1088  assert(DstOps.size() == 1 && "Invalid DstOps");
1089  assert(DstOps[0].getLLTTy(*getMRI()).isVector() &&
1090  "Res type must be a vector");
1091  assert(std::all_of(SrcOps.begin(), SrcOps.end(),
1092  [&, this](const SrcOp &Op) {
1093  return Op.getLLTTy(*getMRI()) ==
1094  SrcOps[0].getLLTTy(*getMRI());
1095  }) &&
1096  "type mismatch in input list");
1097  if (SrcOps[0].getLLTTy(*getMRI()).getSizeInBits() ==
1098  DstOps[0].getLLTTy(*getMRI()).getElementType().getSizeInBits())
1099  return buildInstr(TargetOpcode::G_BUILD_VECTOR, DstOps, SrcOps);
1100  break;
1101  }
1102  case TargetOpcode::G_CONCAT_VECTORS: {
1103  assert(DstOps.size() == 1 && "Invalid DstOps");
1104  assert((!SrcOps.empty() || SrcOps.size() < 2) &&
1105  "Must have at least 2 operands");
1106  assert(std::all_of(SrcOps.begin(), SrcOps.end(),
1107  [&, this](const SrcOp &Op) {
1108  return (Op.getLLTTy(*getMRI()).isVector() &&
1109  Op.getLLTTy(*getMRI()) ==
1110  SrcOps[0].getLLTTy(*getMRI()));
1111  }) &&
1112  "type mismatch in input list");
1113  assert(SrcOps.size() * SrcOps[0].getLLTTy(*getMRI()).getSizeInBits() ==
1114  DstOps[0].getLLTTy(*getMRI()).getSizeInBits() &&
1115  "input vectors do not exactly cover the output vector register");
1116  break;
1117  }
1118  case TargetOpcode::G_UADDE: {
1119  assert(DstOps.size() == 2 && "Invalid no of dst operands");
1120  assert(SrcOps.size() == 3 && "Invalid no of src operands");
1121  assert(DstOps[0].getLLTTy(*getMRI()).isScalar() && "Invalid operand");
1122  assert((DstOps[0].getLLTTy(*getMRI()) == SrcOps[0].getLLTTy(*getMRI())) &&
1123  (DstOps[0].getLLTTy(*getMRI()) == SrcOps[1].getLLTTy(*getMRI())) &&
1124  "Invalid operand");
1125  assert(DstOps[1].getLLTTy(*getMRI()).isScalar() && "Invalid operand");
1126  assert(DstOps[1].getLLTTy(*getMRI()) == SrcOps[2].getLLTTy(*getMRI()) &&
1127  "type mismatch");
1128  break;
1129  }
1130  }
1131 
1132  auto MIB = buildInstr(Opc);
1133  for (const DstOp &Op : DstOps)
1134  Op.addDefToMIB(*getMRI(), MIB);
1135  for (const SrcOp &Op : SrcOps)
1136  Op.addSrcToMIB(MIB);
1137  if (Flags)
1138  MIB->setFlags(*Flags);
1139  return MIB;
1140 }
MachineInstrBuilder buildDirectDbgValue(Register Reg, const MDNode *Variable, const MDNode *Expr)
Build and insert a DBG_VALUE instruction expressing the fact that the associated Variable lives in Re...
bool isFPPredicate() const
Definition: InstrTypes.h:824
uint64_t CallInst * C
const MachineInstrBuilder & addMetadata(const MDNode *MD) const
virtual MachineInstrBuilder buildConstant(const DstOp &Res, const ConstantInt &Val)
Build and insert Res = G_CONSTANT Val.
MachineInstrBuilder buildJumpTable(const LLT PtrTy, unsigned JTI)
Build and insert Res = G_JUMP_TABLE JTI.
void addDefToMIB(MachineRegisterInfo &MRI, MachineInstrBuilder &MIB) const
The CSE Analysis object.
Definition: CSEInfo.h:71
MachineInstrBuilder buildZExtOrTrunc(const DstOp &Res, const SrcOp &Op)
Build and insert Res = G_ZEXT Op, Res = G_TRUNC Op, or Res = COPY Op depending on the differing sizes...
MachineInstrBuilder buildBlockAddress(Register Res, const BlockAddress *BA)
Build and insert Res = G_BLOCK_ADDR BA.
MachineInstrBuilder buildUnmerge(ArrayRef< LLT > Res, const SrcOp &Op)
Build and insert Res0, ...
MachineInstrBuilder buildBrCond(Register Tst, MachineBasicBlock &Dest)
Build and insert G_BRCOND Tst, Dest.
This class represents lattice values for constants.
Definition: AllocatorList.h:23
MachineInstrBuilder buildInsert(Register Res, Register Src, Register Op, unsigned Index)
MachineInstrBuilder buildSExtOrTrunc(const DstOp &Res, const SrcOp &Op)
Build and insert Res = G_SEXT Op, Res = G_TRUNC Op, or Res = COPY Op depending on the differing sizes...
iterator begin() const
Definition: ArrayRef.h:136
unsigned getScalarSizeInBits() const
unsigned getSizeInBits(Register Reg, const MachineRegisterInfo &MRI, const TargetRegisterInfo &TRI) const
Get the size in bits of Reg.
bool isScalar() const
GISelChangeObserver * Observer
MachineInstrBuilder buildCast(const DstOp &Dst, const SrcOp &Src)
Build and insert an appropriate cast between two registers of equal size.
unsigned Reg
virtual const TargetLowering * getTargetLowering() const
LLT getScalarType() const
Function Alias Analysis Results
LLT getType(unsigned Reg) const
Get the low-level type of Reg or LLT{} if Reg is not a generic (target independent) virtual register...
void addSrcToMIB(MachineInstrBuilder &MIB) const
static unsigned getSizeInBits(const fltSemantics &Sem)
Returns the size of the floating point number (in bits) in the given semantics.
Definition: APFloat.cpp:205
bool all_of(R &&range, UnaryPredicate P)
Provide wrappers to std::all_of which take ranges instead of having to pass begin/end explicitly...
Definition: STLExtras.h:1192
A debug info location.
Definition: DebugLoc.h:33
Metadata node.
Definition: Metadata.h:863
void buildSequence(Register Res, ArrayRef< Register > Ops, ArrayRef< uint64_t > Indices)
Build and insert instructions to put Ops together at the specified p Indices to form a larger registe...
const fltSemantics & getSemantics() const
Definition: APFloat.h:1165
void validateSelectOp(const LLT &ResTy, const LLT &TstTy, const LLT &Op0Ty, const LLT &Op1Ty)
MachineInstrBuilder buildUAddo(const DstOp &Res, const DstOp &CarryOut, const SrcOp &Op0, const SrcOp &Op1)
Build and insert Res, CarryOut = G_UADDO Op0, Op1.
unsigned getBitWidth() const
getBitWidth - Return the bitwidth of this constant.
Definition: Constants.h:142
LegalityPredicate isPointer(unsigned TypeIdx)
True iff the specified type index is a pointer (with any address space).
LegalityPredicate isVector(unsigned TypeIdx)
True iff the specified type index is a vector.
Optional< MachineInstrBuilder > materializeGEP(Register &Res, Register Op0, const LLT &ValueTy, uint64_t Value)
Materialize and insert Res = G_GEP Op0, (G_CONSTANT Value)
MachineInstrBuilder buildBrJT(Register TablePtr, unsigned JTI, Register IndexReg)
Build and insert G_BRJT TablePtr, JTI, IndexReg.
MachineInstrBuilder buildExtract(const DstOp &Res, const SrcOp &Src, uint64_t Index)
Build and insert `Res0, ...
MachineInstrBuilder buildAtomicRMWXor(Register OldValRes, Register Addr, Register Val, MachineMemOperand &MMO)
Build and insert OldValRes<def> = G_ATOMICRMW_XOR Addr, Val, MMO.
bool isVector() const
void setMF(MachineFunction &MF)
The address of a basic block.
Definition: Constants.h:839
A description of a memory reference used in the backend.
void setInsertPt(MachineBasicBlock &MBB, MachineBasicBlock::iterator II)
Set the insertion point before the specified position.
MachineInstrBuilder buildAnyExt(const DstOp &Res, const SrcOp &Op)
Build and insert Res = G_ANYEXT Op0.
MachineInstrBuilder buildExtOrTrunc(unsigned ExtOpc, const DstOp &Res, const SrcOp &Op)
Build and insert Res = ExtOpc, Res = G_TRUNC Op, or Res = COPY Op depending on the differing sizes of...
MachineInstrBuilder buildUAdde(const DstOp &Res, const DstOp &CarryOut, const SrcOp &Op0, const SrcOp &Op1, const SrcOp &CarryIn)
Build and insert Res, CarryOut = G_UADDE Op0, Op1, CarryIn.
const MachineInstrBuilder & addUse(unsigned RegNo, unsigned Flags=0, unsigned SubReg=0) const
Add a virtual register use operand.
void validateTruncExt(const LLT &Dst, const LLT &Src, bool IsExtend)
MachineInstrBuilder buildAnyExtOrTrunc(const DstOp &Res, const SrcOp &Op)
Res = COPY Op depending on the differing sizes of Res and Op.
MachineBasicBlock::iterator II
void recordInsertion(MachineInstr *MI) const
APFloat getAPFloatFromSize(double Val, unsigned Size)
Returns an APFloat from Val converted to the appropriate size.
Definition: Utils.cpp:309
MachineInstrBuilder buildLoadInstr(unsigned Opcode, const DstOp &Res, const SrcOp &Addr, MachineMemOperand &MMO)
Build and insert Res = <opcode> Addr, MMO.
MachineInstrBuilder buildFence(unsigned Ordering, unsigned Scope)
Build and insert G_FENCE Ordering, Scope.
MachineInstrBuilder buildBrIndirect(Register Tgt)
Build and insert G_BRINDIRECT Tgt.
MachineInstrBuilder buildInstrNoInsert(unsigned Opcode)
Build but don't insert <empty> = Opcode <empty>.
void validateBinaryOp(const LLT &Res, const LLT &Op0, const LLT &Op1)
MachineFunction & getMF()
Getter for the function we currently build.
const MachineInstrBuilder & addFPImm(const ConstantFP *Val) const
ArrayRef - Represent a constant reference to an array (0 or more elements consecutively in memory)...
Definition: APInt.h:32
virtual const TargetInstrInfo * getInstrInfo() const
MachineInstrBuilder buildAtomicRMWUmax(Register OldValRes, Register Addr, Register Val, MachineMemOperand &MMO)
Build and insert OldValRes<def> = G_ATOMICRMW_UMAX Addr, Val, MMO.
static Function * getFunction(Constant *C)
Definition: Evaluator.cpp:258
instr_iterator insert(instr_iterator I, MachineInstr *M)
Insert MI into the instruction list before I, possibly inside a bundle.
MachineInstrBuilder buildExtractVectorElement(const DstOp &Res, const SrcOp &Val, const SrcOp &Idx)
Build and insert Res = G_EXTRACT_VECTOR_ELT Val, Idx.
Analysis containing CSE Info
Definition: CSEInfo.cpp:20
void setChangeObserver(GISelChangeObserver &Observer)
MachineBasicBlock::iterator getInsertPt()
Current insertion point for new instructions.
MachineInstrBuilder buildDbgLabel(const MDNode *Label)
Build and insert a DBG_LABEL instructions specifying that Label is given.
MachineInstrBuilder BuildMI(MachineFunction &MF, const DebugLoc &DL, const MCInstrDesc &MCID)
Builder interface. Specify how to create the initial instruction itself.
MachineInstrBundleIterator< MachineInstr > iterator
MachineInstrBuilder buildSExt(const DstOp &Res, const SrcOp &Op)
Build and insert Res = G_SEXT Op.
void validateShiftOp(const LLT &Res, const LLT &Op0, const LLT &Op1)
MachineRegisterInfo * getMRI()
Getter for MRI.
Abstract class that contains various methods for clients to notify about changes. ...
MachineInstrBuilder buildFPTrunc(const DstOp &Res, const SrcOp &Op)
Build and insert Res = G_FPTRUNC Op.
const MachineInstrBuilder & addBlockAddress(const BlockAddress *BA, int64_t Offset=0, unsigned char TargetFlags=0) const
const TargetInstrInfo * TII
Information used to access the description of the opcodes.
const MachineInstrBuilder & addCImm(const ConstantInt *Val) const
MachineInstrBuilder buildAtomicRMWMax(Register OldValRes, Register Addr, Register Val, MachineMemOperand &MMO)
Build and insert OldValRes<def> = G_ATOMICRMW_MAX Addr, Val, MMO.
const TargetSubtargetInfo & getSubtarget() const
getSubtarget - Return the subtarget for which this machine code is being compiled.
MachineInstrBuilder buildInstr(unsigned Opcode)
Build and insert <empty> = Opcode <empty>.
size_t size() const
size - Get the array size.
Definition: ArrayRef.h:148
MachineInstrBuilder buildZExt(const DstOp &Res, const SrcOp &Op)
Build and insert Res = G_ZEXT Op.
This is an important base class in LLVM.
Definition: Constant.h:41
MachineInstrBuilder buildBuildVector(const DstOp &Res, ArrayRef< Register > Ops)
Build and insert Res = G_BUILD_VECTOR Op0, ...
MachineInstrBuilder buildAtomicRMWXchg(Register OldValRes, Register Addr, Register Val, MachineMemOperand &MMO)
Build and insert OldValRes<def> = G_ATOMICRMW_XCHG Addr, Val, MMO.
virtual void createdInstr(MachineInstr &MI)=0
An instruction has been created and inserted into the function.
ConstantFP - Floating Point Values [float, double].
Definition: Constants.h:263
void setInstr(MachineInstr &MI)
Set the insertion point to before MI.
bool isValid() const
Predicate
This enumeration lists the possible predicates for CmpInst subclasses.
Definition: InstrTypes.h:732
MachineInstrBuilder buildGlobalValue(const DstOp &Res, const GlobalValue *GV)
Build and insert Res = G_GLOBAL_VALUE GV.
MachineInstrBuilder buildFIDbgValue(int FI, const MDNode *Variable, const MDNode *Expr)
Build and insert a DBG_VALUE instruction expressing the fact that the associated Variable lives in th...
unsigned getAddressSpace() const
Return the address space of the Pointer type.
Definition: DerivedTypes.h:572
DebugLoc DL
Debug location to be set to any instruction we create.
self_iterator getIterator()
Definition: ilist_node.h:81
unsigned getAddressSpace() const
MachineInstrBuilder buildGEP(const DstOp &Res, const SrcOp &Op0, const SrcOp &Op1)
Build and insert Res = G_GEP Op0, Op1.
const MachineInstrBuilder & addFrameIndex(int Idx) const
LLVMContext & getContext() const
getContext - Return a reference to the LLVMContext associated with this function. ...
Definition: Function.cpp:205
MachineInstrBuilder buildCopy(const DstOp &Res, const SrcOp &Op)
Build and insert Res = COPY Op.
MachineInstrBuilder buildTrunc(const DstOp &Res, const SrcOp &Op)
Build and insert Res = G_TRUNC Op.
static wasm::ValType getType(const TargetRegisterClass *RC)
MachineInstrBuilder buildAtomicRMWAdd(Register OldValRes, Register Addr, Register Val, MachineMemOperand &MMO)
Build and insert OldValRes<def> = G_ATOMICRMW_ADD Addr, Val, MMO.
MachineInstrBuilder buildBuildVectorTrunc(const DstOp &Res, ArrayRef< Register > Ops)
Build and insert Res = G_BUILD_VECTOR_TRUNC Op0, ...
MachineInstrBuilder buildIntrinsic(Intrinsic::ID ID, ArrayRef< Register > Res, bool HasSideEffects)
Build and insert either a G_INTRINSIC (if HasSideEffects is false) or G_INTRINSIC_W_SIDE_EFFECTS inst...
const APFloat & getValueAPF() const
Definition: Constants.h:302
MachineInstrBuilder buildBr(MachineBasicBlock &Dest)
Build and insert G_BR Dest.
MachineInstrBuilder buildConstDbgValue(const Constant &C, const MDNode *Variable, const MDNode *Expr)
Build and insert a DBG_VALUE instructions specifying that Variable is given by C (suitably modified b...
static IntegerType * get(LLVMContext &C, unsigned NumBits)
This static method is the primary way of constructing an IntegerType.
Definition: Type.cpp:239
MachineInstrBuilder buildLoad(const DstOp &Res, const SrcOp &Addr, MachineMemOperand &MMO)
Build and insert Res = G_LOAD Addr, MMO.
MachineInstrBuilder buildICmp(CmpInst::Predicate Pred, const DstOp &Res, const SrcOp &Op0, const SrcOp &Op1)
Build and insert a Res = G_ICMP Pred, Op0, Op1.
This is the shared class of boolean and integer constants.
Definition: Constants.h:83
MachineInstrBuilder buildAtomicRMWSub(Register OldValRes, Register Addr, Register Val, MachineMemOperand &MMO)
Build and insert OldValRes<def> = G_ATOMICRMW_SUB Addr, Val, MMO.
virtual MachineInstrBuilder buildFConstant(const DstOp &Res, const ConstantFP &Val)
Build and insert Res = G_FCONSTANT Val.
This is a 'vector' (really, a variable-sized array), optimized for the case when the array is small...
Definition: SmallVector.h:837
MachineInstrBuilder buildFrameIndex(const DstOp &Res, int Idx)
Build and insert Res = G_FRAME_INDEX Idx.
iterator end() const
Definition: ArrayRef.h:137
unsigned getSizeInBits() const
Returns the total size of the type. Must only be called on sized types.
const MachineInstrBuilder & addMemOperand(MachineMemOperand *MMO) const
MachineInstrBuilder buildAtomicRMWOr(Register OldValRes, Register Addr, Register Val, MachineMemOperand &MMO)
Build and insert OldValRes<def> = G_ATOMICRMW_OR Addr, Val, MMO.
MachineInstrBuilder buildAtomicRMWUmin(Register OldValRes, Register Addr, Register Val, MachineMemOperand &MMO)
Build and insert OldValRes<def> = G_ATOMICRMW_UMIN Addr, Val, MMO.
const TargetInstrInfo & getTII()
static Constant * get(Type *Ty, uint64_t V, bool isSigned=false)
If Ty is a vector type, return a Constant with a splat of the given value.
Definition: Constants.cpp:631
MachineInstrBuilder buildSelect(const DstOp &Res, const SrcOp &Tst, const SrcOp &Op0, const SrcOp &Op1)
Build and insert a Res = G_SELECT Tst, Op0, Op1.
LegalityPredicate isScalar(unsigned TypeIdx)
True iff the specified type index is a scalar.
static Constant * get(Type *Ty, double V)
This returns a ConstantFP, or a vector containing a splat of a ConstantFP, for the specified value in...
Definition: Constants.cpp:694
MachineInstrBuilder buildPtrMask(const DstOp &Res, const SrcOp &Op0, uint32_t NumBits)
Build and insert Res = G_PTR_MASK Op0, NumBits.
LLT getLLTTy(const MachineRegisterInfo &MRI) const
const Function & getFunction() const
Return the LLVM function that this machine code represents.
void setCSEInfo(GISelCSEInfo *Info)
This file declares the MachineIRBuilder class.
MachineInstrBuilder buildInsertVectorElement(const DstOp &Res, const SrcOp &Val, const SrcOp &Elt, const SrcOp &Idx)
Build and insert Res = G_INSERT_VECTOR_ELT Val, Elt, Idx.
bool isIntPredicate() const
Definition: InstrTypes.h:825
Class for arbitrary precision integers.
Definition: APInt.h:69
Register getReg() const
MachineInstrBuilder buildAtomicCmpXchgWithSuccess(Register OldValRes, Register SuccessRes, Register Addr, Register CmpVal, Register NewVal, MachineMemOperand &MMO)
Build and insert OldValRes<def>, SuccessRes<def> = G_ATOMIC_CMPXCHG_WITH_SUCCESS Addr, CmpVal, NewVal, MMO.
unsigned getBoolExtOp(bool IsVec, bool IsFP) const
LLT getLLTTy(const MachineRegisterInfo &MRI) const
bool isPointer() const
MachineInstrBuilder buildAtomicRMWAnd(Register OldValRes, Register Addr, Register Val, MachineMemOperand &MMO)
Build and insert OldValRes<def> = G_ATOMICRMW_AND Addr, Val, MMO.
const MachineBasicBlock * getParent() const
Definition: MachineInstr.h:256
MachineInstrBuilder buildAtomicRMWNand(Register OldValRes, Register Addr, Register Val, MachineMemOperand &MMO)
Build and insert OldValRes<def> = G_ATOMICRMW_NAND Addr, Val, MMO.
Representation of each machine instruction.
Definition: MachineInstr.h:64
const MachineFunction * getParent() const
Return the MachineFunction containing this basic block.
const MachineInstrBuilder & addImm(int64_t Val) const
Add a new immediate operand.
MachineInstrBuilder buildFCmp(CmpInst::Predicate Pred, const DstOp &Res, const SrcOp &Op0, const SrcOp &Op1)
Build and insert a Res = G_FCMP Pred, Op0, Op1.
MachineRegisterInfo & getRegInfo()
getRegInfo - Return information about the registers currently in use.
MachineInstrBuilder buildIndirectDbgValue(Register Reg, const MDNode *Variable, const MDNode *Expr)
Build and insert a DBG_VALUE instruction expressing the fact that the associated Variable lives in me...
const MachineBasicBlock & getMBB() const
Getter for the basic block we currently build.
MachineInstrBuilder buildBoolExt(const DstOp &Res, const SrcOp &Op, bool IsFP)
void setMBB(MachineBasicBlock &MBB)
Set the insertion point to the end of MBB.
#define I(x, y, z)
Definition: MD5.cpp:58
MachineInstrBuilder buildStore(const SrcOp &Val, const SrcOp &Addr, MachineMemOperand &MMO)
Build and insert G_STORE Val, Addr, MMO.
MachineInstrBuilder buildAtomicRMWMin(Register OldValRes, Register Addr, Register Val, MachineMemOperand &MMO)
Build and insert OldValRes<def> = G_ATOMICRMW_MIN Addr, Val, MMO.
const MachineInstrBuilder & addJumpTableIndex(unsigned Idx, unsigned char TargetFlags=0) const
assert(ImpDefSCC.getReg()==AMDGPU::SCC &&ImpDefSCC.isDef())
MachineInstrBuilder buildAtomicRMW(unsigned Opcode, Register OldValRes, Register Addr, Register Val, MachineMemOperand &MMO)
Build and insert OldValRes<def> = G_ATOMICRMW_<Opcode> Addr, Val, MMO.
MachineInstrBuilder insertInstr(MachineInstrBuilder MIB)
Insert an existing instruction at the insertion point.
LLVM Value Representation.
Definition: Value.h:72
uint16_t getNumElements() const
Returns the number of elements in a vector LLT.
MachineInstrBuilder buildMerge(const DstOp &Res, ArrayRef< Register > Ops)
Build and insert Res = G_MERGE_VALUES Op0, ...
IRTranslator LLVM IR MI
const MachineInstrBuilder & addDef(unsigned RegNo, unsigned Flags=0, unsigned SubReg=0) const
Add a virtual register definition operand.
MachineInstrBuilder buildUndef(const DstOp &Res)
Build and insert Res = IMPLICIT_DEF.
Register createGenericVirtualRegister(LLT Ty, StringRef Name="")
Create and return a new generic virtual register with low-level type Ty.
MachineFunction * MF
MachineFunction under construction.
MachineInstrBuilder buildConcatVectors(const DstOp &Res, ArrayRef< Register > Ops)
Build and insert Res = G_CONCAT_VECTORS Op0, ...
MachineInstrBuilder buildSplatVector(const DstOp &Res, const SrcOp &Src)
Build and insert Res = G_BUILD_VECTOR with Src replicated to fill the number of elements.
const MachineInstrBuilder & addMBB(MachineBasicBlock *MBB, unsigned char TargetFlags=0) const
const DebugLoc & getDL()
Getter for DebugLoc.
MachineInstrBuilder buildAtomicCmpXchg(Register OldValRes, Register Addr, Register CmpVal, Register NewVal, MachineMemOperand &MMO)
Build and insert OldValRes<def> = G_ATOMIC_CMPXCHG Addr, CmpVal, NewVal, MMO.
PointerType * getType() const
Global values are always pointers.
Definition: GlobalValue.h:277
Wrapper class representing virtual and physical registers.
Definition: Register.h:18
bool empty() const
empty - Check if the array is empty.
Definition: ArrayRef.h:143
This file describes how to lower LLVM code to machine code.
MachineRegisterInfo * MRI
Information used to verify types are consistent and to create virtual registers.