//===- llvm/CodeGen/GlobalISel/IRTranslator.cpp - IRTranslator ---*- C++ -*-==//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
/// \file
/// This file implements the IRTranslator class.
//===----------------------------------------------------------------------===//

#include "llvm/CodeGen/GlobalISel/IRTranslator.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/ScopeExit.h"
#include "llvm/ADT/SmallSet.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/Analysis/OptimizationRemarkEmitter.h"
#include "llvm/CodeGen/Analysis.h"
#include "llvm/CodeGen/GlobalISel/CallLowering.h"
#include "llvm/CodeGen/LowLevelType.h"
#include "llvm/CodeGen/MachineBasicBlock.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineMemOperand.h"
#include "llvm/CodeGen/MachineOperand.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/StackProtector.h"
#include "llvm/CodeGen/TargetFrameLowering.h"
#include "llvm/CodeGen/TargetLowering.h"
#include "llvm/CodeGen/TargetPassConfig.h"
#include "llvm/CodeGen/TargetRegisterInfo.h"
#include "llvm/CodeGen/TargetSubtargetInfo.h"
#include "llvm/IR/BasicBlock.h"
#include "llvm/IR/Constant.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DebugInfo.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/GetElementPtrTypeIterator.h"
#include "llvm/IR/InlineAsm.h"
#include "llvm/IR/InstrTypes.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/Metadata.h"
#include "llvm/IR/Type.h"
#include "llvm/IR/User.h"
#include "llvm/IR/Value.h"
#include "llvm/MC/MCContext.h"
#include "llvm/Pass.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/CodeGen.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/LowLevelTypeImpl.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Target/TargetIntrinsicInfo.h"
#include "llvm/Target/TargetMachine.h"
#include <algorithm>
#include <cassert>
#include <cstdint>
#include <iterator>
#include <string>
#include <utility>
#include <vector>

#define DEBUG_TYPE "irtranslator"

using namespace llvm;

char IRTranslator::ID = 0;

INITIALIZE_PASS_BEGIN(IRTranslator, DEBUG_TYPE, "IRTranslator LLVM IR -> MI",
                      false, false)
INITIALIZE_PASS_DEPENDENCY(TargetPassConfig)
INITIALIZE_PASS_END(IRTranslator, DEBUG_TYPE, "IRTranslator LLVM IR -> MI",
                    false, false)

static void reportTranslationError(MachineFunction &MF,
                                   const TargetPassConfig &TPC,
                                   OptimizationRemarkEmitter &ORE,
                                   OptimizationRemarkMissed &R) {
  MF.getProperties().set(MachineFunctionProperties::Property::FailedISel);

  // Print the function name explicitly if we don't have a debug location
  // (which makes the diagnostic less useful) or if we're going to emit a raw
  // error.
  if (!R.getLocation().isValid() || TPC.isGlobalISelAbortEnabled())
    R << (" (in function: " + MF.getName() + ")").str();

  if (TPC.isGlobalISelAbortEnabled())
    report_fatal_error(R.getMsg());
  else
    ORE.emit(R);
}

IRTranslator::IRTranslator() : MachineFunctionPass(ID) {
  initializeIRTranslatorPass(*PassRegistry::getPassRegistry());
}

void IRTranslator::getAnalysisUsage(AnalysisUsage &AU) const {
  AU.addRequired<StackProtector>();
  AU.addRequired<TargetPassConfig>();
  getSelectionDAGFallbackAnalysisUsage(AU);
  MachineFunctionPass::getAnalysisUsage(AU);
}
static void computeValueLLTs(const DataLayout &DL, Type &Ty,
                             SmallVectorImpl<LLT> &ValueTys,
                             SmallVectorImpl<uint64_t> *Offsets = nullptr,
                             uint64_t StartingOffset = 0) {
  // Given a struct type, recursively traverse the elements.
  if (StructType *STy = dyn_cast<StructType>(&Ty)) {
    const StructLayout *SL = DL.getStructLayout(STy);
    for (unsigned I = 0, E = STy->getNumElements(); I != E; ++I)
      computeValueLLTs(DL, *STy->getElementType(I), ValueTys, Offsets,
                       StartingOffset + SL->getElementOffset(I));
    return;
  }
  // Given an array type, recursively traverse the elements.
  if (ArrayType *ATy = dyn_cast<ArrayType>(&Ty)) {
    Type *EltTy = ATy->getElementType();
    uint64_t EltSize = DL.getTypeAllocSize(EltTy);
    for (unsigned i = 0, e = ATy->getNumElements(); i != e; ++i)
      computeValueLLTs(DL, *EltTy, ValueTys, Offsets,
                       StartingOffset + i * EltSize);
    return;
  }
  // Interpret void as zero return values.
  if (Ty.isVoidTy())
    return;
  // Base case: we can get an LLT for this LLVM IR type.
  ValueTys.push_back(getLLTForType(Ty, DL));
  if (Offsets != nullptr)
    Offsets->push_back(StartingOffset * 8);
}
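
// Worked example (a sketch, assuming a typical 64-bit DataLayout): for a
// value of type {i32, [2 x i16], i8*}, computeValueLLTs produces
// ValueTys = {s32, s16, s16, p0} and, when requested, bit Offsets =
// {0, 32, 48, 64} (the pointer is padded up to its 8-byte ABI alignment).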

ArrayRef<unsigned>
IRTranslator::allocateVRegs(const Value &Val) {
  assert(!VMap.contains(Val) && "Value already allocated in VMap");
  auto *Regs = VMap.getVRegs(Val);
  auto *Offsets = VMap.getOffsets(Val);
  SmallVector<LLT, 4> SplitTys;
  computeValueLLTs(*DL, *Val.getType(), SplitTys,
                   Offsets->empty() ? Offsets : nullptr);
  for (unsigned i = 0; i < SplitTys.size(); ++i)
    Regs->push_back(0);
  return *Regs;
}

ArrayRef<unsigned> IRTranslator::getOrCreateVRegs(const Value &Val) {
  auto VRegsIt = VMap.findVRegs(Val);
  if (VRegsIt != VMap.vregs_end())
    return *VRegsIt->second;

  if (Val.getType()->isVoidTy())
    return *VMap.getVRegs(Val);

  // Create entry for this type.
  auto *VRegs = VMap.getVRegs(Val);
  auto *Offsets = VMap.getOffsets(Val);

  assert(Val.getType()->isSized() &&
         "Don't know how to create an empty vreg");

  SmallVector<LLT, 4> SplitTys;
  computeValueLLTs(*DL, *Val.getType(), SplitTys,
                   Offsets->empty() ? Offsets : nullptr);

  if (!isa<Constant>(Val)) {
    for (auto Ty : SplitTys)
      VRegs->push_back(MRI->createGenericVirtualRegister(Ty));
    return *VRegs;
  }

  if (Val.getType()->isAggregateType()) {
    // UndefValue, ConstantAggregateZero
    auto &C = cast<Constant>(Val);
    unsigned Idx = 0;
    while (auto Elt = C.getAggregateElement(Idx++)) {
      auto EltRegs = getOrCreateVRegs(*Elt);
      std::copy(EltRegs.begin(), EltRegs.end(), std::back_inserter(*VRegs));
    }
  } else {
    assert(SplitTys.size() == 1 && "unexpectedly split LLT");
    VRegs->push_back(MRI->createGenericVirtualRegister(SplitTys[0]));
    bool Success = translate(cast<Constant>(Val), VRegs->front());
    if (!Success) {
      OptimizationRemarkMissed R("gisel-irtranslator", "GISelFailure",
                                 MF->getFunction().getSubprogram(),
                                 &MF->getFunction().getEntryBlock());
      R << "unable to translate constant: " << ore::NV("Type", Val.getType());
      reportTranslationError(*MF, *TPC, *ORE, R);
      return *VRegs;
    }
  }

  return *VRegs;
}

int IRTranslator::getOrCreateFrameIndex(const AllocaInst &AI) {
  if (FrameIndices.find(&AI) != FrameIndices.end())
    return FrameIndices[&AI];

  unsigned ElementSize = DL->getTypeStoreSize(AI.getAllocatedType());
  unsigned Size =
      ElementSize * cast<ConstantInt>(AI.getArraySize())->getZExtValue();

  // Always allocate at least one byte.
  Size = std::max(Size, 1u);

  unsigned Alignment = AI.getAlignment();
  if (!Alignment)
    Alignment = DL->getABITypeAlignment(AI.getAllocatedType());

  int &FI = FrameIndices[&AI];
  FI = MF->getFrameInfo().CreateStackObject(Size, Alignment, false, &AI);
  return FI;
}

unsigned IRTranslator::getMemOpAlignment(const Instruction &I) {
  unsigned Alignment = 0;
  Type *ValTy = nullptr;
  if (const StoreInst *SI = dyn_cast<StoreInst>(&I)) {
    Alignment = SI->getAlignment();
    ValTy = SI->getValueOperand()->getType();
  } else if (const LoadInst *LI = dyn_cast<LoadInst>(&I)) {
    Alignment = LI->getAlignment();
    ValTy = LI->getType();
  } else if (const AtomicCmpXchgInst *AI = dyn_cast<AtomicCmpXchgInst>(&I)) {
    // TODO(PR27168): This instruction has no alignment attribute, but unlike
    // the default alignment for load/store, the default here is to assume
    // it has NATURAL alignment, not DataLayout-specified alignment.
    const DataLayout &DL = AI->getModule()->getDataLayout();
    Alignment = DL.getTypeStoreSize(AI->getCompareOperand()->getType());
    ValTy = AI->getCompareOperand()->getType();
  } else if (const AtomicRMWInst *AI = dyn_cast<AtomicRMWInst>(&I)) {
    // TODO(PR27168): This instruction has no alignment attribute, but unlike
    // the default alignment for load/store, the default here is to assume
    // it has NATURAL alignment, not DataLayout-specified alignment.
    const DataLayout &DL = AI->getModule()->getDataLayout();
    Alignment = DL.getTypeStoreSize(AI->getValOperand()->getType());
    ValTy = AI->getType();
  } else {
    OptimizationRemarkMissed R("gisel-irtranslator", "", &I);
    R << "unable to translate memop: " << ore::NV("Opcode", &I);
    reportTranslationError(*MF, *TPC, *ORE, R);
    return 1;
  }

  return Alignment ? Alignment : DL->getABITypeAlignment(ValTy);
}

MachineBasicBlock &IRTranslator::getMBB(const BasicBlock &BB) {
  MachineBasicBlock *&MBB = BBToMBB[&BB];
  assert(MBB && "BasicBlock was not encountered before");
  return *MBB;
}

void IRTranslator::addMachineCFGPred(CFGEdge Edge, MachineBasicBlock *NewPred) {
  assert(NewPred && "new predecessor must be a real MachineBasicBlock");
  MachinePreds[Edge].push_back(NewPred);
}

bool IRTranslator::translateBinaryOp(unsigned Opcode, const User &U,
                                     MachineIRBuilder &MIRBuilder) {
  // FIXME: handle signed/unsigned wrapping flags.

  // Get or create a virtual register for each value.
  // Unless the value is a Constant => loadimm cst?
  // or inline constant each time?
  // Creation of a virtual register needs to have a size.
  unsigned Op0 = getOrCreateVReg(*U.getOperand(0));
  unsigned Op1 = getOrCreateVReg(*U.getOperand(1));
  unsigned Res = getOrCreateVReg(U);
  MIRBuilder.buildInstr(Opcode).addDef(Res).addUse(Op0).addUse(Op1);
  return true;
}

bool IRTranslator::translateFSub(const User &U, MachineIRBuilder &MIRBuilder) {
  // -0.0 - X --> G_FNEG
  if (isa<Constant>(U.getOperand(0)) &&
      U.getOperand(0) == ConstantFP::getZeroValueForNegation(U.getType())) {
    MIRBuilder.buildInstr(TargetOpcode::G_FNEG)
        .addDef(getOrCreateVReg(U))
        .addUse(getOrCreateVReg(*U.getOperand(1)));
    return true;
  }
  return translateBinaryOp(TargetOpcode::G_FSUB, U, MIRBuilder);
}

bool IRTranslator::translateCompare(const User &U,
                                    MachineIRBuilder &MIRBuilder) {
  const CmpInst *CI = dyn_cast<CmpInst>(&U);
  unsigned Op0 = getOrCreateVReg(*U.getOperand(0));
  unsigned Op1 = getOrCreateVReg(*U.getOperand(1));
  unsigned Res = getOrCreateVReg(U);
  CmpInst::Predicate Pred =
      CI ? CI->getPredicate() : static_cast<CmpInst::Predicate>(
                                    cast<ConstantExpr>(U).getPredicate());
  if (CmpInst::isIntPredicate(Pred))
    MIRBuilder.buildICmp(Pred, Res, Op0, Op1);
  else if (Pred == CmpInst::FCMP_FALSE)
    MIRBuilder.buildCopy(
        Res, getOrCreateVReg(*Constant::getNullValue(CI->getType())));
  else if (Pred == CmpInst::FCMP_TRUE)
    MIRBuilder.buildCopy(
        Res, getOrCreateVReg(*Constant::getAllOnesValue(CI->getType())));
  else
    MIRBuilder.buildFCmp(Pred, Res, Op0, Op1);

  return true;
}

bool IRTranslator::translateRet(const User &U, MachineIRBuilder &MIRBuilder) {
  const ReturnInst &RI = cast<ReturnInst>(U);
  const Value *Ret = RI.getReturnValue();
  if (Ret && DL->getTypeStoreSize(Ret->getType()) == 0)
    Ret = nullptr;
  // The target may mess up the insertion point, but this is not important,
  // as a return is the last instruction of the block anyway.

  // FIXME: this interface should simplify when CallLowering gets adapted to
  // multiple VRegs per Value.
  unsigned VReg = Ret ? packRegs(*Ret, MIRBuilder) : 0;
  return CLI->lowerReturn(MIRBuilder, Ret, VReg);
}

bool IRTranslator::translateBr(const User &U, MachineIRBuilder &MIRBuilder) {
  const BranchInst &BrInst = cast<BranchInst>(U);
  unsigned Succ = 0;
  if (!BrInst.isUnconditional()) {
    // We want a G_BRCOND to the true BB followed by an unconditional branch.
    unsigned Tst = getOrCreateVReg(*BrInst.getCondition());
    const BasicBlock &TrueTgt = *cast<BasicBlock>(BrInst.getSuccessor(Succ++));
    MachineBasicBlock &TrueBB = getMBB(TrueTgt);
    MIRBuilder.buildBrCond(Tst, TrueBB);
  }

  const BasicBlock &BrTgt = *cast<BasicBlock>(BrInst.getSuccessor(Succ));
  MachineBasicBlock &TgtBB = getMBB(BrTgt);
  MachineBasicBlock &CurBB = MIRBuilder.getMBB();

  // If the unconditional target is the layout successor, fallthrough.
  if (!CurBB.isLayoutSuccessor(&TgtBB))
    MIRBuilder.buildBr(TgtBB);

  // Link successors.
  for (const BasicBlock *Succ : BrInst.successors())
    CurBB.addSuccessor(&getMBB(*Succ));
  return true;
}
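
// Sketch of the output for a conditional IR branch such as
//   br i1 %cond, label %then, label %else
// assuming %then is not the layout successor of the current block:
//   G_BRCOND %cond(s1), %bb.then
//   G_BR %bb.else        ; omitted when %bb.else is the layout successor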

bool IRTranslator::translateSwitch(const User &U,
                                   MachineIRBuilder &MIRBuilder) {
  // For now, just translate as a chain of conditional branches.
  // FIXME: could we share most of the logic/code in
  // SelectionDAGBuilder::visitSwitch between SelectionDAG and GlobalISel?
  // At first sight, it seems most of the logic in there is independent of
  // SelectionDAG-specifics and a lot of work went in to optimize switch
  // lowering in there.

  const SwitchInst &SwInst = cast<SwitchInst>(U);
  const unsigned SwCondValue = getOrCreateVReg(*SwInst.getCondition());
  const BasicBlock *OrigBB = SwInst.getParent();

  LLT LLTi1 = getLLTForType(*Type::getInt1Ty(U.getContext()), *DL);
  for (auto &CaseIt : SwInst.cases()) {
    const unsigned CaseValueReg = getOrCreateVReg(*CaseIt.getCaseValue());
    const unsigned Tst = MRI->createGenericVirtualRegister(LLTi1);
    MIRBuilder.buildICmp(CmpInst::ICMP_EQ, Tst, CaseValueReg, SwCondValue);
    MachineBasicBlock &CurMBB = MIRBuilder.getMBB();
    const BasicBlock *TrueBB = CaseIt.getCaseSuccessor();
    MachineBasicBlock &TrueMBB = getMBB(*TrueBB);

    MIRBuilder.buildBrCond(Tst, TrueMBB);
    CurMBB.addSuccessor(&TrueMBB);
    addMachineCFGPred({OrigBB, TrueBB}, &CurMBB);

    MachineBasicBlock *FalseMBB =
        MF->CreateMachineBasicBlock(SwInst.getParent());
    // Insert the comparison blocks one after the other.
    MF->insert(std::next(CurMBB.getIterator()), FalseMBB);
    MIRBuilder.buildBr(*FalseMBB);
    CurMBB.addSuccessor(FalseMBB);

    MIRBuilder.setMBB(*FalseMBB);
  }
  // handle default case
  const BasicBlock *DefaultBB = SwInst.getDefaultDest();
  MachineBasicBlock &DefaultMBB = getMBB(*DefaultBB);
  MIRBuilder.buildBr(DefaultMBB);
  MachineBasicBlock &CurMBB = MIRBuilder.getMBB();
  CurMBB.addSuccessor(&DefaultMBB);
  addMachineCFGPred({OrigBB, DefaultBB}, &CurMBB);

  return true;
}
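
// Sketch of the chain built for
//   switch i32 %v, label %def [ i32 1, label %a ]
// with a single case (hypothetical block names):
//   bb.cur:   %t:_(s1) = G_ICMP eq, %c1, %v   ; %c1 = G_CONSTANT i32 1
//             G_BRCOND %t, %bb.a
//             G_BR %bb.false
//   bb.false: G_BR %bb.def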

bool IRTranslator::translateIndirectBr(const User &U,
                                       MachineIRBuilder &MIRBuilder) {
  const IndirectBrInst &BrInst = cast<IndirectBrInst>(U);

  const unsigned Tgt = getOrCreateVReg(*BrInst.getAddress());
  MIRBuilder.buildBrIndirect(Tgt);

  // Link successors.
  MachineBasicBlock &CurBB = MIRBuilder.getMBB();
  for (const BasicBlock *Succ : BrInst.successors())
    CurBB.addSuccessor(&getMBB(*Succ));

  return true;
}

bool IRTranslator::translateLoad(const User &U, MachineIRBuilder &MIRBuilder) {
  const LoadInst &LI = cast<LoadInst>(U);

  auto Flags = LI.isVolatile() ? MachineMemOperand::MOVolatile
                               : MachineMemOperand::MONone;
  Flags |= MachineMemOperand::MOLoad;

  if (DL->getTypeStoreSize(LI.getType()) == 0)
    return true;

  ArrayRef<unsigned> Regs = getOrCreateVRegs(LI);
  ArrayRef<uint64_t> Offsets = *VMap.getOffsets(LI);
  unsigned Base = getOrCreateVReg(*LI.getPointerOperand());

  for (unsigned i = 0; i < Regs.size(); ++i) {
    unsigned Addr = 0;
    MIRBuilder.materializeGEP(Addr, Base, LLT::scalar(64), Offsets[i] / 8);

    MachinePointerInfo Ptr(LI.getPointerOperand(), Offsets[i] / 8);
    unsigned BaseAlign = getMemOpAlignment(LI);
    auto MMO = MF->getMachineMemOperand(
        Ptr, Flags, (MRI->getType(Regs[i]).getSizeInBits() + 7) / 8,
        MinAlign(BaseAlign, Offsets[i] / 8), AAMDNodes(), nullptr,
        LI.getSyncScopeID(), LI.getOrdering());
    MIRBuilder.buildLoad(Regs[i], Addr, *MMO);
  }

  return true;
}

bool IRTranslator::translateStore(const User &U, MachineIRBuilder &MIRBuilder) {
  const StoreInst &SI = cast<StoreInst>(U);
  auto Flags = SI.isVolatile() ? MachineMemOperand::MOVolatile
                               : MachineMemOperand::MONone;
  Flags |= MachineMemOperand::MOStore;

  if (DL->getTypeStoreSize(SI.getValueOperand()->getType()) == 0)
    return true;

  ArrayRef<unsigned> Vals = getOrCreateVRegs(*SI.getValueOperand());
  ArrayRef<uint64_t> Offsets = *VMap.getOffsets(*SI.getValueOperand());
  unsigned Base = getOrCreateVReg(*SI.getPointerOperand());

  for (unsigned i = 0; i < Vals.size(); ++i) {
    unsigned Addr = 0;
    MIRBuilder.materializeGEP(Addr, Base, LLT::scalar(64), Offsets[i] / 8);

    MachinePointerInfo Ptr(SI.getPointerOperand(), Offsets[i] / 8);
    unsigned BaseAlign = getMemOpAlignment(SI);
    auto MMO = MF->getMachineMemOperand(
        Ptr, Flags, (MRI->getType(Vals[i]).getSizeInBits() + 7) / 8,
        MinAlign(BaseAlign, Offsets[i] / 8), AAMDNodes(), nullptr,
        SI.getSyncScopeID(), SI.getOrdering());
    MIRBuilder.buildStore(Vals[i], Addr, *MMO);
  }
  return true;
}
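
// Sketch: a store of a split value such as "store {i32, i64} %v, {i32, i64}* %p"
// becomes one G_STORE per component, with addresses materialized from %p at
// byte offsets 0 and 8 (assuming a typical 64-bit DataLayout); loads are
// handled symmetrically above.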

static uint64_t getOffsetFromIndices(const User &U, const DataLayout &DL) {
  const Value *Src = U.getOperand(0);
  Type *Int32Ty = Type::getInt32Ty(U.getContext());

  // getIndexedOffsetInType is designed for GEPs, so the first index is the
  // usual array element rather than looking into the actual aggregate.
  SmallVector<Value *, 1> Indices;
  Indices.push_back(ConstantInt::get(Int32Ty, 0));

  if (const ExtractValueInst *EVI = dyn_cast<ExtractValueInst>(&U)) {
    for (auto Idx : EVI->indices())
      Indices.push_back(ConstantInt::get(Int32Ty, Idx));
  } else if (const InsertValueInst *IVI = dyn_cast<InsertValueInst>(&U)) {
    for (auto Idx : IVI->indices())
      Indices.push_back(ConstantInt::get(Int32Ty, Idx));
  } else {
    for (unsigned i = 1; i < U.getNumOperands(); ++i)
      Indices.push_back(U.getOperand(i));
  }

  return 8 * static_cast<uint64_t>(
                 DL.getIndexedOffsetInType(Src->getType(), Indices));
}

bool IRTranslator::translateExtractValue(const User &U,
                                         MachineIRBuilder &MIRBuilder) {
  const Value *Src = U.getOperand(0);
  uint64_t Offset = getOffsetFromIndices(U, *DL);
  ArrayRef<unsigned> SrcRegs = getOrCreateVRegs(*Src);
  ArrayRef<uint64_t> Offsets = *VMap.getOffsets(*Src);
  unsigned Idx = std::lower_bound(Offsets.begin(), Offsets.end(), Offset) -
                 Offsets.begin();
  auto &DstRegs = allocateVRegs(U);

  for (unsigned i = 0; i < DstRegs.size(); ++i)
    DstRegs[i] = SrcRegs[Idx++];

  return true;
}

bool IRTranslator::translateInsertValue(const User &U,
                                        MachineIRBuilder &MIRBuilder) {
  const Value *Src = U.getOperand(0);
  uint64_t Offset = getOffsetFromIndices(U, *DL);
  auto &DstRegs = allocateVRegs(U);
  ArrayRef<uint64_t> DstOffsets = *VMap.getOffsets(U);
  ArrayRef<unsigned> SrcRegs = getOrCreateVRegs(*Src);
  ArrayRef<unsigned> InsertedRegs = getOrCreateVRegs(*U.getOperand(1));
  auto InsertedIt = InsertedRegs.begin();

  for (unsigned i = 0; i < DstRegs.size(); ++i) {
    if (DstOffsets[i] >= Offset && InsertedIt != InsertedRegs.end())
      DstRegs[i] = *InsertedIt++;
    else
      DstRegs[i] = SrcRegs[i];
  }

  return true;
}

bool IRTranslator::translateSelect(const User &U,
                                   MachineIRBuilder &MIRBuilder) {
  unsigned Tst = getOrCreateVReg(*U.getOperand(0));
  ArrayRef<unsigned> ResRegs = getOrCreateVRegs(U);
  ArrayRef<unsigned> Op0Regs = getOrCreateVRegs(*U.getOperand(1));
  ArrayRef<unsigned> Op1Regs = getOrCreateVRegs(*U.getOperand(2));

  for (unsigned i = 0; i < ResRegs.size(); ++i)
    MIRBuilder.buildSelect(ResRegs[i], Tst, Op0Regs[i], Op1Regs[i]);

  return true;
}

bool IRTranslator::translateBitCast(const User &U,
                                    MachineIRBuilder &MIRBuilder) {
  // If we're bitcasting to the source type, we can reuse the source vreg.
  if (getLLTForType(*U.getOperand(0)->getType(), *DL) ==
      getLLTForType(*U.getType(), *DL)) {
    unsigned SrcReg = getOrCreateVReg(*U.getOperand(0));
    auto &Regs = *VMap.getVRegs(U);
    // If we already assigned a vreg for this bitcast, we can't change that.
    // Emit a copy to satisfy the users we already emitted.
    if (!Regs.empty())
      MIRBuilder.buildCopy(Regs[0], SrcReg);
    else {
      Regs.push_back(SrcReg);
      VMap.getOffsets(U)->push_back(0);
    }
    return true;
  }
  return translateCast(TargetOpcode::G_BITCAST, U, MIRBuilder);
}

bool IRTranslator::translateCast(unsigned Opcode, const User &U,
                                 MachineIRBuilder &MIRBuilder) {
  unsigned Op = getOrCreateVReg(*U.getOperand(0));
  unsigned Res = getOrCreateVReg(U);
  MIRBuilder.buildInstr(Opcode).addDef(Res).addUse(Op);
  return true;
}

bool IRTranslator::translateGetElementPtr(const User &U,
                                          MachineIRBuilder &MIRBuilder) {
  // FIXME: support vector GEPs.
  if (U.getType()->isVectorTy())
    return false;

  Value &Op0 = *U.getOperand(0);
  unsigned BaseReg = getOrCreateVReg(Op0);
  Type *PtrIRTy = Op0.getType();
  LLT PtrTy = getLLTForType(*PtrIRTy, *DL);
  Type *OffsetIRTy = DL->getIntPtrType(PtrIRTy);
  LLT OffsetTy = getLLTForType(*OffsetIRTy, *DL);

  int64_t Offset = 0;
  for (gep_type_iterator GTI = gep_type_begin(&U), E = gep_type_end(&U);
       GTI != E; ++GTI) {
    const Value *Idx = GTI.getOperand();
    if (StructType *StTy = GTI.getStructTypeOrNull()) {
      unsigned Field = cast<Constant>(Idx)->getUniqueInteger().getZExtValue();
      Offset += DL->getStructLayout(StTy)->getElementOffset(Field);
      continue;
    } else {
      uint64_t ElementSize = DL->getTypeAllocSize(GTI.getIndexedType());

      // If this is a scalar constant or a splat vector of constants,
      // handle it quickly.
      if (const auto *CI = dyn_cast<ConstantInt>(Idx)) {
        Offset += ElementSize * CI->getSExtValue();
        continue;
      }

      if (Offset != 0) {
        unsigned NewBaseReg = MRI->createGenericVirtualRegister(PtrTy);
        unsigned OffsetReg =
            getOrCreateVReg(*ConstantInt::get(OffsetIRTy, Offset));
        MIRBuilder.buildGEP(NewBaseReg, BaseReg, OffsetReg);

        BaseReg = NewBaseReg;
        Offset = 0;
      }

      unsigned IdxReg = getOrCreateVReg(*Idx);
      if (MRI->getType(IdxReg) != OffsetTy) {
        unsigned NewIdxReg = MRI->createGenericVirtualRegister(OffsetTy);
        MIRBuilder.buildSExtOrTrunc(NewIdxReg, IdxReg);
        IdxReg = NewIdxReg;
      }

      // N = N + Idx * ElementSize;
      // Avoid doing it for ElementSize of 1.
      unsigned GepOffsetReg;
      if (ElementSize != 1) {
        unsigned ElementSizeReg =
            getOrCreateVReg(*ConstantInt::get(OffsetIRTy, ElementSize));

        GepOffsetReg = MRI->createGenericVirtualRegister(OffsetTy);
        MIRBuilder.buildMul(GepOffsetReg, ElementSizeReg, IdxReg);
      } else
        GepOffsetReg = IdxReg;

      unsigned NewBaseReg = MRI->createGenericVirtualRegister(PtrTy);
      MIRBuilder.buildGEP(NewBaseReg, BaseReg, GepOffsetReg);
      BaseReg = NewBaseReg;
    }
  }

  if (Offset != 0) {
    unsigned OffsetReg = getOrCreateVReg(*ConstantInt::get(OffsetIRTy, Offset));
    MIRBuilder.buildGEP(getOrCreateVReg(U), BaseReg, OffsetReg);
    return true;
  }

  MIRBuilder.buildCopy(getOrCreateVReg(U), BaseReg);
  return true;
}
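
// Sketch: "%q = getelementptr i32, i32* %p, i64 %i" with a non-constant
// index is emitted roughly as
//   %size   = G_CONSTANT i64 4
//   %scaled = G_MUL %size, %i
//   %q      = G_GEP %p, %scaled
// whereas fully-constant indices are folded and emitted as a single trailing
// G_GEP (or just a COPY when the net offset is zero).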

bool IRTranslator::translateMemfunc(const CallInst &CI,
                                    MachineIRBuilder &MIRBuilder,
                                    unsigned ID) {
  LLT SizeTy = getLLTForType(*CI.getArgOperand(2)->getType(), *DL);
  Type *DstTy = CI.getArgOperand(0)->getType();
  if (cast<PointerType>(DstTy)->getAddressSpace() != 0 ||
      SizeTy.getSizeInBits() != DL->getPointerSizeInBits(0))
    return false;

  SmallVector<CallLowering::ArgInfo, 8> Args;
  for (int i = 0; i < 3; ++i) {
    const auto &Arg = CI.getArgOperand(i);
    Args.emplace_back(getOrCreateVReg(*Arg), Arg->getType());
  }

  const char *Callee;
  switch (ID) {
  case Intrinsic::memmove:
  case Intrinsic::memcpy: {
    Type *SrcTy = CI.getArgOperand(1)->getType();
    if (cast<PointerType>(SrcTy)->getAddressSpace() != 0)
      return false;
    Callee = ID == Intrinsic::memcpy ? "memcpy" : "memmove";
    break;
  }
  case Intrinsic::memset:
    Callee = "memset";
    break;
  default:
    return false;
  }

  return CLI->lowerCall(MIRBuilder, CI.getCallingConv(),
                        MachineOperand::CreateES(Callee),
                        CallLowering::ArgInfo(0, CI.getType()), Args);
}

void IRTranslator::getStackGuard(unsigned DstReg,
                                 MachineIRBuilder &MIRBuilder) {
  const TargetRegisterInfo *TRI = MF->getSubtarget().getRegisterInfo();
  MRI->setRegClass(DstReg, TRI->getPointerRegClass(*MF));
  auto MIB = MIRBuilder.buildInstr(TargetOpcode::LOAD_STACK_GUARD);
  MIB.addDef(DstReg);

  auto &TLI = *MF->getSubtarget().getTargetLowering();
  Value *Global = TLI.getSDagStackGuard(*MF->getFunction().getParent());
  if (!Global)
    return;

  MachinePointerInfo MPInfo(Global);
  MachineInstr::mmo_iterator MemRefs = MF->allocateMemRefsArray(1);
  auto Flags = MachineMemOperand::MOLoad | MachineMemOperand::MOInvariant |
               MachineMemOperand::MODereferenceable;
  *MemRefs =
      MF->getMachineMemOperand(MPInfo, Flags, DL->getPointerSizeInBits() / 8,
                               DL->getPointerABIAlignment(0));
  MIB.setMemRefs(MemRefs, MemRefs + 1);
}

bool IRTranslator::translateOverflowIntrinsic(const CallInst &CI, unsigned Op,
                                              MachineIRBuilder &MIRBuilder) {
  ArrayRef<unsigned> ResRegs = getOrCreateVRegs(CI);
  auto MIB = MIRBuilder.buildInstr(Op)
                 .addDef(ResRegs[0])
                 .addDef(ResRegs[1])
                 .addUse(getOrCreateVReg(*CI.getOperand(0)))
                 .addUse(getOrCreateVReg(*CI.getOperand(1)));

  if (Op == TargetOpcode::G_UADDE || Op == TargetOpcode::G_USUBE) {
    unsigned Zero = getOrCreateVReg(
        *Constant::getNullValue(Type::getInt1Ty(CI.getContext())));
    MIB.addUse(Zero);
  }

  return true;
}
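
// Sketch: "%r = call {i32, i1} @llvm.uadd.with.overflow.i32(i32 %a, i32 %b)"
// becomes roughly
//   %zero:_(s1) = G_CONSTANT i1 false
//   %val:_(s32), %ovf:_(s1) = G_UADDE %a, %b, %zero
// The carry-in operand is only added for G_UADDE/G_USUBE; the G_*O forms
// (e.g. G_SADDO) take none.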

bool IRTranslator::translateKnownIntrinsic(const CallInst &CI, Intrinsic::ID ID,
                                           MachineIRBuilder &MIRBuilder) {
  switch (ID) {
  default:
    break;
  case Intrinsic::lifetime_start:
  case Intrinsic::lifetime_end:
    // Stack coloring is not enabled in O0 (which we care about now) so we can
    // drop these. Make sure someone notices when we start compiling at higher
    // opts though.
    if (MF->getTarget().getOptLevel() != CodeGenOpt::None)
      return false;
    return true;
  case Intrinsic::dbg_declare: {
    const DbgDeclareInst &DI = cast<DbgDeclareInst>(CI);
    assert(DI.getVariable() && "Missing variable");

    const Value *Address = DI.getAddress();
    if (!Address || isa<UndefValue>(Address)) {
      LLVM_DEBUG(dbgs() << "Dropping debug info for " << DI << "\n");
      return true;
    }

    assert(DI.getVariable()->isValidLocationForIntrinsic(
               MIRBuilder.getDebugLoc()) &&
           "Expected inlined-at fields to agree");
    auto AI = dyn_cast<AllocaInst>(Address);
    if (AI && AI->isStaticAlloca()) {
      // Static allocas are tracked at the MF level, no need for DBG_VALUE
      // instructions (in fact, they get ignored if they *do* exist).
      MF->setVariableDbgInfo(DI.getVariable(), DI.getExpression(),
                             getOrCreateFrameIndex(*AI), DI.getDebugLoc());
    } else
      MIRBuilder.buildDirectDbgValue(getOrCreateVReg(*Address),
                                     DI.getVariable(), DI.getExpression());
    return true;
  }
  case Intrinsic::vaend:
    // No target I know of cares about va_end. Certainly no in-tree target
    // does. Simplest intrinsic ever!
    return true;
  case Intrinsic::vastart: {
    auto &TLI = *MF->getSubtarget().getTargetLowering();
    Value *Ptr = CI.getArgOperand(0);
    unsigned ListSize = TLI.getVaListSizeInBits(*DL) / 8;

    MIRBuilder.buildInstr(TargetOpcode::G_VASTART)
        .addUse(getOrCreateVReg(*Ptr))
        .addMemOperand(MF->getMachineMemOperand(
            MachinePointerInfo(Ptr), MachineMemOperand::MOStore, ListSize, 0));
    return true;
  }
  case Intrinsic::dbg_value: {
    // This form of DBG_VALUE is target-independent.
    const DbgValueInst &DI = cast<DbgValueInst>(CI);
    const Value *V = DI.getValue();
    assert(DI.getVariable()->isValidLocationForIntrinsic(
               MIRBuilder.getDebugLoc()) &&
           "Expected inlined-at fields to agree");
    if (!V) {
      // Currently the optimizer can produce this; insert an undef to
      // help debugging. Probably the optimizer should not do this.
      MIRBuilder.buildIndirectDbgValue(0, DI.getVariable(), DI.getExpression());
    } else if (const auto *CI = dyn_cast<Constant>(V)) {
      MIRBuilder.buildConstDbgValue(*CI, DI.getVariable(), DI.getExpression());
    } else {
      unsigned Reg = getOrCreateVReg(*V);
      // FIXME: This does not handle register-indirect values at offset 0. The
      // direct/indirect thing shouldn't really be handled by something as
      // implicit as reg+noreg vs reg+imm in the first place, but it seems
      // pretty baked in right now.
      MIRBuilder.buildDirectDbgValue(Reg, DI.getVariable(), DI.getExpression());
    }
    return true;
  }
  case Intrinsic::uadd_with_overflow:
    return translateOverflowIntrinsic(CI, TargetOpcode::G_UADDE, MIRBuilder);
  case Intrinsic::sadd_with_overflow:
    return translateOverflowIntrinsic(CI, TargetOpcode::G_SADDO, MIRBuilder);
  case Intrinsic::usub_with_overflow:
    return translateOverflowIntrinsic(CI, TargetOpcode::G_USUBE, MIRBuilder);
  case Intrinsic::ssub_with_overflow:
    return translateOverflowIntrinsic(CI, TargetOpcode::G_SSUBO, MIRBuilder);
  case Intrinsic::umul_with_overflow:
    return translateOverflowIntrinsic(CI, TargetOpcode::G_UMULO, MIRBuilder);
  case Intrinsic::smul_with_overflow:
    return translateOverflowIntrinsic(CI, TargetOpcode::G_SMULO, MIRBuilder);
  case Intrinsic::pow:
    MIRBuilder.buildInstr(TargetOpcode::G_FPOW)
        .addDef(getOrCreateVReg(CI))
        .addUse(getOrCreateVReg(*CI.getArgOperand(0)))
        .addUse(getOrCreateVReg(*CI.getArgOperand(1)));
    return true;
  case Intrinsic::exp:
    MIRBuilder.buildInstr(TargetOpcode::G_FEXP)
        .addDef(getOrCreateVReg(CI))
        .addUse(getOrCreateVReg(*CI.getArgOperand(0)));
    return true;
  case Intrinsic::exp2:
    MIRBuilder.buildInstr(TargetOpcode::G_FEXP2)
        .addDef(getOrCreateVReg(CI))
        .addUse(getOrCreateVReg(*CI.getArgOperand(0)));
    return true;
  case Intrinsic::log:
    MIRBuilder.buildInstr(TargetOpcode::G_FLOG)
        .addDef(getOrCreateVReg(CI))
        .addUse(getOrCreateVReg(*CI.getArgOperand(0)));
    return true;
  case Intrinsic::log2:
    MIRBuilder.buildInstr(TargetOpcode::G_FLOG2)
        .addDef(getOrCreateVReg(CI))
        .addUse(getOrCreateVReg(*CI.getArgOperand(0)));
    return true;
  case Intrinsic::fabs:
    MIRBuilder.buildInstr(TargetOpcode::G_FABS)
        .addDef(getOrCreateVReg(CI))
        .addUse(getOrCreateVReg(*CI.getArgOperand(0)));
    return true;
  case Intrinsic::fma:
    MIRBuilder.buildInstr(TargetOpcode::G_FMA)
        .addDef(getOrCreateVReg(CI))
        .addUse(getOrCreateVReg(*CI.getArgOperand(0)))
        .addUse(getOrCreateVReg(*CI.getArgOperand(1)))
        .addUse(getOrCreateVReg(*CI.getArgOperand(2)));
    return true;
  case Intrinsic::fmuladd: {
    const TargetMachine &TM = MF->getTarget();
    const TargetLowering &TLI = *MF->getSubtarget().getTargetLowering();
    unsigned Dst = getOrCreateVReg(CI);
    unsigned Op0 = getOrCreateVReg(*CI.getArgOperand(0));
    unsigned Op1 = getOrCreateVReg(*CI.getArgOperand(1));
    unsigned Op2 = getOrCreateVReg(*CI.getArgOperand(2));
    if (TM.Options.AllowFPOpFusion != FPOpFusion::Strict &&
        TLI.isFMAFasterThanFMulAndFAdd(TLI.getValueType(*DL, CI.getType()))) {
      // TODO: Revisit this to see if we should move this part of the
      // lowering to the combiner.
      MIRBuilder.buildInstr(TargetOpcode::G_FMA, Dst, Op0, Op1, Op2);
    } else {
      LLT Ty = getLLTForType(*CI.getType(), *DL);
      auto FMul = MIRBuilder.buildInstr(TargetOpcode::G_FMUL, Ty, Op0, Op1);
      MIRBuilder.buildInstr(TargetOpcode::G_FADD, Dst, FMul, Op2);
    }
    return true;
  }
  case Intrinsic::memcpy:
  case Intrinsic::memmove:
  case Intrinsic::memset:
    return translateMemfunc(CI, MIRBuilder, ID);
  case Intrinsic::eh_typeid_for: {
    GlobalValue *GV = ExtractTypeInfo(CI.getArgOperand(0));
    unsigned Reg = getOrCreateVReg(CI);
    unsigned TypeID = MF->getTypeIDFor(GV);
    MIRBuilder.buildConstant(Reg, TypeID);
    return true;
  }
  case Intrinsic::objectsize: {
    // If we don't know by now, we're never going to know.
    const ConstantInt *Min = cast<ConstantInt>(CI.getArgOperand(1));

    MIRBuilder.buildConstant(getOrCreateVReg(CI), Min->isZero() ? -1ULL : 0);
    return true;
  }
  case Intrinsic::stackguard:
    getStackGuard(getOrCreateVReg(CI), MIRBuilder);
    return true;
  case Intrinsic::stackprotector: {
    LLT PtrTy = getLLTForType(*CI.getArgOperand(0)->getType(), *DL);
    unsigned GuardVal = MRI->createGenericVirtualRegister(PtrTy);
    getStackGuard(GuardVal, MIRBuilder);

    AllocaInst *Slot = cast<AllocaInst>(CI.getArgOperand(1));
    MIRBuilder.buildStore(
        GuardVal, getOrCreateVReg(*Slot),
        *MF->getMachineMemOperand(
            MachinePointerInfo::getFixedStack(*MF,
                                              getOrCreateFrameIndex(*Slot)),
            MachineMemOperand::MOStore | MachineMemOperand::MOVolatile,
            PtrTy.getSizeInBits() / 8, 8));
    return true;
  }
  }
  return false;
}

bool IRTranslator::translateInlineAsm(const CallInst &CI,
                                      MachineIRBuilder &MIRBuilder) {
  const InlineAsm &IA = cast<InlineAsm>(*CI.getCalledValue());
  if (!IA.getConstraintString().empty())
    return false;

  unsigned ExtraInfo = 0;
  if (IA.hasSideEffects())
    ExtraInfo |= InlineAsm::Extra_HasSideEffects;
  if (IA.getDialect() == InlineAsm::AD_Intel)
    ExtraInfo |= InlineAsm::Extra_AsmDialect;

  MIRBuilder.buildInstr(TargetOpcode::INLINEASM)
      .addExternalSymbol(IA.getAsmString().c_str())
      .addImm(ExtraInfo);

  return true;
}

unsigned IRTranslator::packRegs(const Value &V,
                                MachineIRBuilder &MIRBuilder) {
  ArrayRef<unsigned> Regs = getOrCreateVRegs(V);
  ArrayRef<uint64_t> Offsets = *VMap.getOffsets(V);
  LLT BigTy = getLLTForType(*V.getType(), *DL);

  if (Regs.size() == 1)
    return Regs[0];

  unsigned Dst = MRI->createGenericVirtualRegister(BigTy);
  MIRBuilder.buildUndef(Dst);
  for (unsigned i = 0; i < Regs.size(); ++i) {
    unsigned NewDst = MRI->createGenericVirtualRegister(BigTy);
    MIRBuilder.buildInsert(NewDst, Dst, Regs[i], Offsets[i]);
    Dst = NewDst;
  }
  return Dst;
}

void IRTranslator::unpackRegs(const Value &V, unsigned Src,
                              MachineIRBuilder &MIRBuilder) {
  ArrayRef<unsigned> Regs = getOrCreateVRegs(V);
  ArrayRef<uint64_t> Offsets = *VMap.getOffsets(V);

  for (unsigned i = 0; i < Regs.size(); ++i)
    MIRBuilder.buildExtract(Regs[i], Src, Offsets[i]);
}
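
// Sketch: for a value split into {s32, s32} components at bit offsets {0, 32}
// held in %a and %b, packRegs emits roughly
//   %u = G_IMPLICIT_DEF; %t = G_INSERT %u, %a, 0; %packed = G_INSERT %t, %b, 32
// and unpackRegs mirrors it with G_EXTRACTs at offsets 0 and 32. This is a
// stop-gap until CallLowering handles multiple vregs per Value (see the FIXME
// in translateRet above).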

bool IRTranslator::translateCall(const User &U, MachineIRBuilder &MIRBuilder) {
  const CallInst &CI = cast<CallInst>(U);
  auto TII = MF->getTarget().getIntrinsicInfo();
  const Function *F = CI.getCalledFunction();

  // FIXME: support Windows dllimport function calls.
  if (F && F->hasDLLImportStorageClass())
    return false;

  if (CI.isInlineAsm())
    return translateInlineAsm(CI, MIRBuilder);

  Intrinsic::ID ID = Intrinsic::not_intrinsic;
  if (F && F->isIntrinsic()) {
    ID = F->getIntrinsicID();
    if (TII && ID == Intrinsic::not_intrinsic)
      ID = static_cast<Intrinsic::ID>(TII->getIntrinsicID(F));
  }

  bool IsSplitType = valueIsSplit(CI);
  if (!F || !F->isIntrinsic() || ID == Intrinsic::not_intrinsic) {
    unsigned Res = IsSplitType ? MRI->createGenericVirtualRegister(
                                     getLLTForType(*CI.getType(), *DL))
                               : getOrCreateVReg(CI);

    SmallVector<unsigned, 8> Args;
    for (auto &Arg: CI.arg_operands())
      Args.push_back(packRegs(*Arg, MIRBuilder));

    MF->getFrameInfo().setHasCalls(true);
    bool Success = CLI->lowerCall(MIRBuilder, &CI, Res, Args, [&]() {
      return getOrCreateVReg(*CI.getCalledValue());
    });

    if (IsSplitType)
      unpackRegs(CI, Res, MIRBuilder);
    return Success;
  }

  assert(ID != Intrinsic::not_intrinsic && "unknown intrinsic");

  if (translateKnownIntrinsic(CI, ID, MIRBuilder))
    return true;

  unsigned Res = 0;
  if (!CI.getType()->isVoidTy()) {
    if (IsSplitType)
      Res =
          MRI->createGenericVirtualRegister(getLLTForType(*CI.getType(), *DL));
    else
      Res = getOrCreateVReg(CI);
  }
  MachineInstrBuilder MIB =
      MIRBuilder.buildIntrinsic(ID, Res, !CI.doesNotAccessMemory());

  for (auto &Arg : CI.arg_operands()) {
    // Some intrinsics take metadata parameters. Reject them.
    if (isa<MetadataAsValue>(Arg))
      return false;
    MIB.addUse(packRegs(*Arg, MIRBuilder));
  }

  if (IsSplitType)
    unpackRegs(CI, Res, MIRBuilder);

  // Add a MachineMemOperand if it is a target mem intrinsic.
  const TargetLowering &TLI = *MF->getSubtarget().getTargetLowering();
  TargetLowering::IntrinsicInfo Info;
  // TODO: Add a GlobalISel version of getTgtMemIntrinsic.
  if (TLI.getTgtMemIntrinsic(Info, CI, *MF, ID)) {
    uint64_t Size = Info.memVT.getStoreSize();
    MIB.addMemOperand(MF->getMachineMemOperand(MachinePointerInfo(Info.ptrVal),
                                               Info.flags, Size, Info.align));
  }

  return true;
}

bool IRTranslator::translateInvoke(const User &U,
                                   MachineIRBuilder &MIRBuilder) {
  const InvokeInst &I = cast<InvokeInst>(U);
  MCContext &Context = MF->getContext();

  const BasicBlock *ReturnBB = I.getSuccessor(0);
  const BasicBlock *EHPadBB = I.getSuccessor(1);

  const Value *Callee = I.getCalledValue();
  const Function *Fn = dyn_cast<Function>(Callee);
  if (isa<InlineAsm>(Callee))
    return false;

  // FIXME: support invoking patchpoint and statepoint intrinsics.
  if (Fn && Fn->isIntrinsic())
    return false;

  // FIXME: support whatever these are.
  if (I.countOperandBundlesOfType(LLVMContext::OB_deopt))
    return false;

  // FIXME: support Windows exception handling.
  if (!isa<LandingPadInst>(EHPadBB->front()))
    return false;

  // Emit the actual call, bracketed by EH_LABELs so that the MF knows about
  // the region covered by the try.
  MCSymbol *BeginSymbol = Context.createTempSymbol();
  MIRBuilder.buildInstr(TargetOpcode::EH_LABEL).addSym(BeginSymbol);

  unsigned Res =
      MRI->createGenericVirtualRegister(getLLTForType(*I.getType(), *DL));
  SmallVector<unsigned, 8> Args;
  for (auto &Arg: I.arg_operands())
    Args.push_back(packRegs(*Arg, MIRBuilder));

  if (!CLI->lowerCall(MIRBuilder, &I, Res, Args,
                      [&]() { return getOrCreateVReg(*I.getCalledValue()); }))
    return false;

  unpackRegs(I, Res, MIRBuilder);

  MCSymbol *EndSymbol = Context.createTempSymbol();
  MIRBuilder.buildInstr(TargetOpcode::EH_LABEL).addSym(EndSymbol);

  // FIXME: track probabilities.
  MachineBasicBlock &EHPadMBB = getMBB(*EHPadBB),
                    &ReturnMBB = getMBB(*ReturnBB);
  MF->addInvoke(&EHPadMBB, BeginSymbol, EndSymbol);
  MIRBuilder.getMBB().addSuccessor(&ReturnMBB);
  MIRBuilder.getMBB().addSuccessor(&EHPadMBB);
  MIRBuilder.buildBr(ReturnMBB);

  return true;
}

bool IRTranslator::translateLandingPad(const User &U,
                                       MachineIRBuilder &MIRBuilder) {
  const LandingPadInst &LP = cast<LandingPadInst>(U);

  MachineBasicBlock &MBB = MIRBuilder.getMBB();
  addLandingPadInfo(LP, MBB);

  MBB.setIsEHPad();

  // If there aren't registers to copy the values into (e.g., during SjLj
  // exceptions), then don't bother.
  auto &TLI = *MF->getSubtarget().getTargetLowering();
  const Constant *PersonalityFn = MF->getFunction().getPersonalityFn();
  if (TLI.getExceptionPointerRegister(PersonalityFn) == 0 &&
      TLI.getExceptionSelectorRegister(PersonalityFn) == 0)
    return true;

  // If landingpad's return type is token type, we don't create DAG nodes
  // for its exception pointer and selector value. The extraction of exception
  // pointer or selector value from token type landingpads is not currently
  // supported.
  if (LP.getType()->isTokenTy())
    return true;

  // Add a label to mark the beginning of the landing pad. Deletion of the
  // landing pad can thus be detected via the MachineModuleInfo.
  MIRBuilder.buildInstr(TargetOpcode::EH_LABEL)
    .addSym(MF->addLandingPad(&MBB));

  LLT Ty = getLLTForType(*LP.getType(), *DL);
  unsigned Undef = MRI->createGenericVirtualRegister(Ty);
  MIRBuilder.buildUndef(Undef);

  SmallVector<LLT, 2> Tys;
  for (Type *Ty : cast<StructType>(LP.getType())->elements())
    Tys.push_back(getLLTForType(*Ty, *DL));
  assert(Tys.size() == 2 && "Only two-valued landingpads are supported");

  // Mark exception register as live in.
  unsigned ExceptionReg = TLI.getExceptionPointerRegister(PersonalityFn);
  if (!ExceptionReg)
    return false;

  MBB.addLiveIn(ExceptionReg);
  ArrayRef<unsigned> ResRegs = getOrCreateVRegs(LP);
  MIRBuilder.buildCopy(ResRegs[0], ExceptionReg);

  unsigned SelectorReg = TLI.getExceptionSelectorRegister(PersonalityFn);
  if (!SelectorReg)
    return false;

  MBB.addLiveIn(SelectorReg);
  unsigned PtrVReg = MRI->createGenericVirtualRegister(Tys[0]);
  MIRBuilder.buildCopy(PtrVReg, SelectorReg);
  MIRBuilder.buildCast(ResRegs[1], PtrVReg);

  return true;
}

bool IRTranslator::translateAlloca(const User &U,
                                   MachineIRBuilder &MIRBuilder) {
  auto &AI = cast<AllocaInst>(U);

  if (AI.isStaticAlloca()) {
    unsigned Res = getOrCreateVReg(AI);
    int FI = getOrCreateFrameIndex(AI);
    MIRBuilder.buildFrameIndex(Res, FI);
    return true;
  }

  // FIXME: support stack probing for Windows.
  if (MF->getTarget().getTargetTriple().isOSWindows())
    return false;

  // Now we're in the harder dynamic case.
  Type *Ty = AI.getAllocatedType();
  unsigned Align =
      std::max((unsigned)DL->getPrefTypeAlignment(Ty), AI.getAlignment());

  unsigned NumElts = getOrCreateVReg(*AI.getArraySize());

  Type *IntPtrIRTy = DL->getIntPtrType(AI.getType());
  LLT IntPtrTy = getLLTForType(*IntPtrIRTy, *DL);
  if (MRI->getType(NumElts) != IntPtrTy) {
    unsigned ExtElts = MRI->createGenericVirtualRegister(IntPtrTy);
    MIRBuilder.buildZExtOrTrunc(ExtElts, NumElts);
    NumElts = ExtElts;
  }

  unsigned AllocSize = MRI->createGenericVirtualRegister(IntPtrTy);
  unsigned TySize =
      getOrCreateVReg(*ConstantInt::get(IntPtrIRTy, -DL->getTypeAllocSize(Ty)));
  MIRBuilder.buildMul(AllocSize, NumElts, TySize);

  LLT PtrTy = getLLTForType(*AI.getType(), *DL);
  auto &TLI = *MF->getSubtarget().getTargetLowering();
  unsigned SPReg = TLI.getStackPointerRegisterToSaveRestore();

  unsigned SPTmp = MRI->createGenericVirtualRegister(PtrTy);
  MIRBuilder.buildCopy(SPTmp, SPReg);

  unsigned AllocTmp = MRI->createGenericVirtualRegister(PtrTy);
  MIRBuilder.buildGEP(AllocTmp, SPTmp, AllocSize);

  // Handle alignment. We have to realign if the allocation granule was smaller
  // than stack alignment, or the specific alloca requires more than stack
  // alignment.
  unsigned StackAlign =
      MF->getSubtarget().getFrameLowering()->getStackAlignment();
  Align = std::max(Align, StackAlign);
  if (Align > StackAlign || DL->getTypeAllocSize(Ty) % StackAlign != 0) {
    // Round the size of the allocation up to the stack alignment size
    // by adding SA-1 to the size. This doesn't overflow because we're
    // computing an address inside an alloca.
    unsigned AlignedAlloc = MRI->createGenericVirtualRegister(PtrTy);
    MIRBuilder.buildPtrMask(AlignedAlloc, AllocTmp, Log2_32(Align));
    AllocTmp = AlignedAlloc;
  }

  MIRBuilder.buildCopy(SPReg, AllocTmp);
  MIRBuilder.buildCopy(getOrCreateVReg(AI), AllocTmp);

  MF->getFrameInfo().CreateVariableSizedObject(Align ? Align : 1, &AI);
  assert(MF->getFrameInfo().hasVarSizedObjects());
  return true;
}
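
// Sketch of the dynamic path for "%p = alloca i32, i32 %n" on a 64-bit
// target (the stack grows down, so the element size is negated):
//   %ext  = G_ZEXT %n(s32)                  ; widen the count to intptr
//   %neg  = G_CONSTANT i64 -4
//   %sz   = G_MUL %ext, %neg
//   %sp   = COPY $sp
//   %tmp  = G_GEP %sp, %sz
//   %tmp  = G_PTR_MASK %tmp, log2(Align)    ; only if realignment is needed
//   $sp   = COPY %tmp ; %p = COPY %tmp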

bool IRTranslator::translateVAArg(const User &U, MachineIRBuilder &MIRBuilder) {
  // FIXME: We may need more info about the type. Because of how LLT works,
  // we're completely discarding the i64/double distinction here (amongst
  // others). Fortunately the ABIs I know of where that matters don't use va_arg
  // anyway but that's not guaranteed.
  MIRBuilder.buildInstr(TargetOpcode::G_VAARG)
      .addDef(getOrCreateVReg(U))
      .addUse(getOrCreateVReg(*U.getOperand(0)))
      .addImm(DL->getABITypeAlignment(U.getType()));
  return true;
}

bool IRTranslator::translateInsertElement(const User &U,
                                          MachineIRBuilder &MIRBuilder) {
  // If it is a <1 x Ty> vector, use the scalar as it is
  // not a legal vector type in LLT.
  if (U.getType()->getVectorNumElements() == 1) {
    unsigned Elt = getOrCreateVReg(*U.getOperand(1));
    auto &Regs = *VMap.getVRegs(U);
    if (Regs.empty()) {
      Regs.push_back(Elt);
      VMap.getOffsets(U)->push_back(0);
    } else {
      MIRBuilder.buildCopy(Regs[0], Elt);
    }
    return true;
  }

  unsigned Res = getOrCreateVReg(U);
  unsigned Val = getOrCreateVReg(*U.getOperand(0));
  unsigned Elt = getOrCreateVReg(*U.getOperand(1));
  unsigned Idx = getOrCreateVReg(*U.getOperand(2));
  MIRBuilder.buildInsertVectorElement(Res, Val, Elt, Idx);
  return true;
}

bool IRTranslator::translateExtractElement(const User &U,
                                           MachineIRBuilder &MIRBuilder) {
  // If it is a <1 x Ty> vector, use the scalar as it is
  // not a legal vector type in LLT.
  if (U.getOperand(0)->getType()->getVectorNumElements() == 1) {
    unsigned Elt = getOrCreateVReg(*U.getOperand(0));
    auto &Regs = *VMap.getVRegs(U);
    if (Regs.empty()) {
      Regs.push_back(Elt);
      VMap.getOffsets(U)->push_back(0);
    } else {
      MIRBuilder.buildCopy(Regs[0], Elt);
    }
    return true;
  }
  unsigned Res = getOrCreateVReg(U);
  unsigned Val = getOrCreateVReg(*U.getOperand(0));
  unsigned Idx = getOrCreateVReg(*U.getOperand(1));
  MIRBuilder.buildExtractVectorElement(Res, Val, Idx);
  return true;
}

bool IRTranslator::translateShuffleVector(const User &U,
                                          MachineIRBuilder &MIRBuilder) {
  MIRBuilder.buildInstr(TargetOpcode::G_SHUFFLE_VECTOR)
      .addDef(getOrCreateVReg(U))
      .addUse(getOrCreateVReg(*U.getOperand(0)))
      .addUse(getOrCreateVReg(*U.getOperand(1)))
      .addUse(getOrCreateVReg(*U.getOperand(2)));
  return true;
}

bool IRTranslator::translatePHI(const User &U, MachineIRBuilder &MIRBuilder) {
  const PHINode &PI = cast<PHINode>(U);

  SmallVector<MachineInstr *, 4> Insts;
  for (auto Reg : getOrCreateVRegs(PI)) {
    auto MIB = MIRBuilder.buildInstr(TargetOpcode::G_PHI, Reg);
    Insts.push_back(MIB.getInstr());
  }

  PendingPHIs.emplace_back(&PI, std::move(Insts));
  return true;
}

bool IRTranslator::translateAtomicCmpXchg(const User &U,
                                          MachineIRBuilder &MIRBuilder) {
  const AtomicCmpXchgInst &I = cast<AtomicCmpXchgInst>(U);

  if (I.isWeak())
    return false;

  auto Flags = I.isVolatile() ? MachineMemOperand::MOVolatile
                              : MachineMemOperand::MONone;
  Flags |= MachineMemOperand::MOLoad | MachineMemOperand::MOStore;

  Type *ResType = I.getType();
  Type *ValType = ResType->Type::getStructElementType(0);

  auto Res = getOrCreateVRegs(I);
  unsigned OldValRes = Res[0];
  unsigned SuccessRes = Res[1];
  unsigned Addr = getOrCreateVReg(*I.getPointerOperand());
  unsigned Cmp = getOrCreateVReg(*I.getCompareOperand());
  unsigned NewVal = getOrCreateVReg(*I.getNewValOperand());

  MIRBuilder.buildAtomicCmpXchgWithSuccess(
      OldValRes, SuccessRes, Addr, Cmp, NewVal,
      *MF->getMachineMemOperand(MachinePointerInfo(I.getPointerOperand()),
                                Flags, DL->getTypeStoreSize(ValType),
                                getMemOpAlignment(I), AAMDNodes(), nullptr,
                                I.getSyncScopeID(), I.getSuccessOrdering(),
                                I.getFailureOrdering()));
  return true;
}

bool IRTranslator::translateAtomicRMW(const User &U,
                                      MachineIRBuilder &MIRBuilder) {
  const AtomicRMWInst &I = cast<AtomicRMWInst>(U);

  auto Flags = I.isVolatile() ? MachineMemOperand::MOVolatile
                              : MachineMemOperand::MONone;
  Flags |= MachineMemOperand::MOLoad | MachineMemOperand::MOStore;

  Type *ResType = I.getType();

  unsigned Res = getOrCreateVReg(I);
  unsigned Addr = getOrCreateVReg(*I.getPointerOperand());
  unsigned Val = getOrCreateVReg(*I.getValOperand());

  unsigned Opcode = 0;
  switch (I.getOperation()) {
  default:
    llvm_unreachable("Unknown atomicrmw op");
    return false;
  case AtomicRMWInst::Xchg:
    Opcode = TargetOpcode::G_ATOMICRMW_XCHG;
    break;
  case AtomicRMWInst::Add:
    Opcode = TargetOpcode::G_ATOMICRMW_ADD;
    break;
  case AtomicRMWInst::Sub:
    Opcode = TargetOpcode::G_ATOMICRMW_SUB;
    break;
  case AtomicRMWInst::And:
    Opcode = TargetOpcode::G_ATOMICRMW_AND;
    break;
  case AtomicRMWInst::Nand:
    Opcode = TargetOpcode::G_ATOMICRMW_NAND;
    break;
  case AtomicRMWInst::Or:
    Opcode = TargetOpcode::G_ATOMICRMW_OR;
    break;
  case AtomicRMWInst::Xor:
    Opcode = TargetOpcode::G_ATOMICRMW_XOR;
    break;
  case AtomicRMWInst::Max:
    Opcode = TargetOpcode::G_ATOMICRMW_MAX;
    break;
  case AtomicRMWInst::Min:
    Opcode = TargetOpcode::G_ATOMICRMW_MIN;
    break;
  case AtomicRMWInst::UMax:
    Opcode = TargetOpcode::G_ATOMICRMW_UMAX;
    break;
  case AtomicRMWInst::UMin:
    Opcode = TargetOpcode::G_ATOMICRMW_UMIN;
    break;
  }

  MIRBuilder.buildAtomicRMW(
      Opcode, Res, Addr, Val,
      *MF->getMachineMemOperand(MachinePointerInfo(I.getPointerOperand()),
                                Flags, DL->getTypeStoreSize(ResType),
                                getMemOpAlignment(I), AAMDNodes(), nullptr,
                                I.getSyncScopeID(), I.getOrdering()));
  return true;
}

void IRTranslator::finishPendingPhis() {
  for (auto &Phi : PendingPHIs) {
    const PHINode *PI = Phi.first;
    ArrayRef<MachineInstr *> ComponentPHIs = Phi.second;

    // All MachineBasicBlocks exist, add them to the PHI. We assume IRTranslator
    // won't create extra control flow here, otherwise we need to find the
    // dominating predecessor here (or perhaps force the weirder IRTranslators
    // to provide a simple boundary).
    SmallSet<const BasicBlock *, 4> HandledPreds;

    for (unsigned i = 0; i < PI->getNumIncomingValues(); ++i) {
      auto IRPred = PI->getIncomingBlock(i);
      if (HandledPreds.count(IRPred))
        continue;

      HandledPreds.insert(IRPred);
      ArrayRef<unsigned> ValRegs = getOrCreateVRegs(*PI->getIncomingValue(i));
      for (auto Pred : getMachinePredBBs({IRPred, PI->getParent()})) {
        assert(Pred->isSuccessor(ComponentPHIs[0]->getParent()) &&
               "incorrect CFG at MachineBasicBlock level");
        for (unsigned j = 0; j < ValRegs.size(); ++j) {
          MachineInstrBuilder MIB(*MF, ComponentPHIs[j]);
          MIB.addUse(ValRegs[j]);
          MIB.addMBB(Pred);
        }
      }
    }
  }
}

bool IRTranslator::valueIsSplit(const Value &V,
                                SmallVectorImpl<uint64_t> *Offsets) {
  SmallVector<LLT, 4> SplitTys;
  computeValueLLTs(*DL, *V.getType(), SplitTys, Offsets);
  return SplitTys.size() > 1;
}

bool IRTranslator::translate(const Instruction &Inst) {
  CurBuilder.setDebugLoc(Inst.getDebugLoc());
  switch (Inst.getOpcode()) {
#define HANDLE_INST(NUM, OPCODE, CLASS) \
  case Instruction::OPCODE: return translate##OPCODE(Inst, CurBuilder);
#include "llvm/IR/Instruction.def"
  default:
    return false;
  }
}

bool IRTranslator::translate(const Constant &C, unsigned Reg) {
  if (auto CI = dyn_cast<ConstantInt>(&C))
    EntryBuilder.buildConstant(Reg, *CI);
  else if (auto CF = dyn_cast<ConstantFP>(&C))
    EntryBuilder.buildFConstant(Reg, *CF);
  else if (isa<UndefValue>(C))
    EntryBuilder.buildUndef(Reg);
  else if (isa<ConstantPointerNull>(C)) {
    // As we are trying to build a constant val of 0 into a pointer,
    // insert a cast to make them correct with respect to types.
    unsigned NullSize = DL->getTypeSizeInBits(C.getType());
    auto *ZeroTy = Type::getIntNTy(C.getContext(), NullSize);
    auto *ZeroVal = ConstantInt::get(ZeroTy, 0);
    unsigned ZeroReg = getOrCreateVReg(*ZeroVal);
    EntryBuilder.buildCast(Reg, ZeroReg);
  } else if (auto GV = dyn_cast<GlobalValue>(&C))
    EntryBuilder.buildGlobalValue(Reg, GV);
  else if (auto CAZ = dyn_cast<ConstantAggregateZero>(&C)) {
    if (!CAZ->getType()->isVectorTy())
      return false;
    // Return the scalar if it is a <1 x Ty> vector.
    if (CAZ->getNumElements() == 1)
      return translate(*CAZ->getElementValue(0u), Reg);
    std::vector<unsigned> Ops;
    for (unsigned i = 0; i < CAZ->getNumElements(); ++i) {
      Constant &Elt = *CAZ->getElementValue(i);
      Ops.push_back(getOrCreateVReg(Elt));
    }
    EntryBuilder.buildMerge(Reg, Ops);
  } else if (auto CV = dyn_cast<ConstantDataVector>(&C)) {
    // Return the scalar if it is a <1 x Ty> vector.
    if (CV->getNumElements() == 1)
      return translate(*CV->getElementAsConstant(0), Reg);
    std::vector<unsigned> Ops;
    for (unsigned i = 0; i < CV->getNumElements(); ++i) {
      Constant &Elt = *CV->getElementAsConstant(i);
      Ops.push_back(getOrCreateVReg(Elt));
    }
    EntryBuilder.buildMerge(Reg, Ops);
  } else if (auto CE = dyn_cast<ConstantExpr>(&C)) {
    switch (CE->getOpcode()) {
#define HANDLE_INST(NUM, OPCODE, CLASS) \
  case Instruction::OPCODE: return translate##OPCODE(*CE, EntryBuilder);
#include "llvm/IR/Instruction.def"
    default:
      return false;
    }
  } else if (auto CV = dyn_cast<ConstantVector>(&C)) {
    if (CV->getNumOperands() == 1)
      return translate(*CV->getOperand(0), Reg);
    SmallVector<unsigned, 4> Ops;
    for (unsigned i = 0; i < CV->getNumOperands(); ++i) {
      Ops.push_back(getOrCreateVReg(*CV->getOperand(i)));
    }
    EntryBuilder.buildMerge(Reg, Ops);
  } else
    return false;

  return true;
}

void IRTranslator::finalizeFunction() {
  // Release the memory used by the different maps we
  // needed during the translation.
  PendingPHIs.clear();
  VMap.reset();
  FrameIndices.clear();
  MachinePreds.clear();
  // MachineIRBuilder::DebugLoc can outlive the DILocation it holds. Clear it
  // to avoid accessing freed memory (in runOnMachineFunction) and to avoid
  // destroying it twice (in ~IRTranslator() and ~LLVMContext()).
  EntryBuilder = MachineIRBuilder();
  CurBuilder = MachineIRBuilder();
}
1522 
1524  MF = &CurMF;
1525  const Function &F = MF->getFunction();
1526  if (F.empty())
1527  return false;
1528  CLI = MF->getSubtarget().getCallLowering();
1529  CurBuilder.setMF(*MF);
1530  EntryBuilder.setMF(*MF);
1531  MRI = &MF->getRegInfo();
1532  DL = &F.getParent()->getDataLayout();
1533  TPC = &getAnalysis<TargetPassConfig>();
1534  ORE = llvm::make_unique<OptimizationRemarkEmitter>(&F);
1535 
1536  assert(PendingPHIs.empty() && "stale PHIs");
1537 
1538  if (!DL->isLittleEndian()) {
1539  // Currently we don't properly handle big endian code.
1540  OptimizationRemarkMissed R("gisel-irtranslator", "GISelFailure",
1541  F.getSubprogram(), &F.getEntryBlock());
1542  R << "unable to translate in big endian mode";
1543  reportTranslationError(*MF, *TPC, *ORE, R);
1544  }
1545 
1546  // Release the per-function state when we return, whether we succeeded or not.
1547  auto FinalizeOnReturn = make_scope_exit([this]() { finalizeFunction(); });
1548 
1549  // Setup a separate basic-block for the arguments and constants
1550  MachineBasicBlock *EntryBB = MF->CreateMachineBasicBlock();
1551  MF->push_back(EntryBB);
1552  EntryBuilder.setMBB(*EntryBB);
1553 
1554  // Create all blocks, in IR order, to preserve the layout.
1555  for (const BasicBlock &BB: F) {
1556  auto *&MBB = BBToMBB[&BB];
1557 
1558  MBB = MF->CreateMachineBasicBlock(&BB);
1559  MF->push_back(MBB);
1560 
1561  if (BB.hasAddressTaken())
1562  MBB->setHasAddressTaken();
1563  }
1564 
1565  // Make our arguments/constants entry block fallthrough to the IR entry block.
1566  EntryBB->addSuccessor(&getMBB(F.front()));
1567 
1568  // Lower the actual args into this basic block.
1569  SmallVector<unsigned, 8> VRegArgs;
1570  for (const Argument &Arg: F.args()) {
1571  if (DL->getTypeStoreSize(Arg.getType()) == 0)
1572  continue; // Don't handle zero sized types.
1573  VRegArgs.push_back(
1575  }
1576 
1577  if (!CLI->lowerFormalArguments(EntryBuilder, F, VRegArgs)) {
1578  OptimizationRemarkMissed R("gisel-irtranslator", "GISelFailure",
1579  F.getSubprogram(), &F.getEntryBlock());
1580  R << "unable to lower arguments: " << ore::NV("Prototype", F.getType());
1581  reportTranslationError(*MF, *TPC, *ORE, R);
1582  return false;
1583  }
1584 
1585  auto ArgIt = F.arg_begin();
1586  for (auto &VArg : VRegArgs) {
1587  // If the argument is an unsplit scalar then don't use unpackRegs to avoid
1588  // creating redundant copies.
1589  if (!valueIsSplit(*ArgIt, VMap.getOffsets(*ArgIt))) {
1590  auto &VRegs = *VMap.getVRegs(cast<Value>(*ArgIt));
1591  assert(VRegs.empty() && "VRegs already populated?");
1592  VRegs.push_back(VArg);
1593  } else {
1594  unpackRegs(*ArgIt, VArg, EntryBuilder);
1595  }
1596  ArgIt++;
1597  }
1598 
1599  // And translate the function!
1600  for (const BasicBlock &BB : F) {
1601  MachineBasicBlock &MBB = getMBB(BB);
1602  // Set the insertion point of all the following translations to
1603  // the end of this basic block.
1604  CurBuilder.setMBB(MBB);
1605 
1606  for (const Instruction &Inst : BB) {
1607  if (translate(Inst))
1608  continue;
1609 
1610  OptimizationRemarkMissed R("gisel-irtranslator", "GISelFailure",
1611  Inst.getDebugLoc(), &BB);
1612  R << "unable to translate instruction: " << ore::NV("Opcode", &Inst);
1613 
1614  if (ORE->allowExtraAnalysis("gisel-irtranslator")) {
1615  std::string InstStrStorage;
1616  raw_string_ostream InstStr(InstStrStorage);
1617  InstStr << Inst;
1618 
1619  R << ": '" << InstStr.str() << "'";
1620  }
1621 
1622  reportTranslationError(*MF, *TPC, *ORE, R);
1623  return false;
1624  }
1625  }
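  // Editor's illustration (hedged; this comment is not in the original file):
  // for straight-line IR such as
  //   %sum = add i32 %a, %b
  // translate() above emits generic, target-independent MIR along the lines of
  //   %2:_(s32) = G_ADD %0:_(s32), %1:_(s32)
  // which later GlobalISel phases (Legalizer, RegBankSelect, InstructionSelect)
  // lower to real target instructions.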
1626 
1627  finishPendingPhis();
1628 
1629  // Merge the argument lowering and constants block with its single
1630  // successor, the LLVM-IR entry block. We want the basic block to
1631  // be maximal.
1632  assert(EntryBB->succ_size() == 1 &&
1633  "Custom BB used for lowering should have only one successor");
1634  // Get the successor of the current entry block.
1635  MachineBasicBlock &NewEntryBB = **EntryBB->succ_begin();
1636  assert(NewEntryBB.pred_size() == 1 &&
1637  "LLVM-IR entry block has a predecessor!?");
1638  // Move all the instructions from the current entry block to the
1639  // new entry block.
1640  NewEntryBB.splice(NewEntryBB.begin(), EntryBB, EntryBB->begin(),
1641  EntryBB->end());
1642 
1643  // Update the live-in information for the new entry block.
1644  for (const MachineBasicBlock::RegisterMaskPair &LiveIn : EntryBB->liveins())
1645  NewEntryBB.addLiveIn(LiveIn);
1646  NewEntryBB.sortUniqueLiveIns();
1647 
1648  // Get rid of the now empty basic block.
1649  EntryBB->removeSuccessor(&NewEntryBB);
1650  MF->remove(EntryBB);
1651  MF->DeleteMachineBasicBlock(EntryBB);
1652 
1653  assert(&MF->front() == &NewEntryBB &&
1654  "New entry wasn't next in the list of basic blocks!");
1655 
1656  // Initialize stack protector information.
1657  StackProtector &SP = getAnalysis<StackProtector>();
1658  SP.copyToMachineFrameInfo(MF->getFrameInfo());
1659 
1660  return false;
1661 }
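The pass is not run in isolation: a GlobalISel-capable backend schedules it by
overriding the addIRTranslator() hook on its TargetPassConfig subclass. The
sketch below shows the usual one-line implementation; MyTargetPassConfig is a
hypothetical class used for illustration, while TargetPassConfig, addPass, and
the hook itself are the real LLVM APIs.

// A hedged sketch of how a backend enables this pass; GlobalISel-capable
// targets (e.g. AArch64) implement the hook the same way.
#include "llvm/CodeGen/GlobalISel/IRTranslator.h"
#include "llvm/CodeGen/TargetPassConfig.h"

class MyTargetPassConfig : public llvm::TargetPassConfig { // hypothetical
  // ... constructor and other overrides elided ...
  bool addIRTranslator() override {
    addPass(new llvm::IRTranslator()); // schedule LLVM IR -> generic MI
    return false;                      // false means the hook succeeded
  }
};

To inspect what the translator produced, llc -global-isel
-stop-after=irtranslator emits the MIR immediately after this pass, before the
Legalizer runs.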