//===- llvm/CodeGen/GlobalISel/IRTranslator.cpp - IRTranslator ---*- C++ -*-==//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
/// \file
/// This file implements the IRTranslator class.
//===----------------------------------------------------------------------===//

#include "llvm/CodeGen/GlobalISel/IRTranslator.h"
#include "llvm/ADT/PostOrderIterator.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/ScopeExit.h"
#include "llvm/ADT/SmallSet.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/Analysis/OptimizationRemarkEmitter.h"
#include "llvm/CodeGen/Analysis.h"
#include "llvm/CodeGen/GlobalISel/CallLowering.h"
#include "llvm/CodeGen/GlobalISel/MachineIRBuilder.h"
#include "llvm/CodeGen/LowLevelType.h"
#include "llvm/CodeGen/MachineBasicBlock.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineMemOperand.h"
#include "llvm/CodeGen/MachineOperand.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/StackProtector.h"
#include "llvm/CodeGen/TargetFrameLowering.h"
#include "llvm/CodeGen/TargetLowering.h"
#include "llvm/CodeGen/TargetPassConfig.h"
#include "llvm/CodeGen/TargetRegisterInfo.h"
#include "llvm/CodeGen/TargetSubtargetInfo.h"
#include "llvm/IR/BasicBlock.h"
#include "llvm/IR/CFG.h"
#include "llvm/IR/Constant.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DebugInfo.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/GetElementPtrTypeIterator.h"
#include "llvm/IR/InlineAsm.h"
#include "llvm/IR/InstrTypes.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/Metadata.h"
#include "llvm/IR/Type.h"
#include "llvm/IR/User.h"
#include "llvm/IR/Value.h"
#include "llvm/MC/MCContext.h"
#include "llvm/Pass.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/CodeGen.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Target/TargetIntrinsicInfo.h"
#include "llvm/Target/TargetMachine.h"
#include <algorithm>
#include <cassert>
#include <cstdint>
#include <iterator>
#include <string>
#include <utility>
#include <vector>

#define DEBUG_TYPE "irtranslator"

using namespace llvm;

char IRTranslator::ID = 0;

INITIALIZE_PASS_BEGIN(IRTranslator, DEBUG_TYPE, "IRTranslator LLVM IR -> MI",
                false, false)
INITIALIZE_PASS_DEPENDENCY(TargetPassConfig)
INITIALIZE_PASS_END(IRTranslator, DEBUG_TYPE, "IRTranslator LLVM IR -> MI",
                false, false)

static void reportTranslationError(MachineFunction &MF,
                                   const TargetPassConfig &TPC,
                                   OptimizationRemarkEmitter &ORE,
                                   OptimizationRemarkMissed &R) {
  MF.getProperties().set(MachineFunctionProperties::Property::FailedISel);

  // Print the function name explicitly if we don't have a debug location
  // (which makes the diagnostic less useful) or if we're going to emit a raw
  // error.
  if (!R.getLocation().isValid() || TPC.isGlobalISelAbortEnabled())
    R << (" (in function: " + MF.getName() + ")").str();

  if (TPC.isGlobalISelAbortEnabled())
    report_fatal_error(R.getMsg());
  else
    ORE.emit(R);
}

IRTranslator::IRTranslator() : MachineFunctionPass(ID) {
  initializeIRTranslatorPass(*PassRegistry::getPassRegistry());
}

#ifndef NDEBUG
/// Verify that every instruction created has the same DILocation as the
/// instruction being translated.
class DILocationVerifier : MachineFunction::Delegate {
  MachineFunction &MF;
  const Instruction *CurrInst = nullptr;

public:
  DILocationVerifier(MachineFunction &MF) : MF(MF) { MF.setDelegate(this); }
  ~DILocationVerifier() { MF.resetDelegate(this); }

  const Instruction *getCurrentInst() const { return CurrInst; }
  void setCurrentInst(const Instruction *Inst) { CurrInst = Inst; }

  void MF_HandleInsertion(const MachineInstr &MI) override {
    assert(getCurrentInst() && "Inserted instruction without a current MI");

    // Only print the check message if we're actually checking it.
#ifndef NDEBUG
    LLVM_DEBUG(dbgs() << "Checking DILocation from " << *CurrInst
                      << " was copied to " << MI);
#endif
    assert(CurrInst->getDebugLoc() == MI.getDebugLoc() &&
           "Line info was not transferred to all instructions");
  }
  void MF_HandleRemoval(const MachineInstr &MI) override {}
};
#endif // ifndef NDEBUG

void IRTranslator::getAnalysisUsage(AnalysisUsage &AU) const {
  AU.addRequired<StackProtector>();
  AU.addRequired<TargetPassConfig>();
  getSelectionDAGFallbackAnalysisUsage(AU);
  MachineFunctionPass::getAnalysisUsage(AU);
}

static void computeValueLLTs(const DataLayout &DL, Type &Ty,
                             SmallVectorImpl<LLT> &ValueTys,
                             SmallVectorImpl<uint64_t> *Offsets = nullptr,
                             uint64_t StartingOffset = 0) {
  // Given a struct type, recursively traverse the elements.
  if (StructType *STy = dyn_cast<StructType>(&Ty)) {
    const StructLayout *SL = DL.getStructLayout(STy);
    for (unsigned I = 0, E = STy->getNumElements(); I != E; ++I)
      computeValueLLTs(DL, *STy->getElementType(I), ValueTys, Offsets,
                       StartingOffset + SL->getElementOffset(I));
    return;
  }
  // Given an array type, recursively traverse the elements.
  if (ArrayType *ATy = dyn_cast<ArrayType>(&Ty)) {
    Type *EltTy = ATy->getElementType();
    uint64_t EltSize = DL.getTypeAllocSize(EltTy);
    for (unsigned i = 0, e = ATy->getNumElements(); i != e; ++i)
      computeValueLLTs(DL, *EltTy, ValueTys, Offsets,
                       StartingOffset + i * EltSize);
    return;
  }
  // Interpret void as zero return values.
  if (Ty.isVoidTy())
    return;
  // Base case: we can get an LLT for this LLVM IR type.
  ValueTys.push_back(getLLTForType(Ty, DL));
  if (Offsets != nullptr)
    Offsets->push_back(StartingOffset * 8);
}
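
// Illustrative example (assuming a typical 64-bit DataLayout): for the IR
// type {i32, [2 x i16]}, computeValueLLTs produces ValueTys = {s32, s16, s16}
// and, when Offsets is non-null, bit offsets {0, 32, 48}, since the i32 lives
// at byte 0 and the two i16 array elements at bytes 4 and 6.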

IRTranslator::ValueToVRegInfo::VRegListT &
IRTranslator::allocateVRegs(const Value &Val) {
  assert(!VMap.contains(Val) && "Value already allocated in VMap");
  auto *Regs = VMap.getVRegs(Val);
  auto *Offsets = VMap.getOffsets(Val);
  SmallVector<LLT, 4> SplitTys;
  computeValueLLTs(*DL, *Val.getType(), SplitTys,
                   Offsets->empty() ? Offsets : nullptr);
  for (unsigned i = 0; i < SplitTys.size(); ++i)
    Regs->push_back(0);
  return *Regs;
}

ArrayRef<unsigned> IRTranslator::getOrCreateVRegs(const Value &Val) {
  auto VRegsIt = VMap.findVRegs(Val);
  if (VRegsIt != VMap.vregs_end())
    return *VRegsIt->second;

  if (Val.getType()->isVoidTy())
    return *VMap.getVRegs(Val);

  // Create entry for this type.
  auto *VRegs = VMap.getVRegs(Val);
  auto *Offsets = VMap.getOffsets(Val);

  assert(Val.getType()->isSized() &&
         "Don't know how to create an empty vreg");

  SmallVector<LLT, 4> SplitTys;
  computeValueLLTs(*DL, *Val.getType(), SplitTys,
                   Offsets->empty() ? Offsets : nullptr);

  if (!isa<Constant>(Val)) {
    for (auto Ty : SplitTys)
      VRegs->push_back(MRI->createGenericVirtualRegister(Ty));
    return *VRegs;
  }

  if (Val.getType()->isAggregateType()) {
    // UndefValue, ConstantAggregateZero
    auto &C = cast<Constant>(Val);
    unsigned Idx = 0;
    while (auto Elt = C.getAggregateElement(Idx++)) {
      auto EltRegs = getOrCreateVRegs(*Elt);
      llvm::copy(EltRegs, std::back_inserter(*VRegs));
    }
  } else {
    assert(SplitTys.size() == 1 && "unexpectedly split LLT");
    VRegs->push_back(MRI->createGenericVirtualRegister(SplitTys[0]));
    bool Success = translate(cast<Constant>(Val), VRegs->front());
    if (!Success) {
      OptimizationRemarkMissed R("gisel-irtranslator", "GISelFailure",
                                 MF->getFunction().getSubprogram(),
                                 &MF->getFunction().getEntryBlock());
      R << "unable to translate constant: " << ore::NV("Type", Val.getType());
      reportTranslationError(*MF, *TPC, *ORE, R);
      return *VRegs;
    }
  }

  return *VRegs;
}

int IRTranslator::getOrCreateFrameIndex(const AllocaInst &AI) {
  if (FrameIndices.find(&AI) != FrameIndices.end())
    return FrameIndices[&AI];

  unsigned ElementSize = DL->getTypeStoreSize(AI.getAllocatedType());
  unsigned Size =
      ElementSize * cast<ConstantInt>(AI.getArraySize())->getZExtValue();

  // Always allocate at least one byte.
  Size = std::max(Size, 1u);

  unsigned Alignment = AI.getAlignment();
  if (!Alignment)
    Alignment = DL->getABITypeAlignment(AI.getAllocatedType());

  int &FI = FrameIndices[&AI];
  FI = MF->getFrameInfo().CreateStackObject(Size, Alignment, false, &AI);
  return FI;
}
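
// Illustrative example: for "%buf = alloca [4 x i32], align 16", the code
// above computes Size = 16 (element store size 16 times a constant array size
// of 1) and keeps the explicit alignment of 16; an "alloca i8" without an
// explicit alignment instead falls back to the DataLayout's ABI alignment.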

unsigned IRTranslator::getMemOpAlignment(const Instruction &I) {
  unsigned Alignment = 0;
  Type *ValTy = nullptr;
  if (const StoreInst *SI = dyn_cast<StoreInst>(&I)) {
    Alignment = SI->getAlignment();
    ValTy = SI->getValueOperand()->getType();
  } else if (const LoadInst *LI = dyn_cast<LoadInst>(&I)) {
    Alignment = LI->getAlignment();
    ValTy = LI->getType();
  } else if (const AtomicCmpXchgInst *AI = dyn_cast<AtomicCmpXchgInst>(&I)) {
    // TODO(PR27168): This instruction has no alignment attribute, but unlike
    // the default alignment for load/store, the default here is to assume
    // it has NATURAL alignment, not DataLayout-specified alignment.
    const DataLayout &DL = AI->getModule()->getDataLayout();
    Alignment = DL.getTypeStoreSize(AI->getCompareOperand()->getType());
    ValTy = AI->getCompareOperand()->getType();
  } else if (const AtomicRMWInst *AI = dyn_cast<AtomicRMWInst>(&I)) {
    // TODO(PR27168): This instruction has no alignment attribute, but unlike
    // the default alignment for load/store, the default here is to assume
    // it has NATURAL alignment, not DataLayout-specified alignment.
    const DataLayout &DL = AI->getModule()->getDataLayout();
    Alignment = DL.getTypeStoreSize(AI->getValOperand()->getType());
    ValTy = AI->getType();
  } else {
    OptimizationRemarkMissed R("gisel-irtranslator", "", &I);
    R << "unable to translate memop: " << ore::NV("Opcode", &I);
    reportTranslationError(*MF, *TPC, *ORE, R);
    return 1;
  }

  return Alignment ? Alignment : DL->getABITypeAlignment(ValTy);
}

MachineBasicBlock &IRTranslator::getMBB(const BasicBlock &BB) {
  MachineBasicBlock *&MBB = BBToMBB[&BB];
  assert(MBB && "BasicBlock was not encountered before");
  return *MBB;
}

void IRTranslator::addMachineCFGPred(CFGEdge Edge, MachineBasicBlock *NewPred) {
  assert(NewPred && "new predecessor must be a real MachineBasicBlock");
  MachinePreds[Edge].push_back(NewPred);
}

bool IRTranslator::translateBinaryOp(unsigned Opcode, const User &U,
                                     MachineIRBuilder &MIRBuilder) {
  // FIXME: handle signed/unsigned wrapping flags.

  // Get or create a virtual register for each value.
  // Unless the value is a Constant => loadimm cst?
  // or inline constant each time?
  // Creation of a virtual register needs to have a size.
  unsigned Op0 = getOrCreateVReg(*U.getOperand(0));
  unsigned Op1 = getOrCreateVReg(*U.getOperand(1));
  unsigned Res = getOrCreateVReg(U);
  auto FBinOp =
      MIRBuilder.buildInstr(Opcode).addDef(Res).addUse(Op0).addUse(Op1);
  if (isa<Instruction>(U)) {
    MachineInstr *FBinOpMI = FBinOp.getInstr();
    const Instruction &I = cast<Instruction>(U);
    FBinOpMI->copyIRFlags(I);
  }
  return true;
}
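
// Illustrative example: "%sum = add i32 %a, %b" becomes a single generic
// instruction, roughly "%sum:_(s32) = G_ADD %a, %b", with IR-level flags
// (e.g. fast-math flags on FP ops) copied over by copyIRFlags above.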

bool IRTranslator::translateFSub(const User &U, MachineIRBuilder &MIRBuilder) {
  // -0.0 - X --> G_FNEG
  if (isa<Constant>(U.getOperand(0)) &&
      U.getOperand(0) == ConstantFP::getZeroValueForNegation(U.getType())) {
    MIRBuilder.buildInstr(TargetOpcode::G_FNEG)
        .addDef(getOrCreateVReg(U))
        .addUse(getOrCreateVReg(*U.getOperand(1)));
    return true;
  }
  return translateBinaryOp(TargetOpcode::G_FSUB, U, MIRBuilder);
}

bool IRTranslator::translateFNeg(const User &U, MachineIRBuilder &MIRBuilder) {
  // The unary fneg has a single operand, at index 0.
  MIRBuilder.buildInstr(TargetOpcode::G_FNEG)
      .addDef(getOrCreateVReg(U))
      .addUse(getOrCreateVReg(*U.getOperand(0)));
  return true;
}

bool IRTranslator::translateCompare(const User &U,
                                    MachineIRBuilder &MIRBuilder) {
  const CmpInst *CI = dyn_cast<CmpInst>(&U);
  unsigned Op0 = getOrCreateVReg(*U.getOperand(0));
  unsigned Op1 = getOrCreateVReg(*U.getOperand(1));
  unsigned Res = getOrCreateVReg(U);
  CmpInst::Predicate Pred =
      CI ? CI->getPredicate() : static_cast<CmpInst::Predicate>(
                                    cast<ConstantExpr>(U).getPredicate());
  if (CmpInst::isIntPredicate(Pred))
    MIRBuilder.buildICmp(Pred, Res, Op0, Op1);
  else if (Pred == CmpInst::FCMP_FALSE)
    // Use U.getType(): CI is null when U is a ConstantExpr compare.
    MIRBuilder.buildCopy(
        Res, getOrCreateVReg(*Constant::getNullValue(U.getType())));
  else if (Pred == CmpInst::FCMP_TRUE)
    MIRBuilder.buildCopy(
        Res, getOrCreateVReg(*Constant::getAllOnesValue(U.getType())));
  else
    MIRBuilder.buildFCmp(Pred, Res, Op0, Op1);

  return true;
}

bool IRTranslator::translateRet(const User &U, MachineIRBuilder &MIRBuilder) {
  const ReturnInst &RI = cast<ReturnInst>(U);
  const Value *Ret = RI.getReturnValue();
  if (Ret && DL->getTypeStoreSize(Ret->getType()) == 0)
    Ret = nullptr;

  ArrayRef<unsigned> VRegs;
  if (Ret)
    VRegs = getOrCreateVRegs(*Ret);

  // The target may change the insertion point, but that is not a problem:
  // a return is the last instruction of the block anyway.
  return CLI->lowerReturn(MIRBuilder, Ret, VRegs);
}

bool IRTranslator::translateBr(const User &U, MachineIRBuilder &MIRBuilder) {
  const BranchInst &BrInst = cast<BranchInst>(U);
  unsigned Succ = 0;
  if (!BrInst.isUnconditional()) {
    // We want a G_BRCOND to the true BB followed by an unconditional branch.
    unsigned Tst = getOrCreateVReg(*BrInst.getCondition());
    const BasicBlock &TrueTgt = *cast<BasicBlock>(BrInst.getSuccessor(Succ++));
    MachineBasicBlock &TrueBB = getMBB(TrueTgt);
    MIRBuilder.buildBrCond(Tst, TrueBB);
  }

  const BasicBlock &BrTgt = *cast<BasicBlock>(BrInst.getSuccessor(Succ));
  MachineBasicBlock &TgtBB = getMBB(BrTgt);
  MachineBasicBlock &CurBB = MIRBuilder.getMBB();

  // If the unconditional target is the layout successor, fallthrough.
  if (!CurBB.isLayoutSuccessor(&TgtBB))
    MIRBuilder.buildBr(TgtBB);

  // Link successors.
  for (const BasicBlock *Succ : successors(&BrInst))
    CurBB.addSuccessor(&getMBB(*Succ));
  return true;
}
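
// Illustrative example: "br i1 %c, label %then, label %else" becomes roughly
//   G_BRCOND %c(s1), %bb.then
//   G_BR %bb.else            ; omitted when %bb.else is the layout successor
// plus CFG edges from the current MachineBasicBlock to both targets.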

bool IRTranslator::translateSwitch(const User &U,
                                   MachineIRBuilder &MIRBuilder) {
  // For now, just translate as a chain of conditional branches.
  // FIXME: could we share most of the logic/code in
  // SelectionDAGBuilder::visitSwitch between SelectionDAG and GlobalISel?
  // At first sight, it seems most of the logic in there is independent of
  // SelectionDAG-specifics and a lot of work went in to optimize switch
  // lowering in there.

  const SwitchInst &SwInst = cast<SwitchInst>(U);
  const unsigned SwCondValue = getOrCreateVReg(*SwInst.getCondition());
  const BasicBlock *OrigBB = SwInst.getParent();

  LLT LLTi1 = getLLTForType(*Type::getInt1Ty(U.getContext()), *DL);
  for (auto &CaseIt : SwInst.cases()) {
    const unsigned CaseValueReg = getOrCreateVReg(*CaseIt.getCaseValue());
    const unsigned Tst = MRI->createGenericVirtualRegister(LLTi1);
    MIRBuilder.buildICmp(CmpInst::ICMP_EQ, Tst, CaseValueReg, SwCondValue);
    MachineBasicBlock &CurMBB = MIRBuilder.getMBB();
    const BasicBlock *TrueBB = CaseIt.getCaseSuccessor();
    MachineBasicBlock &TrueMBB = getMBB(*TrueBB);

    MIRBuilder.buildBrCond(Tst, TrueMBB);
    CurMBB.addSuccessor(&TrueMBB);
    addMachineCFGPred({OrigBB, TrueBB}, &CurMBB);

    MachineBasicBlock *FalseMBB =
        MF->CreateMachineBasicBlock(SwInst.getParent());
    // Insert the comparison blocks one after the other.
    MF->insert(std::next(CurMBB.getIterator()), FalseMBB);
    MIRBuilder.buildBr(*FalseMBB);
    CurMBB.addSuccessor(FalseMBB);

    MIRBuilder.setMBB(*FalseMBB);
  }
  // Handle the default case.
  const BasicBlock *DefaultBB = SwInst.getDefaultDest();
  MachineBasicBlock &DefaultMBB = getMBB(*DefaultBB);
  MIRBuilder.buildBr(DefaultMBB);
  MachineBasicBlock &CurMBB = MIRBuilder.getMBB();
  CurMBB.addSuccessor(&DefaultMBB);
  addMachineCFGPred({OrigBB, DefaultBB}, &CurMBB);

  return true;
}
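
// Illustrative example: a two-case "switch i32 %v" lowers to a chain of
// compare-and-branch blocks, each shaped roughly like
//   %t:_(s1) = G_ICMP intpred(eq), %case_val, %v
//   G_BRCOND %t, %bb.case_target
//   G_BR %bb.next_compare
// with the final block branching unconditionally to the default destination.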

bool IRTranslator::translateIndirectBr(const User &U,
                                       MachineIRBuilder &MIRBuilder) {
  const IndirectBrInst &BrInst = cast<IndirectBrInst>(U);

  const unsigned Tgt = getOrCreateVReg(*BrInst.getAddress());
  MIRBuilder.buildBrIndirect(Tgt);

  // Link successors.
  MachineBasicBlock &CurBB = MIRBuilder.getMBB();
  for (const BasicBlock *Succ : successors(&BrInst))
    CurBB.addSuccessor(&getMBB(*Succ));

  return true;
}

bool IRTranslator::translateLoad(const User &U, MachineIRBuilder &MIRBuilder) {
  const LoadInst &LI = cast<LoadInst>(U);

  auto Flags = LI.isVolatile() ? MachineMemOperand::MOVolatile
                               : MachineMemOperand::MONone;
  Flags |= MachineMemOperand::MOLoad;

  if (DL->getTypeStoreSize(LI.getType()) == 0)
    return true;

  ArrayRef<unsigned> Regs = getOrCreateVRegs(LI);
  ArrayRef<uint64_t> Offsets = *VMap.getOffsets(LI);
  unsigned Base = getOrCreateVReg(*LI.getPointerOperand());

  for (unsigned i = 0; i < Regs.size(); ++i) {
    unsigned Addr = 0;
    MIRBuilder.materializeGEP(Addr, Base, LLT::scalar(64), Offsets[i] / 8);

    MachinePointerInfo Ptr(LI.getPointerOperand(), Offsets[i] / 8);
    unsigned BaseAlign = getMemOpAlignment(LI);
    auto MMO = MF->getMachineMemOperand(
        Ptr, Flags, (MRI->getType(Regs[i]).getSizeInBits() + 7) / 8,
        MinAlign(BaseAlign, Offsets[i] / 8), AAMDNodes(), nullptr,
        LI.getSyncScopeID(), LI.getOrdering());
    MIRBuilder.buildLoad(Regs[i], Addr, *MMO);
  }

  return true;
}

bool IRTranslator::translateStore(const User &U, MachineIRBuilder &MIRBuilder) {
  const StoreInst &SI = cast<StoreInst>(U);
  auto Flags = SI.isVolatile() ? MachineMemOperand::MOVolatile
                               : MachineMemOperand::MONone;
  Flags |= MachineMemOperand::MOStore;

  if (DL->getTypeStoreSize(SI.getValueOperand()->getType()) == 0)
    return true;

  ArrayRef<unsigned> Vals = getOrCreateVRegs(*SI.getValueOperand());
  ArrayRef<uint64_t> Offsets = *VMap.getOffsets(*SI.getValueOperand());
  unsigned Base = getOrCreateVReg(*SI.getPointerOperand());

  for (unsigned i = 0; i < Vals.size(); ++i) {
    unsigned Addr = 0;
    MIRBuilder.materializeGEP(Addr, Base, LLT::scalar(64), Offsets[i] / 8);

    MachinePointerInfo Ptr(SI.getPointerOperand(), Offsets[i] / 8);
    unsigned BaseAlign = getMemOpAlignment(SI);
    auto MMO = MF->getMachineMemOperand(
        Ptr, Flags, (MRI->getType(Vals[i]).getSizeInBits() + 7) / 8,
        MinAlign(BaseAlign, Offsets[i] / 8), AAMDNodes(), nullptr,
        SI.getSyncScopeID(), SI.getOrdering());
    MIRBuilder.buildStore(Vals[i], Addr, *MMO);
  }
  return true;
}

static uint64_t getOffsetFromIndices(const User &U, const DataLayout &DL) {
  const Value *Src = U.getOperand(0);
  Type *Int32Ty = Type::getInt32Ty(U.getContext());

  // getIndexedOffsetInType is designed for GEPs, so the first index is the
  // usual array element rather than looking into the actual aggregate.
  SmallVector<Value *, 1> Indices;
  Indices.push_back(ConstantInt::get(Int32Ty, 0));

  if (const ExtractValueInst *EVI = dyn_cast<ExtractValueInst>(&U)) {
    for (auto Idx : EVI->indices())
      Indices.push_back(ConstantInt::get(Int32Ty, Idx));
  } else if (const InsertValueInst *IVI = dyn_cast<InsertValueInst>(&U)) {
    for (auto Idx : IVI->indices())
      Indices.push_back(ConstantInt::get(Int32Ty, Idx));
  } else {
    for (unsigned i = 1; i < U.getNumOperands(); ++i)
      Indices.push_back(U.getOperand(i));
  }

  return 8 * static_cast<uint64_t>(
                 DL.getIndexedOffsetInType(Src->getType(), Indices));
}
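
// Illustrative example (assuming a typical 64-bit DataLayout): for
// "%x = extractvalue {i32, {i64, i16}} %agg, 1, 0" the synthesized index list
// is {0, 1, 0}, getIndexedOffsetInType returns 8 bytes (the nested i64 starts
// after the i32 plus padding), and the function returns 64 bits.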

bool IRTranslator::translateExtractValue(const User &U,
                                         MachineIRBuilder &MIRBuilder) {
  const Value *Src = U.getOperand(0);
  uint64_t Offset = getOffsetFromIndices(U, *DL);
  ArrayRef<unsigned> SrcRegs = getOrCreateVRegs(*Src);
  ArrayRef<uint64_t> Offsets = *VMap.getOffsets(*Src);
  unsigned Idx = std::lower_bound(Offsets.begin(), Offsets.end(), Offset) -
                 Offsets.begin();
  auto &DstRegs = allocateVRegs(U);

  for (unsigned i = 0; i < DstRegs.size(); ++i)
    DstRegs[i] = SrcRegs[Idx++];

  return true;
}

bool IRTranslator::translateInsertValue(const User &U,
                                        MachineIRBuilder &MIRBuilder) {
  const Value *Src = U.getOperand(0);
  uint64_t Offset = getOffsetFromIndices(U, *DL);
  auto &DstRegs = allocateVRegs(U);
  ArrayRef<uint64_t> DstOffsets = *VMap.getOffsets(U);
  ArrayRef<unsigned> SrcRegs = getOrCreateVRegs(*Src);
  ArrayRef<unsigned> InsertedRegs = getOrCreateVRegs(*U.getOperand(1));
  auto InsertedIt = InsertedRegs.begin();

  for (unsigned i = 0; i < DstRegs.size(); ++i) {
    if (DstOffsets[i] >= Offset && InsertedIt != InsertedRegs.end())
      DstRegs[i] = *InsertedIt++;
    else
      DstRegs[i] = SrcRegs[i];
  }

  return true;
}

bool IRTranslator::translateSelect(const User &U,
                                   MachineIRBuilder &MIRBuilder) {
  unsigned Tst = getOrCreateVReg(*U.getOperand(0));
  ArrayRef<unsigned> ResRegs = getOrCreateVRegs(U);
  ArrayRef<unsigned> Op0Regs = getOrCreateVRegs(*U.getOperand(1));
  ArrayRef<unsigned> Op1Regs = getOrCreateVRegs(*U.getOperand(2));

  for (unsigned i = 0; i < ResRegs.size(); ++i)
    MIRBuilder.buildSelect(ResRegs[i], Tst, Op0Regs[i], Op1Regs[i]);

  return true;
}

bool IRTranslator::translateBitCast(const User &U,
                                    MachineIRBuilder &MIRBuilder) {
  // If we're bitcasting to the source type, we can reuse the source vreg.
  if (getLLTForType(*U.getOperand(0)->getType(), *DL) ==
      getLLTForType(*U.getType(), *DL)) {
    unsigned SrcReg = getOrCreateVReg(*U.getOperand(0));
    auto &Regs = *VMap.getVRegs(U);
    // If we already assigned a vreg for this bitcast, we can't change that.
    // Emit a copy to satisfy the users we already emitted.
    if (!Regs.empty())
      MIRBuilder.buildCopy(Regs[0], SrcReg);
    else {
      Regs.push_back(SrcReg);
      VMap.getOffsets(U)->push_back(0);
    }
    return true;
  }
  return translateCast(TargetOpcode::G_BITCAST, U, MIRBuilder);
}

bool IRTranslator::translateCast(unsigned Opcode, const User &U,
                                 MachineIRBuilder &MIRBuilder) {
  unsigned Op = getOrCreateVReg(*U.getOperand(0));
  unsigned Res = getOrCreateVReg(U);
  MIRBuilder.buildInstr(Opcode).addDef(Res).addUse(Op);
  return true;
}

bool IRTranslator::translateGetElementPtr(const User &U,
                                          MachineIRBuilder &MIRBuilder) {
  // FIXME: support vector GEPs.
  if (U.getType()->isVectorTy())
    return false;

  Value &Op0 = *U.getOperand(0);
  unsigned BaseReg = getOrCreateVReg(Op0);
  Type *PtrIRTy = Op0.getType();
  LLT PtrTy = getLLTForType(*PtrIRTy, *DL);
  Type *OffsetIRTy = DL->getIntPtrType(PtrIRTy);
  LLT OffsetTy = getLLTForType(*OffsetIRTy, *DL);

  int64_t Offset = 0;
  for (gep_type_iterator GTI = gep_type_begin(&U), E = gep_type_end(&U);
       GTI != E; ++GTI) {
    const Value *Idx = GTI.getOperand();
    if (StructType *StTy = GTI.getStructTypeOrNull()) {
      unsigned Field = cast<Constant>(Idx)->getUniqueInteger().getZExtValue();
      Offset += DL->getStructLayout(StTy)->getElementOffset(Field);
      continue;
    } else {
      uint64_t ElementSize = DL->getTypeAllocSize(GTI.getIndexedType());

      // If this is a scalar constant or a splat vector of constants,
      // handle it quickly.
      if (const auto *CI = dyn_cast<ConstantInt>(Idx)) {
        Offset += ElementSize * CI->getSExtValue();
        continue;
      }

      if (Offset != 0) {
        unsigned NewBaseReg = MRI->createGenericVirtualRegister(PtrTy);
        unsigned OffsetReg =
            getOrCreateVReg(*ConstantInt::get(OffsetIRTy, Offset));
        MIRBuilder.buildGEP(NewBaseReg, BaseReg, OffsetReg);

        BaseReg = NewBaseReg;
        Offset = 0;
      }

      unsigned IdxReg = getOrCreateVReg(*Idx);
      if (MRI->getType(IdxReg) != OffsetTy) {
        unsigned NewIdxReg = MRI->createGenericVirtualRegister(OffsetTy);
        MIRBuilder.buildSExtOrTrunc(NewIdxReg, IdxReg);
        IdxReg = NewIdxReg;
      }

      // N = N + Idx * ElementSize;
      // Avoid doing it for ElementSize of 1.
      unsigned GepOffsetReg;
      if (ElementSize != 1) {
        unsigned ElementSizeReg =
            getOrCreateVReg(*ConstantInt::get(OffsetIRTy, ElementSize));

        GepOffsetReg = MRI->createGenericVirtualRegister(OffsetTy);
        MIRBuilder.buildMul(GepOffsetReg, ElementSizeReg, IdxReg);
      } else
        GepOffsetReg = IdxReg;

      unsigned NewBaseReg = MRI->createGenericVirtualRegister(PtrTy);
      MIRBuilder.buildGEP(NewBaseReg, BaseReg, GepOffsetReg);
      BaseReg = NewBaseReg;
    }
  }

  if (Offset != 0) {
    unsigned OffsetReg = getOrCreateVReg(*ConstantInt::get(OffsetIRTy, Offset));
    MIRBuilder.buildGEP(getOrCreateVReg(U), BaseReg, OffsetReg);
    return true;
  }

  MIRBuilder.buildCopy(getOrCreateVReg(U), BaseReg);
  return true;
}
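
// Illustrative example: "%p = getelementptr i32, i32* %base, i64 %i" has no
// constant component, so it lowers to roughly
//   %size:_(s64) = G_CONSTANT i64 4
//   %off:_(s64)  = G_MUL %size, %i
//   %p:_(p0)     = G_GEP %base, %off
// whereas an all-constant GEP folds into one G_GEP off a G_CONSTANT offset.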

bool IRTranslator::translateMemfunc(const CallInst &CI,
                                    MachineIRBuilder &MIRBuilder,
                                    unsigned ID) {
  LLT SizeTy = getLLTForType(*CI.getArgOperand(2)->getType(), *DL);
  Type *DstTy = CI.getArgOperand(0)->getType();
  if (cast<PointerType>(DstTy)->getAddressSpace() != 0 ||
      SizeTy.getSizeInBits() != DL->getPointerSizeInBits(0))
    return false;

  SmallVector<CallLowering::ArgInfo, 8> Args;
  for (int i = 0; i < 3; ++i) {
    const auto &Arg = CI.getArgOperand(i);
    Args.emplace_back(getOrCreateVReg(*Arg), Arg->getType());
  }

  const char *Callee;
  switch (ID) {
  case Intrinsic::memmove:
  case Intrinsic::memcpy: {
    Type *SrcTy = CI.getArgOperand(1)->getType();
    if (cast<PointerType>(SrcTy)->getAddressSpace() != 0)
      return false;
    Callee = ID == Intrinsic::memcpy ? "memcpy" : "memmove";
    break;
  }
  case Intrinsic::memset:
    Callee = "memset";
    break;
  default:
    return false;
  }

  return CLI->lowerCall(MIRBuilder, CI.getCallingConv(),
                        MachineOperand::CreateES(Callee),
                        CallLowering::ArgInfo(0, CI.getType()), Args);
}

void IRTranslator::getStackGuard(unsigned DstReg,
                                 MachineIRBuilder &MIRBuilder) {
  const TargetRegisterInfo *TRI = MF->getSubtarget().getRegisterInfo();
  MRI->setRegClass(DstReg, TRI->getPointerRegClass(*MF));
  auto MIB = MIRBuilder.buildInstr(TargetOpcode::LOAD_STACK_GUARD);
  MIB.addDef(DstReg);

  auto &TLI = *MF->getSubtarget().getTargetLowering();
  Value *Global = TLI.getSDagStackGuard(*MF->getFunction().getParent());
  if (!Global)
    return;

  MachinePointerInfo MPInfo(Global);
  auto Flags = MachineMemOperand::MOLoad | MachineMemOperand::MOInvariant |
               MachineMemOperand::MODereferenceable;
  MachineMemOperand *MemRef =
      MF->getMachineMemOperand(MPInfo, Flags, DL->getPointerSizeInBits() / 8,
                               DL->getPointerABIAlignment(0));
  MIB.setMemRefs({MemRef});
}

bool IRTranslator::translateOverflowIntrinsic(const CallInst &CI, unsigned Op,
                                              MachineIRBuilder &MIRBuilder) {
  ArrayRef<unsigned> ResRegs = getOrCreateVRegs(CI);
  MIRBuilder.buildInstr(Op)
      .addDef(ResRegs[0])
      .addDef(ResRegs[1])
      .addUse(getOrCreateVReg(*CI.getOperand(0)))
      .addUse(getOrCreateVReg(*CI.getOperand(1)));

  return true;
}
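
// Illustrative example: "call {i32, i1} @llvm.uadd.with.overflow.i32(i32 %a,
// i32 %b)" maps onto one two-result generic instruction, roughly
//   %val:_(s32), %ovf:_(s1) = G_UADDO %a, %b
// where %val and %ovf are the vregs backing the two members of the result
// struct.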

bool IRTranslator::translateKnownIntrinsic(const CallInst &CI, Intrinsic::ID ID,
                                           MachineIRBuilder &MIRBuilder) {
  switch (ID) {
  default:
    break;
  case Intrinsic::lifetime_start:
  case Intrinsic::lifetime_end:
    // Stack coloring is not enabled in O0 (which we care about now) so we can
    // drop these. Make sure someone notices when we start compiling at higher
    // opts though.
    if (MF->getTarget().getOptLevel() != CodeGenOpt::None)
      return false;
    return true;
  case Intrinsic::dbg_declare: {
    const DbgDeclareInst &DI = cast<DbgDeclareInst>(CI);
    assert(DI.getVariable() && "Missing variable");

    const Value *Address = DI.getAddress();
    if (!Address || isa<UndefValue>(Address)) {
      LLVM_DEBUG(dbgs() << "Dropping debug info for " << DI << "\n");
      return true;
    }

    assert(DI.getVariable()->isValidLocationForIntrinsic(
               MIRBuilder.getDebugLoc()) &&
           "Expected inlined-at fields to agree");
    auto AI = dyn_cast<AllocaInst>(Address);
    if (AI && AI->isStaticAlloca()) {
      // Static allocas are tracked at the MF level, no need for DBG_VALUE
      // instructions (in fact, they get ignored if they *do* exist).
      MF->setVariableDbgInfo(DI.getVariable(), DI.getExpression(),
                             getOrCreateFrameIndex(*AI), DI.getDebugLoc());
    } else {
      // A dbg.declare describes the address of a source variable, so lower it
      // into an indirect DBG_VALUE.
      MIRBuilder.buildIndirectDbgValue(getOrCreateVReg(*Address),
                                       DI.getVariable(), DI.getExpression());
    }
    return true;
  }
  case Intrinsic::dbg_label: {
    const DbgLabelInst &DI = cast<DbgLabelInst>(CI);
    assert(DI.getLabel() && "Missing label");

    assert(DI.getLabel()->isValidLocationForIntrinsic(
               MIRBuilder.getDebugLoc()) &&
           "Expected inlined-at fields to agree");

    MIRBuilder.buildDbgLabel(DI.getLabel());
    return true;
  }
  case Intrinsic::vaend:
    // No target I know of cares about va_end. Certainly no in-tree target
    // does. Simplest intrinsic ever!
    return true;
  case Intrinsic::vastart: {
    auto &TLI = *MF->getSubtarget().getTargetLowering();
    Value *Ptr = CI.getArgOperand(0);
    unsigned ListSize = TLI.getVaListSizeInBits(*DL) / 8;

    MIRBuilder.buildInstr(TargetOpcode::G_VASTART)
        .addUse(getOrCreateVReg(*Ptr))
        .addMemOperand(MF->getMachineMemOperand(
            MachinePointerInfo(Ptr), MachineMemOperand::MOStore, ListSize, 0));
    return true;
  }
  case Intrinsic::dbg_value: {
    // This form of DBG_VALUE is target-independent.
    const DbgValueInst &DI = cast<DbgValueInst>(CI);
    const Value *V = DI.getValue();
    assert(DI.getVariable()->isValidLocationForIntrinsic(
               MIRBuilder.getDebugLoc()) &&
           "Expected inlined-at fields to agree");
    if (!V) {
      // Currently the optimizer can produce this; insert an undef to
      // help debugging. Probably the optimizer should not do this.
      MIRBuilder.buildIndirectDbgValue(0, DI.getVariable(), DI.getExpression());
    } else if (const auto *CI = dyn_cast<Constant>(V)) {
      MIRBuilder.buildConstDbgValue(*CI, DI.getVariable(), DI.getExpression());
    } else {
      unsigned Reg = getOrCreateVReg(*V);
      // FIXME: This does not handle register-indirect values at offset 0. The
      // direct/indirect thing shouldn't really be handled by something as
      // implicit as reg+noreg vs reg+imm in the first place, but it seems
      // pretty baked in right now.
      MIRBuilder.buildDirectDbgValue(Reg, DI.getVariable(), DI.getExpression());
    }
    return true;
  }
  case Intrinsic::uadd_with_overflow:
    return translateOverflowIntrinsic(CI, TargetOpcode::G_UADDO, MIRBuilder);
  case Intrinsic::sadd_with_overflow:
    return translateOverflowIntrinsic(CI, TargetOpcode::G_SADDO, MIRBuilder);
  case Intrinsic::usub_with_overflow:
    return translateOverflowIntrinsic(CI, TargetOpcode::G_USUBO, MIRBuilder);
  case Intrinsic::ssub_with_overflow:
    return translateOverflowIntrinsic(CI, TargetOpcode::G_SSUBO, MIRBuilder);
  case Intrinsic::umul_with_overflow:
    return translateOverflowIntrinsic(CI, TargetOpcode::G_UMULO, MIRBuilder);
  case Intrinsic::smul_with_overflow:
    return translateOverflowIntrinsic(CI, TargetOpcode::G_SMULO, MIRBuilder);
  case Intrinsic::pow:
    MIRBuilder.buildInstr(TargetOpcode::G_FPOW)
        .addDef(getOrCreateVReg(CI))
        .addUse(getOrCreateVReg(*CI.getArgOperand(0)))
        .addUse(getOrCreateVReg(*CI.getArgOperand(1)));
    return true;
  case Intrinsic::exp:
    MIRBuilder.buildInstr(TargetOpcode::G_FEXP)
        .addDef(getOrCreateVReg(CI))
        .addUse(getOrCreateVReg(*CI.getArgOperand(0)));
    return true;
  case Intrinsic::exp2:
    MIRBuilder.buildInstr(TargetOpcode::G_FEXP2)
        .addDef(getOrCreateVReg(CI))
        .addUse(getOrCreateVReg(*CI.getArgOperand(0)));
    return true;
  case Intrinsic::log:
    MIRBuilder.buildInstr(TargetOpcode::G_FLOG)
        .addDef(getOrCreateVReg(CI))
        .addUse(getOrCreateVReg(*CI.getArgOperand(0)));
    return true;
  case Intrinsic::log2:
    MIRBuilder.buildInstr(TargetOpcode::G_FLOG2)
        .addDef(getOrCreateVReg(CI))
        .addUse(getOrCreateVReg(*CI.getArgOperand(0)));
    return true;
  case Intrinsic::fabs:
    MIRBuilder.buildInstr(TargetOpcode::G_FABS)
        .addDef(getOrCreateVReg(CI))
        .addUse(getOrCreateVReg(*CI.getArgOperand(0)));
    return true;
  case Intrinsic::trunc:
    MIRBuilder.buildInstr(TargetOpcode::G_INTRINSIC_TRUNC)
        .addDef(getOrCreateVReg(CI))
        .addUse(getOrCreateVReg(*CI.getArgOperand(0)));
    return true;
  case Intrinsic::round:
    MIRBuilder.buildInstr(TargetOpcode::G_INTRINSIC_ROUND)
        .addDef(getOrCreateVReg(CI))
        .addUse(getOrCreateVReg(*CI.getArgOperand(0)));
    return true;
  case Intrinsic::fma:
    MIRBuilder.buildInstr(TargetOpcode::G_FMA)
        .addDef(getOrCreateVReg(CI))
        .addUse(getOrCreateVReg(*CI.getArgOperand(0)))
        .addUse(getOrCreateVReg(*CI.getArgOperand(1)))
        .addUse(getOrCreateVReg(*CI.getArgOperand(2)));
    return true;
  case Intrinsic::fmuladd: {
    const TargetMachine &TM = MF->getTarget();
    const TargetLowering &TLI = *MF->getSubtarget().getTargetLowering();
    unsigned Dst = getOrCreateVReg(CI);
    unsigned Op0 = getOrCreateVReg(*CI.getArgOperand(0));
    unsigned Op1 = getOrCreateVReg(*CI.getArgOperand(1));
    unsigned Op2 = getOrCreateVReg(*CI.getArgOperand(2));
    if (TM.Options.AllowFPOpFusion != FPOpFusion::Strict &&
        TLI.isFMAFasterThanFMulAndFAdd(TLI.getValueType(*DL, CI.getType()))) {
      // TODO: Revisit this to see if we should move this part of the
      // lowering to the combiner.
      MIRBuilder.buildInstr(TargetOpcode::G_FMA, Dst, Op0, Op1, Op2);
    } else {
      LLT Ty = getLLTForType(*CI.getType(), *DL);
      auto FMul = MIRBuilder.buildInstr(TargetOpcode::G_FMUL, Ty, Op0, Op1);
      MIRBuilder.buildInstr(TargetOpcode::G_FADD, Dst, FMul, Op2);
    }
    return true;
  }
  case Intrinsic::memcpy:
  case Intrinsic::memmove:
  case Intrinsic::memset:
    return translateMemfunc(CI, MIRBuilder, ID);
  case Intrinsic::eh_typeid_for: {
    GlobalValue *GV = ExtractTypeInfo(CI.getArgOperand(0));
    unsigned Reg = getOrCreateVReg(CI);
    unsigned TypeID = MF->getTypeIDFor(GV);
    MIRBuilder.buildConstant(Reg, TypeID);
    return true;
  }
  case Intrinsic::objectsize: {
    // If we don't know by now, we're never going to know.
    const ConstantInt *Min = cast<ConstantInt>(CI.getArgOperand(1));

    MIRBuilder.buildConstant(getOrCreateVReg(CI), Min->isZero() ? -1ULL : 0);
    return true;
  }
  case Intrinsic::is_constant:
    // If this wasn't constant-folded away by now, then it's not a
    // constant.
    MIRBuilder.buildConstant(getOrCreateVReg(CI), 0);
    return true;
  case Intrinsic::stackguard:
    getStackGuard(getOrCreateVReg(CI), MIRBuilder);
    return true;
  case Intrinsic::stackprotector: {
    LLT PtrTy = getLLTForType(*CI.getArgOperand(0)->getType(), *DL);
    unsigned GuardVal = MRI->createGenericVirtualRegister(PtrTy);
    getStackGuard(GuardVal, MIRBuilder);

    AllocaInst *Slot = cast<AllocaInst>(CI.getArgOperand(1));
    MIRBuilder.buildStore(
        GuardVal, getOrCreateVReg(*Slot),
        *MF->getMachineMemOperand(
            MachinePointerInfo::getFixedStack(*MF,
                                              getOrCreateFrameIndex(*Slot)),
            MachineMemOperand::MOStore | MachineMemOperand::MOVolatile,
            PtrTy.getSizeInBits() / 8, 8));
    return true;
  }
  case Intrinsic::cttz:
  case Intrinsic::ctlz: {
    ConstantInt *Cst = cast<ConstantInt>(CI.getArgOperand(1));
    bool isTrailing = ID == Intrinsic::cttz;
    unsigned Opcode = isTrailing
                          ? Cst->isZero() ? TargetOpcode::G_CTTZ
                                          : TargetOpcode::G_CTTZ_ZERO_UNDEF
                          : Cst->isZero() ? TargetOpcode::G_CTLZ
                                          : TargetOpcode::G_CTLZ_ZERO_UNDEF;
    MIRBuilder.buildInstr(Opcode)
        .addDef(getOrCreateVReg(CI))
        .addUse(getOrCreateVReg(*CI.getArgOperand(0)));
    return true;
  }
  case Intrinsic::ctpop: {
    MIRBuilder.buildInstr(TargetOpcode::G_CTPOP)
        .addDef(getOrCreateVReg(CI))
        .addUse(getOrCreateVReg(*CI.getArgOperand(0)));
    return true;
  }
  case Intrinsic::invariant_start: {
    LLT PtrTy = getLLTForType(*CI.getArgOperand(0)->getType(), *DL);
    unsigned Undef = MRI->createGenericVirtualRegister(PtrTy);
    MIRBuilder.buildUndef(Undef);
    return true;
  }
  case Intrinsic::invariant_end:
    return true;
  }
  return false;
}

bool IRTranslator::translateInlineAsm(const CallInst &CI,
                                      MachineIRBuilder &MIRBuilder) {
  const InlineAsm &IA = cast<InlineAsm>(*CI.getCalledValue());
  if (!IA.getConstraintString().empty())
    return false;

  unsigned ExtraInfo = 0;
  if (IA.hasSideEffects())
    ExtraInfo |= InlineAsm::Extra_HasSideEffects;
  if (IA.getDialect() == InlineAsm::AD_Intel)
    ExtraInfo |= InlineAsm::Extra_AsmDialect;

  MIRBuilder.buildInstr(TargetOpcode::INLINEASM)
      .addExternalSymbol(IA.getAsmString().c_str())
      .addImm(ExtraInfo);

  return true;
}

unsigned IRTranslator::packRegs(const Value &V,
                                MachineIRBuilder &MIRBuilder) {
  ArrayRef<unsigned> Regs = getOrCreateVRegs(V);
  ArrayRef<uint64_t> Offsets = *VMap.getOffsets(V);
  LLT BigTy = getLLTForType(*V.getType(), *DL);

  if (Regs.size() == 1)
    return Regs[0];

  unsigned Dst = MRI->createGenericVirtualRegister(BigTy);
  MIRBuilder.buildUndef(Dst);
  for (unsigned i = 0; i < Regs.size(); ++i) {
    unsigned NewDst = MRI->createGenericVirtualRegister(BigTy);
    MIRBuilder.buildInsert(NewDst, Dst, Regs[i], Offsets[i]);
    Dst = NewDst;
  }
  return Dst;
}

void IRTranslator::unpackRegs(const Value &V, unsigned Src,
                              MachineIRBuilder &MIRBuilder) {
  ArrayRef<unsigned> Regs = getOrCreateVRegs(V);
  ArrayRef<uint64_t> Offsets = *VMap.getOffsets(V);

  for (unsigned i = 0; i < Regs.size(); ++i)
    MIRBuilder.buildExtract(Regs[i], Src, Offsets[i]);
}
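
// Illustrative example: a {i32, i32} argument lives in two s32 vregs, so
// packRegs widens it for call lowering roughly as
//   %u:_(s64) = G_IMPLICIT_DEF
//   %t:_(s64) = G_INSERT %u, %lo(s32), 0
//   %p:_(s64) = G_INSERT %t, %hi(s32), 32
// and unpackRegs performs the inverse with G_EXTRACT at offsets 0 and 32.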

bool IRTranslator::translateCall(const User &U, MachineIRBuilder &MIRBuilder) {
  const CallInst &CI = cast<CallInst>(U);
  auto TII = MF->getTarget().getIntrinsicInfo();
  const Function *F = CI.getCalledFunction();

  // FIXME: support Windows dllimport function calls.
  if (F && F->hasDLLImportStorageClass())
    return false;

  if (CI.isInlineAsm())
    return translateInlineAsm(CI, MIRBuilder);

  Intrinsic::ID ID = Intrinsic::not_intrinsic;
  if (F && F->isIntrinsic()) {
    ID = F->getIntrinsicID();
    if (TII && ID == Intrinsic::not_intrinsic)
      ID = static_cast<Intrinsic::ID>(TII->getIntrinsicID(F));
  }

  bool IsSplitType = valueIsSplit(CI);
  if (!F || !F->isIntrinsic() || ID == Intrinsic::not_intrinsic) {
    unsigned Res = IsSplitType ? MRI->createGenericVirtualRegister(
                                     getLLTForType(*CI.getType(), *DL))
                               : getOrCreateVReg(CI);

    SmallVector<unsigned, 8> Args;
    for (auto &Arg : CI.arg_operands())
      Args.push_back(packRegs(*Arg, MIRBuilder));

    MF->getFrameInfo().setHasCalls(true);
    bool Success = CLI->lowerCall(MIRBuilder, &CI, Res, Args, [&]() {
      return getOrCreateVReg(*CI.getCalledValue());
    });

    if (IsSplitType)
      unpackRegs(CI, Res, MIRBuilder);
    return Success;
  }

  assert(ID != Intrinsic::not_intrinsic && "unknown intrinsic");

  if (translateKnownIntrinsic(CI, ID, MIRBuilder))
    return true;

  unsigned Res = 0;
  if (!CI.getType()->isVoidTy()) {
    if (IsSplitType)
      Res =
          MRI->createGenericVirtualRegister(getLLTForType(*CI.getType(), *DL));
    else
      Res = getOrCreateVReg(CI);
  }
  MachineInstrBuilder MIB =
      MIRBuilder.buildIntrinsic(ID, Res, !CI.doesNotAccessMemory());

  for (auto &Arg : CI.arg_operands()) {
    // Some intrinsics take metadata parameters. Reject them.
    if (isa<MetadataAsValue>(Arg))
      return false;
    MIB.addUse(packRegs(*Arg, MIRBuilder));
  }

  if (IsSplitType)
    unpackRegs(CI, Res, MIRBuilder);

  // Add a MachineMemOperand if it is a target mem intrinsic.
  const TargetLowering &TLI = *MF->getSubtarget().getTargetLowering();
  TargetLowering::IntrinsicInfo Info;
  // TODO: Add a GlobalISel version of getTgtMemIntrinsic.
  if (TLI.getTgtMemIntrinsic(Info, CI, *MF, ID)) {
    uint64_t Size = Info.memVT.getStoreSize();
    MIB.addMemOperand(MF->getMachineMemOperand(MachinePointerInfo(Info.ptrVal),
                                               Info.flags, Size, Info.align));
  }

  return true;
}

bool IRTranslator::translateInvoke(const User &U,
                                   MachineIRBuilder &MIRBuilder) {
  const InvokeInst &I = cast<InvokeInst>(U);
  MCContext &Context = MF->getContext();

  const BasicBlock *ReturnBB = I.getSuccessor(0);
  const BasicBlock *EHPadBB = I.getSuccessor(1);

  const Value *Callee = I.getCalledValue();
  const Function *Fn = dyn_cast<Function>(Callee);
  if (isa<InlineAsm>(Callee))
    return false;

  // FIXME: support invoking patchpoint and statepoint intrinsics.
  if (Fn && Fn->isIntrinsic())
    return false;

  // FIXME: support whatever these are.
  if (I.countOperandBundlesOfType(LLVMContext::OB_deopt))
    return false;

  // FIXME: support Windows exception handling.
  if (!isa<LandingPadInst>(EHPadBB->front()))
    return false;

  // Emit the actual call, bracketed by EH_LABELs so that the MF knows about
  // the region covered by the try.
  MCSymbol *BeginSymbol = Context.createTempSymbol();
  MIRBuilder.buildInstr(TargetOpcode::EH_LABEL).addSym(BeginSymbol);

  unsigned Res =
      MRI->createGenericVirtualRegister(getLLTForType(*I.getType(), *DL));
  SmallVector<unsigned, 8> Args;
  for (auto &Arg : I.arg_operands())
    Args.push_back(packRegs(*Arg, MIRBuilder));

  if (!CLI->lowerCall(MIRBuilder, &I, Res, Args,
                      [&]() { return getOrCreateVReg(*I.getCalledValue()); }))
    return false;

  unpackRegs(I, Res, MIRBuilder);

  MCSymbol *EndSymbol = Context.createTempSymbol();
  MIRBuilder.buildInstr(TargetOpcode::EH_LABEL).addSym(EndSymbol);

  // FIXME: track probabilities.
  MachineBasicBlock &EHPadMBB = getMBB(*EHPadBB),
                    &ReturnMBB = getMBB(*ReturnBB);
  MF->addInvoke(&EHPadMBB, BeginSymbol, EndSymbol);
  MIRBuilder.getMBB().addSuccessor(&ReturnMBB);
  MIRBuilder.getMBB().addSuccessor(&EHPadMBB);
  MIRBuilder.buildBr(ReturnMBB);

  return true;
}

bool IRTranslator::translateLandingPad(const User &U,
                                       MachineIRBuilder &MIRBuilder) {
  const LandingPadInst &LP = cast<LandingPadInst>(U);

  MachineBasicBlock &MBB = MIRBuilder.getMBB();

  MBB.setIsEHPad();

  // If there aren't registers to copy the values into (e.g., during SjLj
  // exceptions), then don't bother.
  auto &TLI = *MF->getSubtarget().getTargetLowering();
  const Constant *PersonalityFn = MF->getFunction().getPersonalityFn();
  if (TLI.getExceptionPointerRegister(PersonalityFn) == 0 &&
      TLI.getExceptionSelectorRegister(PersonalityFn) == 0)
    return true;

  // If landingpad's return type is token type, we don't create DAG nodes
  // for its exception pointer and selector value. The extraction of exception
  // pointer or selector value from token type landingpads is not currently
  // supported.
  if (LP.getType()->isTokenTy())
    return true;

  // Add a label to mark the beginning of the landing pad. Deletion of the
  // landing pad can thus be detected via the MachineModuleInfo.
  MIRBuilder.buildInstr(TargetOpcode::EH_LABEL)
    .addSym(MF->addLandingPad(&MBB));

  LLT Ty = getLLTForType(*LP.getType(), *DL);
  unsigned Undef = MRI->createGenericVirtualRegister(Ty);
  MIRBuilder.buildUndef(Undef);

  SmallVector<LLT, 2> Tys;
  for (Type *Ty : cast<StructType>(LP.getType())->elements())
    Tys.push_back(getLLTForType(*Ty, *DL));
  assert(Tys.size() == 2 && "Only two-valued landingpads are supported");

  // Mark exception register as live in.
  unsigned ExceptionReg = TLI.getExceptionPointerRegister(PersonalityFn);
  if (!ExceptionReg)
    return false;

  MBB.addLiveIn(ExceptionReg);
  ArrayRef<unsigned> ResRegs = getOrCreateVRegs(LP);
  MIRBuilder.buildCopy(ResRegs[0], ExceptionReg);

  unsigned SelectorReg = TLI.getExceptionSelectorRegister(PersonalityFn);
  if (!SelectorReg)
    return false;

  MBB.addLiveIn(SelectorReg);
  unsigned PtrVReg = MRI->createGenericVirtualRegister(Tys[0]);
  MIRBuilder.buildCopy(PtrVReg, SelectorReg);
  MIRBuilder.buildCast(ResRegs[1], PtrVReg);

  return true;
}

bool IRTranslator::translateAlloca(const User &U,
                                   MachineIRBuilder &MIRBuilder) {
  auto &AI = cast<AllocaInst>(U);

  if (AI.isSwiftError())
    return false;

  if (AI.isStaticAlloca()) {
    unsigned Res = getOrCreateVReg(AI);
    int FI = getOrCreateFrameIndex(AI);
    MIRBuilder.buildFrameIndex(Res, FI);
    return true;
  }

  // FIXME: support stack probing for Windows.
  if (MF->getTarget().getTargetTriple().isOSWindows())
    return false;

  // Now we're in the harder dynamic case.
  Type *Ty = AI.getAllocatedType();
  unsigned Align =
      std::max((unsigned)DL->getPrefTypeAlignment(Ty), AI.getAlignment());

  unsigned NumElts = getOrCreateVReg(*AI.getArraySize());

  Type *IntPtrIRTy = DL->getIntPtrType(AI.getType());
  LLT IntPtrTy = getLLTForType(*IntPtrIRTy, *DL);
  if (MRI->getType(NumElts) != IntPtrTy) {
    unsigned ExtElts = MRI->createGenericVirtualRegister(IntPtrTy);
    MIRBuilder.buildZExtOrTrunc(ExtElts, NumElts);
    NumElts = ExtElts;
  }

  unsigned AllocSize = MRI->createGenericVirtualRegister(IntPtrTy);
  unsigned TySize =
      getOrCreateVReg(*ConstantInt::get(IntPtrIRTy, -DL->getTypeAllocSize(Ty)));
  MIRBuilder.buildMul(AllocSize, NumElts, TySize);

  LLT PtrTy = getLLTForType(*AI.getType(), *DL);
  auto &TLI = *MF->getSubtarget().getTargetLowering();
  unsigned SPReg = TLI.getStackPointerRegisterToSaveRestore();

  unsigned SPTmp = MRI->createGenericVirtualRegister(PtrTy);
  MIRBuilder.buildCopy(SPTmp, SPReg);

  unsigned AllocTmp = MRI->createGenericVirtualRegister(PtrTy);
  MIRBuilder.buildGEP(AllocTmp, SPTmp, AllocSize);

  // Handle alignment. We have to realign if the allocation granule was smaller
  // than stack alignment, or the specific alloca requires more than stack
  // alignment.
  unsigned StackAlign =
      MF->getSubtarget().getFrameLowering()->getStackAlignment();
  Align = std::max(Align, StackAlign);
  if (Align > StackAlign || DL->getTypeAllocSize(Ty) % StackAlign != 0) {
    // Round the size of the allocation up to the stack alignment size
    // by adding SA-1 to the size. This doesn't overflow because we're
    // computing an address inside an alloca.
    unsigned AlignedAlloc = MRI->createGenericVirtualRegister(PtrTy);
    MIRBuilder.buildPtrMask(AlignedAlloc, AllocTmp, Log2_32(Align));
    AllocTmp = AlignedAlloc;
  }

  MIRBuilder.buildCopy(SPReg, AllocTmp);
  MIRBuilder.buildCopy(getOrCreateVReg(AI), AllocTmp);

  MF->getFrameInfo().CreateVariableSizedObject(Align ? Align : 1, &AI);
  assert(MF->getFrameInfo().hasVarSizedObjects());
  return true;
}
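
// Illustrative example: on a 64-bit stack-grows-down target, a dynamic
// "alloca i32, i32 %n" yields roughly
//   %nelt:_(s64)  = G_ZEXT %n(s32)
//   %nsize:_(s64) = G_CONSTANT i64 -4     ; negated element size
//   %bytes:_(s64) = G_MUL %nelt, %nsize
//   %sp:_(p0)     = COPY <stack pointer>
//   %new:_(p0)    = G_GEP %sp, %bytes     ; moves the stack pointer down
// optionally followed by a G_PTR_MASK to realign, then copied back to the
// stack pointer register.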

bool IRTranslator::translateVAArg(const User &U, MachineIRBuilder &MIRBuilder) {
  // FIXME: We may need more info about the type. Because of how LLT works,
  // we're completely discarding the i64/double distinction here (amongst
  // others). Fortunately the ABIs I know of where that matters don't use va_arg
  // anyway but that's not guaranteed.
  MIRBuilder.buildInstr(TargetOpcode::G_VAARG)
      .addDef(getOrCreateVReg(U))
      .addUse(getOrCreateVReg(*U.getOperand(0)))
      .addImm(DL->getABITypeAlignment(U.getType()));
  return true;
}

bool IRTranslator::translateInsertElement(const User &U,
                                          MachineIRBuilder &MIRBuilder) {
  // If it is a <1 x Ty> vector, use the scalar as it is
  // not a legal vector type in LLT.
  if (U.getType()->getVectorNumElements() == 1) {
    unsigned Elt = getOrCreateVReg(*U.getOperand(1));
    auto &Regs = *VMap.getVRegs(U);
    if (Regs.empty()) {
      Regs.push_back(Elt);
      VMap.getOffsets(U)->push_back(0);
    } else {
      MIRBuilder.buildCopy(Regs[0], Elt);
    }
    return true;
  }

  unsigned Res = getOrCreateVReg(U);
  unsigned Val = getOrCreateVReg(*U.getOperand(0));
  unsigned Elt = getOrCreateVReg(*U.getOperand(1));
  unsigned Idx = getOrCreateVReg(*U.getOperand(2));
  MIRBuilder.buildInsertVectorElement(Res, Val, Elt, Idx);
  return true;
}

bool IRTranslator::translateExtractElement(const User &U,
                                           MachineIRBuilder &MIRBuilder) {
  // If it is a <1 x Ty> vector, use the scalar as it is
  // not a legal vector type in LLT.
  if (U.getOperand(0)->getType()->getVectorNumElements() == 1) {
    unsigned Elt = getOrCreateVReg(*U.getOperand(0));
    auto &Regs = *VMap.getVRegs(U);
    if (Regs.empty()) {
      Regs.push_back(Elt);
      VMap.getOffsets(U)->push_back(0);
    } else {
      MIRBuilder.buildCopy(Regs[0], Elt);
    }
    return true;
  }
  unsigned Res = getOrCreateVReg(U);
  unsigned Val = getOrCreateVReg(*U.getOperand(0));
  const auto &TLI = *MF->getSubtarget().getTargetLowering();
  unsigned PreferredVecIdxWidth = TLI.getVectorIdxTy(*DL).getSizeInBits();
  unsigned Idx = 0;
  if (auto *CI = dyn_cast<ConstantInt>(U.getOperand(1))) {
    if (CI->getBitWidth() != PreferredVecIdxWidth) {
      APInt NewIdx = CI->getValue().sextOrTrunc(PreferredVecIdxWidth);
      auto *NewIdxCI = ConstantInt::get(CI->getContext(), NewIdx);
      Idx = getOrCreateVReg(*NewIdxCI);
    }
  }
  if (!Idx)
    Idx = getOrCreateVReg(*U.getOperand(1));
  if (MRI->getType(Idx).getSizeInBits() != PreferredVecIdxWidth) {
    const LLT &VecIdxTy = LLT::scalar(PreferredVecIdxWidth);
    Idx = MIRBuilder.buildSExtOrTrunc(VecIdxTy, Idx)->getOperand(0).getReg();
  }
  MIRBuilder.buildExtractVectorElement(Res, Val, Idx);
  return true;
}
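
// Illustrative example: if the target prefers 64-bit vector indices,
// "extractelement <4 x i32> %v, i32 2" re-materializes the index as an s64
// constant and emits roughly
//   %r:_(s32) = G_EXTRACT_VECTOR_ELT %v(<4 x s32>), %idx(s64)
// while a <1 x i32> source simply reuses the scalar vreg directly.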

bool IRTranslator::translateShuffleVector(const User &U,
                                          MachineIRBuilder &MIRBuilder) {
  MIRBuilder.buildInstr(TargetOpcode::G_SHUFFLE_VECTOR)
      .addDef(getOrCreateVReg(U))
      .addUse(getOrCreateVReg(*U.getOperand(0)))
      .addUse(getOrCreateVReg(*U.getOperand(1)))
      .addUse(getOrCreateVReg(*U.getOperand(2)));
  return true;
}

bool IRTranslator::translatePHI(const User &U, MachineIRBuilder &MIRBuilder) {
  const PHINode &PI = cast<PHINode>(U);

  SmallVector<MachineInstr *, 4> Insts;
  for (auto Reg : getOrCreateVRegs(PI)) {
    auto MIB = MIRBuilder.buildInstr(TargetOpcode::G_PHI, Reg);
    Insts.push_back(MIB.getInstr());
  }

  PendingPHIs.emplace_back(&PI, std::move(Insts));
  return true;
}

bool IRTranslator::translateAtomicCmpXchg(const User &U,
                                          MachineIRBuilder &MIRBuilder) {
  const AtomicCmpXchgInst &I = cast<AtomicCmpXchgInst>(U);

  if (I.isWeak())
    return false;

  auto Flags = I.isVolatile() ? MachineMemOperand::MOVolatile
                              : MachineMemOperand::MONone;
  Flags |= MachineMemOperand::MOLoad | MachineMemOperand::MOStore;

  Type *ResType = I.getType();
  Type *ValType = ResType->getStructElementType(0);

  auto Res = getOrCreateVRegs(I);
  unsigned OldValRes = Res[0];
  unsigned SuccessRes = Res[1];
  unsigned Addr = getOrCreateVReg(*I.getPointerOperand());
  unsigned Cmp = getOrCreateVReg(*I.getCompareOperand());
  unsigned NewVal = getOrCreateVReg(*I.getNewValOperand());

  MIRBuilder.buildAtomicCmpXchgWithSuccess(
      OldValRes, SuccessRes, Addr, Cmp, NewVal,
      *MF->getMachineMemOperand(MachinePointerInfo(I.getPointerOperand()),
                                Flags, DL->getTypeStoreSize(ValType),
                                getMemOpAlignment(I), AAMDNodes(), nullptr,
                                I.getSyncScopeID(), I.getSuccessOrdering(),
                                I.getFailureOrdering()));
  return true;
}

bool IRTranslator::translateAtomicRMW(const User &U,
                                      MachineIRBuilder &MIRBuilder) {
  const AtomicRMWInst &I = cast<AtomicRMWInst>(U);

  auto Flags = I.isVolatile() ? MachineMemOperand::MOVolatile
                              : MachineMemOperand::MONone;
  Flags |= MachineMemOperand::MOLoad | MachineMemOperand::MOStore;

  Type *ResType = I.getType();

  unsigned Res = getOrCreateVReg(I);
  unsigned Addr = getOrCreateVReg(*I.getPointerOperand());
  unsigned Val = getOrCreateVReg(*I.getValOperand());

  unsigned Opcode = 0;
  switch (I.getOperation()) {
  default:
    llvm_unreachable("Unknown atomicrmw op");
    return false;
  case AtomicRMWInst::Xchg:
    Opcode = TargetOpcode::G_ATOMICRMW_XCHG;
    break;
  case AtomicRMWInst::Add:
    Opcode = TargetOpcode::G_ATOMICRMW_ADD;
    break;
  case AtomicRMWInst::Sub:
    Opcode = TargetOpcode::G_ATOMICRMW_SUB;
    break;
  case AtomicRMWInst::And:
    Opcode = TargetOpcode::G_ATOMICRMW_AND;
    break;
  case AtomicRMWInst::Nand:
    Opcode = TargetOpcode::G_ATOMICRMW_NAND;
    break;
  case AtomicRMWInst::Or:
    Opcode = TargetOpcode::G_ATOMICRMW_OR;
    break;
  case AtomicRMWInst::Xor:
    Opcode = TargetOpcode::G_ATOMICRMW_XOR;
    break;
  case AtomicRMWInst::Max:
    Opcode = TargetOpcode::G_ATOMICRMW_MAX;
    break;
  case AtomicRMWInst::Min:
    Opcode = TargetOpcode::G_ATOMICRMW_MIN;
    break;
  case AtomicRMWInst::UMax:
    Opcode = TargetOpcode::G_ATOMICRMW_UMAX;
    break;
  case AtomicRMWInst::UMin:
    Opcode = TargetOpcode::G_ATOMICRMW_UMIN;
    break;
  }

  MIRBuilder.buildAtomicRMW(
      Opcode, Res, Addr, Val,
      *MF->getMachineMemOperand(MachinePointerInfo(I.getPointerOperand()),
                                Flags, DL->getTypeStoreSize(ResType),
                                getMemOpAlignment(I), AAMDNodes(), nullptr,
                                I.getSyncScopeID(), I.getOrdering()));
  return true;
}

void IRTranslator::finishPendingPhis() {
#ifndef NDEBUG
  DILocationVerifier Verifier(*MF);
#endif // ifndef NDEBUG
  for (auto &Phi : PendingPHIs) {
    const PHINode *PI = Phi.first;
    ArrayRef<MachineInstr *> ComponentPHIs = Phi.second;
    EntryBuilder.setDebugLoc(PI->getDebugLoc());
#ifndef NDEBUG
    Verifier.setCurrentInst(PI);
#endif // ifndef NDEBUG

    // All MachineBasicBlocks exist, add them to the PHI. We assume IRTranslator
    // won't create extra control flow here, otherwise we need to find the
    // dominating predecessor here (or perhaps force the weirder IRTranslators
    // to provide a simple boundary).
    SmallSet<const BasicBlock *, 4> HandledPreds;

    for (unsigned i = 0; i < PI->getNumIncomingValues(); ++i) {
      auto IRPred = PI->getIncomingBlock(i);
      if (HandledPreds.count(IRPred))
        continue;

      HandledPreds.insert(IRPred);
      ArrayRef<unsigned> ValRegs = getOrCreateVRegs(*PI->getIncomingValue(i));
      for (auto Pred : getMachinePredBBs({IRPred, PI->getParent()})) {
        assert(Pred->isSuccessor(ComponentPHIs[0]->getParent()) &&
               "incorrect CFG at MachineBasicBlock level");
        for (unsigned j = 0; j < ValRegs.size(); ++j) {
          MachineInstrBuilder MIB(*MF, ComponentPHIs[j]);
          MIB.addUse(ValRegs[j]);
          MIB.addMBB(Pred);
        }
      }
    }
  }
}

bool IRTranslator::valueIsSplit(const Value &V,
                                SmallVectorImpl<uint64_t> *Offsets) {
  SmallVector<LLT, 4> SplitTys;
  if (Offsets && !Offsets->empty())
    Offsets->clear();
  computeValueLLTs(*DL, *V.getType(), SplitTys, Offsets);
  return SplitTys.size() > 1;
}

bool IRTranslator::translate(const Instruction &Inst) {
  CurBuilder.setDebugLoc(Inst.getDebugLoc());
  EntryBuilder.setDebugLoc(Inst.getDebugLoc());
  switch (Inst.getOpcode()) {
#define HANDLE_INST(NUM, OPCODE, CLASS) \
  case Instruction::OPCODE: return translate##OPCODE(Inst, CurBuilder);
#include "llvm/IR/Instruction.def"
  default:
    return false;
  }
}
1570 
1571 bool IRTranslator::translate(const Constant &C, unsigned Reg) {
1572  if (auto CI = dyn_cast<ConstantInt>(&C))
1573  EntryBuilder.buildConstant(Reg, *CI);
1574  else if (auto CF = dyn_cast<ConstantFP>(&C))
1575  EntryBuilder.buildFConstant(Reg, *CF);
1576  else if (isa<UndefValue>(C))
1577  EntryBuilder.buildUndef(Reg);
1578  else if (isa<ConstantPointerNull>(C)) {
1579  // As we are trying to build a constant val of 0 into a pointer,
1580  // insert a cast to make them correct with respect to types.
1581  unsigned NullSize = DL->getTypeSizeInBits(C.getType());
1582  auto *ZeroTy = Type::getIntNTy(C.getContext(), NullSize);
1583  auto *ZeroVal = ConstantInt::get(ZeroTy, 0);
1584  unsigned ZeroReg = getOrCreateVReg(*ZeroVal);
1585  EntryBuilder.buildCast(Reg, ZeroReg);
1586  } else if (auto GV = dyn_cast<GlobalValue>(&C))
1587  EntryBuilder.buildGlobalValue(Reg, GV);
1588  else if (auto CAZ = dyn_cast<ConstantAggregateZero>(&C)) {
1589  if (!CAZ->getType()->isVectorTy())
1590  return false;
1591  // Return the scalar if it is a <1 x Ty> vector.
1592  if (CAZ->getNumElements() == 1)
1593  return translate(*CAZ->getElementValue(0u), Reg);
1594  std::vector<unsigned> Ops;
1595  for (unsigned i = 0; i < CAZ->getNumElements(); ++i) {
1596  Constant &Elt = *CAZ->getElementValue(i);
1597  Ops.push_back(getOrCreateVReg(Elt));
1598  }
1599  EntryBuilder.buildMerge(Reg, Ops);
1600  } else if (auto CV = dyn_cast<ConstantDataVector>(&C)) {
1601  // Return the scalar if it is a <1 x Ty> vector.
1602  if (CV->getNumElements() == 1)
1603  return translate(*CV->getElementAsConstant(0), Reg);
1604  std::vector<unsigned> Ops;
1605  for (unsigned i = 0; i < CV->getNumElements(); ++i) {
1606  Constant &Elt = *CV->getElementAsConstant(i);
1607  Ops.push_back(getOrCreateVReg(Elt));
1608  }
1609  EntryBuilder.buildMerge(Reg, Ops);
1610  } else if (auto CE = dyn_cast<ConstantExpr>(&C)) {
1611  switch(CE->getOpcode()) {
1612 #define HANDLE_INST(NUM, OPCODE, CLASS) \
1613  case Instruction::OPCODE: return translate##OPCODE(*CE, EntryBuilder);
1614 #include "llvm/IR/Instruction.def"
1615  default:
1616  return false;
1617  }
1618  } else if (auto CV = dyn_cast<ConstantVector>(&C)) {
1619  if (CV->getNumOperands() == 1)
1620  return translate(*CV->getOperand(0), Reg);
1621  std::vector<unsigned> Ops;
1622  for (unsigned i = 0; i < CV->getNumOperands(); ++i) {
1623  Ops.push_back(getOrCreateVReg(*CV->getOperand(i)));
1624  }
1625  EntryBuilder.buildMerge(Reg, Ops);
1626  } else if (auto *BA = dyn_cast<BlockAddress>(&C)) {
1627  EntryBuilder.buildBlockAddress(Reg, BA);
1628  } else
1629  return false;
1630 
1631  return true;
1632 }
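// For illustration: translating an 'i8* null' constant on a target with
// 64-bit pointers takes the ConstantPointerNull branch above and yields
// roughly (a sketch of the generic MIR, not a verbatim dump):
//   %zero(s64) = G_CONSTANT i64 0
//   %null(p0) = G_INTTOPTR %zero(s64)
// buildCast picks the int-to-pointer cast because the register kinds differ
// while the sizes match.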
1633 
1634 void IRTranslator::finalizeFunction() {
1635  // Release the memory used by the different maps we
1636  // needed during the translation.
1637  PendingPHIs.clear();
1638  VMap.reset();
1639  FrameIndices.clear();
1640  MachinePreds.clear();
1641  // MachineIRBuilder::DebugLoc can outlive the DILocation it holds. Clear it
1642  // to avoid accessing free'd memory (in runOnMachineFunction) and to avoid
1643  // destroying it twice (in ~IRTranslator() and ~LLVMContext()).
1644  EntryBuilder = MachineIRBuilder();
1645  CurBuilder = MachineIRBuilder();
1646 }
1647 
1648 bool IRTranslator::runOnMachineFunction(MachineFunction &CurMF) {
1649  MF = &CurMF;
1650  const Function &F = MF->getFunction();
1651  if (F.empty())
1652  return false;
1653  CLI = MF->getSubtarget().getCallLowering();
1654  CurBuilder.setMF(*MF);
1655  EntryBuilder.setMF(*MF);
1656  MRI = &MF->getRegInfo();
1657  DL = &F.getParent()->getDataLayout();
1658  TPC = &getAnalysis<TargetPassConfig>();
1659  ORE = llvm::make_unique<OptimizationRemarkEmitter>(&F);
1660 
1661  assert(PendingPHIs.empty() && "stale PHIs");
1662 
1663  if (!DL->isLittleEndian()) {
1664  // Currently we don't properly handle big-endian code.
1665  OptimizationRemarkMissed R("gisel-irtranslator", "GISelFailure",
1666  F.getSubprogram(), &F.getEntryBlock());
1667  R << "unable to translate in big endian mode";
1668  reportTranslationError(*MF, *TPC, *ORE, R);
1669  }
1670 
1671  // Release the per-function state when we return, whether we succeeded or not.
1672  auto FinalizeOnReturn = make_scope_exit([this]() { finalizeFunction(); });
1673 
1674  // Set up a separate basic block for the arguments and constants.
1675  MachineBasicBlock *EntryBB = MF->CreateMachineBasicBlock();
1676  MF->push_back(EntryBB);
1677  EntryBuilder.setMBB(*EntryBB);
1678 
1679  // Create all blocks, in IR order, to preserve the layout.
1680  for (const BasicBlock &BB: F) {
1681  auto *&MBB = BBToMBB[&BB];
1682 
1683  MBB = MF->CreateMachineBasicBlock(&BB);
1684  MF->push_back(MBB);
1685 
1686  if (BB.hasAddressTaken())
1687  MBB->setHasAddressTaken();
1688  }
1689 
1690  // Make our arguments/constants entry block fall through to the IR entry block.
1691  EntryBB->addSuccessor(&getMBB(F.front()));
1692 
1693  // Lower the actual args into this basic block.
1694  SmallVector<unsigned, 8> VRegArgs;
1695  for (const Argument &Arg: F.args()) {
1696  if (DL->getTypeStoreSize(Arg.getType()) == 0)
1697  continue; // Don't handle zero sized types.
1698  VRegArgs.push_back(
1699  MRI->createGenericVirtualRegister(getLLTForType(*Arg.getType(), *DL)));
1700  }
1701 
1702  // We don't currently support translating swifterror or swiftself functions.
1703  for (auto &Arg : F.args()) {
1704  if (Arg.hasSwiftErrorAttr() || Arg.hasSwiftSelfAttr()) {
1705  OptimizationRemarkMissed R("gisel-irtranslator", "GISelFailure",
1706  F.getSubprogram(), &F.getEntryBlock());
1707  R << "unable to lower arguments due to swifterror/swiftself: "
1708  << ore::NV("Prototype", F.getType());
1709  reportTranslationError(*MF, *TPC, *ORE, R);
1710  return false;
1711  }
1712  }
1713 
1714  if (!CLI->lowerFormalArguments(EntryBuilder, F, VRegArgs)) {
1715  OptimizationRemarkMissed R("gisel-irtranslator", "GISelFailure",
1716  F.getSubprogram(), &F.getEntryBlock());
1717  R << "unable to lower arguments: " << ore::NV("Prototype", F.getType());
1718  reportTranslationError(*MF, *TPC, *ORE, R);
1719  return false;
1720  }
1721 
1722  auto ArgIt = F.arg_begin();
1723  for (auto &VArg : VRegArgs) {
1724  // If the argument is an unsplit scalar, don't go through unpackRegs;
1725  // that would only create redundant copies.
1726  if (!valueIsSplit(*ArgIt, VMap.getOffsets(*ArgIt))) {
1727  auto &VRegs = *VMap.getVRegs(cast<Value>(*ArgIt));
1728  assert(VRegs.empty() && "VRegs already populated?");
1729  VRegs.push_back(VArg);
1730  } else {
1731  unpackRegs(*ArgIt, VArg, EntryBuilder);
1732  }
1733  ArgIt++;
1734  }
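// For illustration: an i64 argument stays a single vreg and is recorded
// directly in the value map, while a { i64, i64 } argument goes through
// unpackRegs, which extracts one vreg per component at its recorded offset
// (a sketch; the exact packing is decided by the target's CallLowering).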
1735 
1736  // Need to visit defs before uses when translating instructions.
1737  {
1738  ReversePostOrderTraversal<const Function *> RPOT(&F);
1739 #ifndef NDEBUG
1740  DILocationVerifier Verifier(*MF);
1741 #endif // ifndef NDEBUG
1742  for (const BasicBlock *BB : RPOT) {
1743  MachineBasicBlock &MBB = getMBB(*BB);
1744  // Set the insertion point of all the following translations to
1745  // the end of this basic block.
1746  CurBuilder.setMBB(MBB);
1747 
1748  for (const Instruction &Inst : *BB) {
1749 #ifndef NDEBUG
1750  Verifier.setCurrentInst(&Inst);
1751 #endif // ifndef NDEBUG
1752  if (translate(Inst))
1753  continue;
1754 
1755  OptimizationRemarkMissed R("gisel-irtranslator", "GISelFailure",
1756  Inst.getDebugLoc(), BB);
1757  R << "unable to translate instruction: " << ore::NV("Opcode", &Inst);
1758 
1759  if (ORE->allowExtraAnalysis("gisel-irtranslator")) {
1760  std::string InstStrStorage;
1761  raw_string_ostream InstStr(InstStrStorage);
1762  InstStr << Inst;
1763 
1764  R << ": '" << InstStr.str() << "'";
1765  }
1766 
1767  reportTranslationError(*MF, *TPC, *ORE, R);
1768  return false;
1769  }
1770  }
1771  }
1772 
1773  finishPendingPhis();
1774 
1775  // Merge the argument lowering and constants block with its single
1776  // successor, the LLVM-IR entry block. We want the basic block to
1777  // be maximal.
1778  assert(EntryBB->succ_size() == 1 &&
1779  "Custom BB used for lowering should have only one successor");
1780  // Get the successor of the current entry block.
1781  MachineBasicBlock &NewEntryBB = **EntryBB->succ_begin();
1782  assert(NewEntryBB.pred_size() == 1 &&
1783  "LLVM-IR entry block has a predecessor!?");
1784  // Move all the instructions from the current entry block to the
1785  // new entry block.
1786  NewEntryBB.splice(NewEntryBB.begin(), EntryBB, EntryBB->begin(),
1787  EntryBB->end());
1788 
1789  // Update the live-in information for the new entry block.
1790  for (const MachineBasicBlock::RegisterMaskPair &LiveIn : EntryBB->liveins())
1791  NewEntryBB.addLiveIn(LiveIn);
1792  NewEntryBB.sortUniqueLiveIns();
1793 
1794  // Get rid of the now empty basic block.
1795  EntryBB->removeSuccessor(&NewEntryBB);
1796  MF->remove(EntryBB);
1797  MF->DeleteMachineBasicBlock(EntryBB);
1798 
1799  assert(&MF->front() == &NewEntryBB &&
1800  "New entry wasn't next in the list of basic blocks!");
1801 
1802  // Initialize stack protector information.
1803  StackProtector &SP = getAnalysis<StackProtector>();
1804  SP.copyToMachineFrameInfo(MF->getFrameInfo());
1805 
1806  return false;
1807 }
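// Pipeline note: IRTranslator is the first GlobalISel pass, followed by
// Legalizer, RegBankSelect, and InstructionSelect. One way to inspect just
// its output (assuming a GlobalISel-capable target such as AArch64) is:
//   llc -mtriple=aarch64-- -global-isel -stop-after=irtranslator in.ll -o -
// which prints the generic MIR immediately after translation.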