LLVM  9.0.0svn
IRTranslator.cpp
Go to the documentation of this file.
1 //===- llvm/CodeGen/GlobalISel/IRTranslator.cpp - IRTranslator ---*- C++ -*-==//
2 //
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 //
7 //===----------------------------------------------------------------------===//
8 /// \file
9 /// This file implements the IRTranslator class.
10 //===----------------------------------------------------------------------===//
11 
14 #include "llvm/ADT/STLExtras.h"
15 #include "llvm/ADT/ScopeExit.h"
16 #include "llvm/ADT/SmallSet.h"
17 #include "llvm/ADT/SmallVector.h"
20 #include "llvm/CodeGen/Analysis.h"
37 #include "llvm/IR/BasicBlock.h"
38 #include "llvm/IR/CFG.h"
39 #include "llvm/IR/Constant.h"
40 #include "llvm/IR/Constants.h"
41 #include "llvm/IR/DataLayout.h"
42 #include "llvm/IR/DebugInfo.h"
43 #include "llvm/IR/DerivedTypes.h"
44 #include "llvm/IR/Function.h"
46 #include "llvm/IR/InlineAsm.h"
47 #include "llvm/IR/InstrTypes.h"
48 #include "llvm/IR/Instructions.h"
49 #include "llvm/IR/IntrinsicInst.h"
50 #include "llvm/IR/Intrinsics.h"
51 #include "llvm/IR/LLVMContext.h"
52 #include "llvm/IR/Metadata.h"
53 #include "llvm/IR/Type.h"
54 #include "llvm/IR/User.h"
55 #include "llvm/IR/Value.h"
56 #include "llvm/MC/MCContext.h"
57 #include "llvm/Pass.h"
58 #include "llvm/Support/Casting.h"
59 #include "llvm/Support/CodeGen.h"
60 #include "llvm/Support/Debug.h"
67 #include <algorithm>
68 #include <cassert>
69 #include <cstdint>
70 #include <iterator>
71 #include <string>
72 #include <utility>
73 #include <vector>
74 
75 #define DEBUG_TYPE "irtranslator"
76 
77 using namespace llvm;
78 
// Opt-in switch for CSE-ing constants/instructions while translating
// (off by default).
static cl::opt<bool>
    EnableCSEInIRTranslator("enable-cse-in-irtranslator",
                            cl::desc("Should enable CSE in irtranslator"),
                            cl::Optional, cl::init(false));

char IRTranslator::ID = 0;

// Pass registration.
// NOTE(review): the INITIALIZE_PASS_DEPENDENCY entries that normally sit
// between BEGIN and END are not visible in this chunk — confirm against the
// original source.
INITIALIZE_PASS_BEGIN(IRTranslator, DEBUG_TYPE, "IRTranslator LLVM IR -> MI",
                      false, false)
INITIALIZE_PASS_END(IRTranslator, DEBUG_TYPE, "IRTranslator LLVM IR -> MI",
                    false, false)
91 
97 
98  // Print the function name explicitly if we don't have a debug location (which
99  // makes the diagnostic less useful) or if we're going to emit a raw error.
100  if (!R.getLocation().isValid() || TPC.isGlobalISelAbortEnabled())
101  R << (" (in function: " + MF.getName() + ")").str();
102 
103  if (TPC.isGlobalISelAbortEnabled())
104  report_fatal_error(R.getMsg());
105  else
106  ORE.emit(R);
107 }
108 
111 }
112 
113 #ifndef NDEBUG
114 namespace {
/// Verify that every instruction created has the same DILocation as the
/// instruction being translated.
class DILocationVerifier : public GISelChangeObserver {
  // IR instruction currently being translated; every MachineInstr created
  // while it is current must carry its debug location.
  const Instruction *CurrInst = nullptr;

public:
  DILocationVerifier() = default;
  ~DILocationVerifier() = default;

  const Instruction *getCurrentInst() const { return CurrInst; }
  void setCurrentInst(const Instruction *Inst) { CurrInst = Inst; }

  // Only instruction creation matters for location verification; the other
  // observer callbacks are deliberate no-ops.
  void erasingInstr(MachineInstr &MI) override {}
  void changingInstr(MachineInstr &MI) override {}
  void changedInstr(MachineInstr &MI) override {}

  void createdInstr(MachineInstr &MI) override {
    assert(getCurrentInst() && "Inserted instruction without a current MI");

    // Only print the check message if we're actually checking it.
#ifndef NDEBUG
    LLVM_DEBUG(dbgs() << "Checking DILocation from " << *CurrInst
                      << " was copied to " << MI);
#endif
    assert(CurrInst->getDebugLoc() == MI.getDebugLoc() &&
           "Line info was not transferred to all instructions");
  }
};
143 } // namespace
144 #endif // ifndef NDEBUG
145 
146 
153 }
154 
/// Decompose \p Ty into the LLTs of its leaf values, appending one entry to
/// \p ValueTys per scalar/vector/pointer leaf. Structs and arrays are
/// flattened recursively; void contributes nothing. When the optional
/// Offsets out-list is non-null, the bit offset of each leaf is recorded.
/// NOTE(review): the Offsets parameter referenced in the body (presumably
/// `SmallVectorImpl<uint64_t> *Offsets = nullptr`) is not visible in this
/// chunk's signature — confirm against the original source.
static void computeValueLLTs(const DataLayout &DL, Type &Ty,
                             SmallVectorImpl<LLT> &ValueTys,
                             uint64_t StartingOffset = 0) {
  // Given a struct type, recursively traverse the elements.
  if (StructType *STy = dyn_cast<StructType>(&Ty)) {
    const StructLayout *SL = DL.getStructLayout(STy);
    for (unsigned I = 0, E = STy->getNumElements(); I != E; ++I)
      computeValueLLTs(DL, *STy->getElementType(I), ValueTys, Offsets,
                       StartingOffset + SL->getElementOffset(I));
    return;
  }
  // Given an array type, recursively traverse the elements.
  if (ArrayType *ATy = dyn_cast<ArrayType>(&Ty)) {
    Type *EltTy = ATy->getElementType();
    uint64_t EltSize = DL.getTypeAllocSize(EltTy);
    for (unsigned i = 0, e = ATy->getNumElements(); i != e; ++i)
      computeValueLLTs(DL, *EltTy, ValueTys, Offsets,
                       StartingOffset + i * EltSize);
    return;
  }
  // Interpret void as zero return values.
  if (Ty.isVoidTy())
    return;
  // Base case: we can get an LLT for this LLVM IR type.
  ValueTys.push_back(getLLTForType(Ty, DL));
  if (Offsets != nullptr)
    Offsets->push_back(StartingOffset * 8); // offsets are recorded in bits
}
184 
// Allocate (but do not define) one vreg slot per constituent LLT of
// \p Val's type; callers overwrite the zero placeholders. \p Val must not
// already have a VMap entry.
// NOTE(review): the return-type line of this definition (presumably
// `ArrayRef<unsigned>`) is not visible in this chunk.
IRTranslator::allocateVRegs(const Value &Val) {
  assert(!VMap.contains(Val) && "Value already allocated in VMap");
  auto *Regs = VMap.getVRegs(Val);
  auto *Offsets = VMap.getOffsets(Val);
  SmallVector<LLT, 4> SplitTys;
  // Record per-piece offsets only on first allocation (Offsets empty).
  computeValueLLTs(*DL, *Val.getType(), SplitTys,
                   Offsets->empty() ? Offsets : nullptr);
  // One zero (invalid) register placeholder per split type.
  for (unsigned i = 0; i < SplitTys.size(); ++i)
    Regs->push_back(0);
  return *Regs;
}
197 
ArrayRef<unsigned> IRTranslator::getOrCreateVRegs(const Value &Val) {
  // Fast path: this value was already assigned registers.
  auto VRegsIt = VMap.findVRegs(Val);
  if (VRegsIt != VMap.vregs_end())
    return *VRegsIt->second;

  // Void values get an (empty) entry so later lookups succeed.
  if (Val.getType()->isVoidTy())
    return *VMap.getVRegs(Val);

  // Create entry for this type.
  auto *VRegs = VMap.getVRegs(Val);
  auto *Offsets = VMap.getOffsets(Val);

  assert(Val.getType()->isSized() &&
         "Don't know how to create an empty vreg");

  // Split the value into its legal constituent types, recording each
  // piece's bit offset only the first time through (Offsets empty).
  SmallVector<LLT, 4> SplitTys;
  computeValueLLTs(*DL, *Val.getType(), SplitTys,
                   Offsets->empty() ? Offsets : nullptr);

  if (!isa<Constant>(Val)) {
    // Plain (non-constant) value: one fresh generic vreg per piece.
    for (auto Ty : SplitTys)
      VRegs->push_back(MRI->createGenericVirtualRegister(Ty));
    return *VRegs;
  }

  if (Val.getType()->isAggregateType()) {
    // UndefValue, ConstantAggregateZero
    // Reuse the registers of every element, created recursively. Note the
    // recursive call may add VMap entries; VRegs stays valid because VMap
    // hands out stable pointers.
    auto &C = cast<Constant>(Val);
    unsigned Idx = 0;
    while (auto Elt = C.getAggregateElement(Idx++)) {
      auto EltRegs = getOrCreateVRegs(*Elt);
      llvm::copy(EltRegs, std::back_inserter(*VRegs));
    }
  } else {
    // Scalar constant: create one vreg and translate the constant into it.
    assert(SplitTys.size() == 1 && "unexpectedly split LLT");
    VRegs->push_back(MRI->createGenericVirtualRegister(SplitTys[0]));
    bool Success = translate(cast<Constant>(Val), VRegs->front());
    if (!Success) {
      // Report the failure but still return the (partially set up) entry so
      // the caller has something to hold on to.
      OptimizationRemarkMissed R("gisel-irtranslator", "GISelFailure",
                                 MF->getFunction().getSubprogram(),
                                 &MF->getFunction().getEntryBlock());
      R << "unable to translate constant: " << ore::NV("Type", Val.getType());
      reportTranslationError(*MF, *TPC, *ORE, R);
      return *VRegs;
    }
  }

  return *VRegs;
}
247 
248 int IRTranslator::getOrCreateFrameIndex(const AllocaInst &AI) {
249  if (FrameIndices.find(&AI) != FrameIndices.end())
250  return FrameIndices[&AI];
251 
252  unsigned ElementSize = DL->getTypeStoreSize(AI.getAllocatedType());
253  unsigned Size =
254  ElementSize * cast<ConstantInt>(AI.getArraySize())->getZExtValue();
255 
256  // Always allocate at least one byte.
257  Size = std::max(Size, 1u);
258 
259  unsigned Alignment = AI.getAlignment();
260  if (!Alignment)
261  Alignment = DL->getABITypeAlignment(AI.getAllocatedType());
262 
263  int &FI = FrameIndices[&AI];
264  FI = MF->getFrameInfo().CreateStackObject(Size, Alignment, false, &AI);
265  return FI;
266 }
267 
268 unsigned IRTranslator::getMemOpAlignment(const Instruction &I) {
269  unsigned Alignment = 0;
270  Type *ValTy = nullptr;
271  if (const StoreInst *SI = dyn_cast<StoreInst>(&I)) {
272  Alignment = SI->getAlignment();
273  ValTy = SI->getValueOperand()->getType();
274  } else if (const LoadInst *LI = dyn_cast<LoadInst>(&I)) {
275  Alignment = LI->getAlignment();
276  ValTy = LI->getType();
277  } else if (const AtomicCmpXchgInst *AI = dyn_cast<AtomicCmpXchgInst>(&I)) {
278  // TODO(PR27168): This instruction has no alignment attribute, but unlike
279  // the default alignment for load/store, the default here is to assume
280  // it has NATURAL alignment, not DataLayout-specified alignment.
281  const DataLayout &DL = AI->getModule()->getDataLayout();
282  Alignment = DL.getTypeStoreSize(AI->getCompareOperand()->getType());
283  ValTy = AI->getCompareOperand()->getType();
284  } else if (const AtomicRMWInst *AI = dyn_cast<AtomicRMWInst>(&I)) {
285  // TODO(PR27168): This instruction has no alignment attribute, but unlike
286  // the default alignment for load/store, the default here is to assume
287  // it has NATURAL alignment, not DataLayout-specified alignment.
288  const DataLayout &DL = AI->getModule()->getDataLayout();
289  Alignment = DL.getTypeStoreSize(AI->getValOperand()->getType());
290  ValTy = AI->getType();
291  } else {
292  OptimizationRemarkMissed R("gisel-irtranslator", "", &I);
293  R << "unable to translate memop: " << ore::NV("Opcode", &I);
294  reportTranslationError(*MF, *TPC, *ORE, R);
295  return 1;
296  }
297 
298  return Alignment ? Alignment : DL->getABITypeAlignment(ValTy);
299 }
300 
301 MachineBasicBlock &IRTranslator::getMBB(const BasicBlock &BB) {
302  MachineBasicBlock *&MBB = BBToMBB[&BB];
303  assert(MBB && "BasicBlock was not encountered before");
304  return *MBB;
305 }
306 
307 void IRTranslator::addMachineCFGPred(CFGEdge Edge, MachineBasicBlock *NewPred) {
308  assert(NewPred && "new predecessor must be a real MachineBasicBlock");
309  MachinePreds[Edge].push_back(NewPred);
310 }
311 
// Translate a two-operand IR operation \p U into the generic opcode
// \p Opcode, forwarding instruction flags when U is a real Instruction.
bool IRTranslator::translateBinaryOp(unsigned Opcode, const User &U,
                                     MachineIRBuilder &MIRBuilder) {
  // FIXME: handle signed/unsigned wrapping flags.

  // Get or create a virtual register for each value.
  // Unless the value is a Constant => loadimm cst?
  // or inline constant each time?
  // Creation of a virtual register needs to have a size.
  unsigned Op0 = getOrCreateVReg(*U.getOperand(0));
  unsigned Op1 = getOrCreateVReg(*U.getOperand(1));
  unsigned Res = getOrCreateVReg(U);
  uint16_t Flags = 0;
  if (isa<Instruction>(U)) {
    const Instruction &I = cast<Instruction>(U);
    // NOTE(review): the statement copying I's flags into Flags (presumably
    // MachineInstr::copyFlagsFromInstruction(I)) is not visible in this
    // chunk, leaving I apparently unused — confirm against the original.
  }

  MIRBuilder.buildInstr(Opcode, {Res}, {Op0, Op1}, Flags);
  return true;
}
332 
// fsub is special-cased so that `-0.0 - X` becomes a G_FNEG instead of a
// generic G_FSUB.
bool IRTranslator::translateFSub(const User &U, MachineIRBuilder &MIRBuilder) {
  // -0.0 - X --> G_FNEG
  // NOTE(review): the remainder of this condition (presumably checking that
  // operand 0 is negative zero) is not visible in this chunk — confirm
  // against the original source.
  if (isa<Constant>(U.getOperand(0)) &&
    MIRBuilder.buildInstr(TargetOpcode::G_FNEG)
        .addDef(getOrCreateVReg(U))
        .addUse(getOrCreateVReg(*U.getOperand(1)));
    return true;
  }
  return translateBinaryOp(TargetOpcode::G_FSUB, U, MIRBuilder);
}
344 
345 bool IRTranslator::translateFNeg(const User &U, MachineIRBuilder &MIRBuilder) {
346  MIRBuilder.buildInstr(TargetOpcode::G_FNEG)
347  .addDef(getOrCreateVReg(U))
348  .addUse(getOrCreateVReg(*U.getOperand(0)));
349  return true;
350 }
351 
// Translate icmp/fcmp, whether it appears as a CmpInst or as a compare
// ConstantExpr (CI is null in the latter case). FCMP_FALSE/FCMP_TRUE are
// folded to constant 0 / all-ones copies.
bool IRTranslator::translateCompare(const User &U,
                                    MachineIRBuilder &MIRBuilder) {
  const CmpInst *CI = dyn_cast<CmpInst>(&U);
  unsigned Op0 = getOrCreateVReg(*U.getOperand(0));
  unsigned Op1 = getOrCreateVReg(*U.getOperand(1));
  unsigned Res = getOrCreateVReg(U);
  CmpInst::Predicate Pred =
      CI ? CI->getPredicate() : static_cast<CmpInst::Predicate>(
                                    cast<ConstantExpr>(U).getPredicate());
  if (CmpInst::isIntPredicate(Pred))
    MIRBuilder.buildICmp(Pred, Res, Op0, Op1);
  else if (Pred == CmpInst::FCMP_FALSE)
    // NOTE(review): CI is dereferenced below but is null when U is a
    // ConstantExpr compare — confirm that path cannot reach this branch.
    MIRBuilder.buildCopy(
        Res, getOrCreateVReg(*Constant::getNullValue(CI->getType())));
  else if (Pred == CmpInst::FCMP_TRUE)
    MIRBuilder.buildCopy(
        Res, getOrCreateVReg(*Constant::getAllOnesValue(CI->getType())));
  else {
    // NOTE(review): a trailing flags argument to buildInstr appears to have
    // been elided from this chunk — confirm against the original source.
    MIRBuilder.buildInstr(TargetOpcode::G_FCMP, {Res}, {Pred, Op0, Op1},
  }

  return true;
}
376 
377 bool IRTranslator::translateRet(const User &U, MachineIRBuilder &MIRBuilder) {
378  const ReturnInst &RI = cast<ReturnInst>(U);
379  const Value *Ret = RI.getReturnValue();
380  if (Ret && DL->getTypeStoreSize(Ret->getType()) == 0)
381  Ret = nullptr;
382 
383  ArrayRef<unsigned> VRegs;
384  if (Ret)
385  VRegs = getOrCreateVRegs(*Ret);
386 
387  // The target may mess up with the insertion point, but
388  // this is not important as a return is the last instruction
389  // of the block anyway.
390 
391  return CLI->lowerReturn(MIRBuilder, Ret, VRegs);
392 }
393 
394 bool IRTranslator::translateBr(const User &U, MachineIRBuilder &MIRBuilder) {
395  const BranchInst &BrInst = cast<BranchInst>(U);
396  unsigned Succ = 0;
397  if (!BrInst.isUnconditional()) {
398  // We want a G_BRCOND to the true BB followed by an unconditional branch.
399  unsigned Tst = getOrCreateVReg(*BrInst.getCondition());
400  const BasicBlock &TrueTgt = *cast<BasicBlock>(BrInst.getSuccessor(Succ++));
401  MachineBasicBlock &TrueBB = getMBB(TrueTgt);
402  MIRBuilder.buildBrCond(Tst, TrueBB);
403  }
404 
405  const BasicBlock &BrTgt = *cast<BasicBlock>(BrInst.getSuccessor(Succ));
406  MachineBasicBlock &TgtBB = getMBB(BrTgt);
407  MachineBasicBlock &CurBB = MIRBuilder.getMBB();
408 
409  // If the unconditional target is the layout successor, fallthrough.
410  if (!CurBB.isLayoutSuccessor(&TgtBB))
411  MIRBuilder.buildBr(TgtBB);
412 
413  // Link successors.
414  for (const BasicBlock *Succ : successors(&BrInst))
415  CurBB.addSuccessor(&getMBB(*Succ));
416  return true;
417 }
418 
419 bool IRTranslator::translateSwitch(const User &U,
420  MachineIRBuilder &MIRBuilder) {
421  // For now, just translate as a chain of conditional branches.
422  // FIXME: could we share most of the logic/code in
423  // SelectionDAGBuilder::visitSwitch between SelectionDAG and GlobalISel?
424  // At first sight, it seems most of the logic in there is independent of
425  // SelectionDAG-specifics and a lot of work went in to optimize switch
426  // lowering in there.
427 
428  const SwitchInst &SwInst = cast<SwitchInst>(U);
429  const unsigned SwCondValue = getOrCreateVReg(*SwInst.getCondition());
430  const BasicBlock *OrigBB = SwInst.getParent();
431 
432  LLT LLTi1 = getLLTForType(*Type::getInt1Ty(U.getContext()), *DL);
433  for (auto &CaseIt : SwInst.cases()) {
434  const unsigned CaseValueReg = getOrCreateVReg(*CaseIt.getCaseValue());
435  const unsigned Tst = MRI->createGenericVirtualRegister(LLTi1);
436  MIRBuilder.buildICmp(CmpInst::ICMP_EQ, Tst, CaseValueReg, SwCondValue);
437  MachineBasicBlock &CurMBB = MIRBuilder.getMBB();
438  const BasicBlock *TrueBB = CaseIt.getCaseSuccessor();
439  MachineBasicBlock &TrueMBB = getMBB(*TrueBB);
440 
441  MIRBuilder.buildBrCond(Tst, TrueMBB);
442  CurMBB.addSuccessor(&TrueMBB);
443  addMachineCFGPred({OrigBB, TrueBB}, &CurMBB);
444 
445  MachineBasicBlock *FalseMBB =
446  MF->CreateMachineBasicBlock(SwInst.getParent());
447  // Insert the comparison blocks one after the other.
448  MF->insert(std::next(CurMBB.getIterator()), FalseMBB);
449  MIRBuilder.buildBr(*FalseMBB);
450  CurMBB.addSuccessor(FalseMBB);
451 
452  MIRBuilder.setMBB(*FalseMBB);
453  }
454  // handle default case
455  const BasicBlock *DefaultBB = SwInst.getDefaultDest();
456  MachineBasicBlock &DefaultMBB = getMBB(*DefaultBB);
457  MIRBuilder.buildBr(DefaultMBB);
458  MachineBasicBlock &CurMBB = MIRBuilder.getMBB();
459  CurMBB.addSuccessor(&DefaultMBB);
460  addMachineCFGPred({OrigBB, DefaultBB}, &CurMBB);
461 
462  return true;
463 }
464 
465 bool IRTranslator::translateIndirectBr(const User &U,
466  MachineIRBuilder &MIRBuilder) {
467  const IndirectBrInst &BrInst = cast<IndirectBrInst>(U);
468 
469  const unsigned Tgt = getOrCreateVReg(*BrInst.getAddress());
470  MIRBuilder.buildBrIndirect(Tgt);
471 
472  // Link successors.
473  MachineBasicBlock &CurBB = MIRBuilder.getMBB();
474  for (const BasicBlock *Succ : successors(&BrInst))
475  CurBB.addSuccessor(&getMBB(*Succ));
476 
477  return true;
478 }
479 
// Translate a load: one G_LOAD per constituent vreg of the loaded type, at
// the bit offsets recorded in VMap.
bool IRTranslator::translateLoad(const User &U, MachineIRBuilder &MIRBuilder) {
  const LoadInst &LI = cast<LoadInst>(U);

  // NOTE(review): the false arm of this conditional (presumably
  // MachineMemOperand::MONone plus the atomic-flag handling) is not visible
  // in this chunk — confirm against the original source.
  auto Flags = LI.isVolatile() ? MachineMemOperand::MOVolatile
  Flags |= MachineMemOperand::MOLoad;

  // Zero-sized loads produce no value; nothing to emit.
  if (DL->getTypeStoreSize(LI.getType()) == 0)
    return true;

  ArrayRef<unsigned> Regs = getOrCreateVRegs(LI);
  ArrayRef<uint64_t> Offsets = *VMap.getOffsets(LI);
  unsigned Base = getOrCreateVReg(*LI.getPointerOperand());

  for (unsigned i = 0; i < Regs.size(); ++i) {
    unsigned Addr = 0;
    // Offsets are stored in bits; addresses are in bytes.
    MIRBuilder.materializeGEP(Addr, Base, LLT::scalar(64), Offsets[i] / 8);

    MachinePointerInfo Ptr(LI.getPointerOperand(), Offsets[i] / 8);
    unsigned BaseAlign = getMemOpAlignment(LI);
    auto MMO = MF->getMachineMemOperand(
        Ptr, Flags, (MRI->getType(Regs[i]).getSizeInBits() + 7) / 8,
        MinAlign(BaseAlign, Offsets[i] / 8), AAMDNodes(), nullptr,
        LI.getSyncScopeID(), LI.getOrdering());
    MIRBuilder.buildLoad(Regs[i], Addr, *MMO);
  }

  return true;
}
509 
// Translate a store: one G_STORE per constituent vreg of the stored value,
// mirroring translateLoad above.
bool IRTranslator::translateStore(const User &U, MachineIRBuilder &MIRBuilder) {
  const StoreInst &SI = cast<StoreInst>(U);
  // NOTE(review): the continuation of this initializer (presumably the
  // MONone arm and `Flags |= MachineMemOperand::MOStore;`) is not visible
  // in this chunk — confirm against the original source.
  auto Flags = SI.isVolatile() ? MachineMemOperand::MOVolatile

  // Zero-sized stores move no data; nothing to emit.
  if (DL->getTypeStoreSize(SI.getValueOperand()->getType()) == 0)
    return true;

  ArrayRef<unsigned> Vals = getOrCreateVRegs(*SI.getValueOperand());
  ArrayRef<uint64_t> Offsets = *VMap.getOffsets(*SI.getValueOperand());
  unsigned Base = getOrCreateVReg(*SI.getPointerOperand());

  for (unsigned i = 0; i < Vals.size(); ++i) {
    unsigned Addr = 0;
    // Offsets are stored in bits; addresses are in bytes.
    MIRBuilder.materializeGEP(Addr, Base, LLT::scalar(64), Offsets[i] / 8);

    MachinePointerInfo Ptr(SI.getPointerOperand(), Offsets[i] / 8);
    unsigned BaseAlign = getMemOpAlignment(SI);
    auto MMO = MF->getMachineMemOperand(
        Ptr, Flags, (MRI->getType(Vals[i]).getSizeInBits() + 7) / 8,
        MinAlign(BaseAlign, Offsets[i] / 8), AAMDNodes(), nullptr,
        SI.getSyncScopeID(), SI.getOrdering());
    MIRBuilder.buildStore(Vals[i], Addr, *MMO);
  }
  return true;
}
537 
// Compute the bit offset designated by the indices of an extractvalue /
// insertvalue (or index-carrying user) \p U into its aggregate operand.
static uint64_t getOffsetFromIndices(const User &U, const DataLayout &DL) {
  const Value *Src = U.getOperand(0);
  // NOTE(review): the declaration of Int32Ty (presumably
  // Type::getInt32Ty(U.getContext())) is not visible in this chunk.

  // getIndexedOffsetInType is designed for GEPs, so the first index is the
  // usual array element rather than looking into the actual aggregate.
  SmallVector<Value *, 1> Indices;
  Indices.push_back(ConstantInt::get(Int32Ty, 0));

  if (const ExtractValueInst *EVI = dyn_cast<ExtractValueInst>(&U)) {
    for (auto Idx : EVI->indices())
      Indices.push_back(ConstantInt::get(Int32Ty, Idx));
  } else if (const InsertValueInst *IVI = dyn_cast<InsertValueInst>(&U)) {
    for (auto Idx : IVI->indices())
      Indices.push_back(ConstantInt::get(Int32Ty, Idx));
  } else {
    // Fallback: treat the remaining operands themselves as indices.
    for (unsigned i = 1; i < U.getNumOperands(); ++i)
      Indices.push_back(U.getOperand(i));
  }

  // DataLayout reports bytes; callers want bits.
  return 8 * static_cast<uint64_t>(
                 DL.getIndexedOffsetInType(Src->getType(), Indices));
}
561 
562 bool IRTranslator::translateExtractValue(const User &U,
563  MachineIRBuilder &MIRBuilder) {
564  const Value *Src = U.getOperand(0);
565  uint64_t Offset = getOffsetFromIndices(U, *DL);
566  ArrayRef<unsigned> SrcRegs = getOrCreateVRegs(*Src);
567  ArrayRef<uint64_t> Offsets = *VMap.getOffsets(*Src);
568  unsigned Idx = std::lower_bound(Offsets.begin(), Offsets.end(), Offset) -
569  Offsets.begin();
570  auto &DstRegs = allocateVRegs(U);
571 
572  for (unsigned i = 0; i < DstRegs.size(); ++i)
573  DstRegs[i] = SrcRegs[Idx++];
574 
575  return true;
576 }
577 
578 bool IRTranslator::translateInsertValue(const User &U,
579  MachineIRBuilder &MIRBuilder) {
580  const Value *Src = U.getOperand(0);
581  uint64_t Offset = getOffsetFromIndices(U, *DL);
582  auto &DstRegs = allocateVRegs(U);
583  ArrayRef<uint64_t> DstOffsets = *VMap.getOffsets(U);
584  ArrayRef<unsigned> SrcRegs = getOrCreateVRegs(*Src);
585  ArrayRef<unsigned> InsertedRegs = getOrCreateVRegs(*U.getOperand(1));
586  auto InsertedIt = InsertedRegs.begin();
587 
588  for (unsigned i = 0; i < DstRegs.size(); ++i) {
589  if (DstOffsets[i] >= Offset && InsertedIt != InsertedRegs.end())
590  DstRegs[i] = *InsertedIt++;
591  else
592  DstRegs[i] = SrcRegs[i];
593  }
594 
595  return true;
596 }
597 
// Translate a select: aggregates are split into one G_SELECT per
// constituent vreg, all sharing the same condition register.
bool IRTranslator::translateSelect(const User &U,
                                   MachineIRBuilder &MIRBuilder) {
  unsigned Tst = getOrCreateVReg(*U.getOperand(0));
  ArrayRef<unsigned> ResRegs = getOrCreateVRegs(U);
  ArrayRef<unsigned> Op0Regs = getOrCreateVRegs(*U.getOperand(1));
  ArrayRef<unsigned> Op1Regs = getOrCreateVRegs(*U.getOperand(2));

  const SelectInst &SI = cast<SelectInst>(U);
  uint16_t Flags = 0;
  // NOTE(review): the statement copying Cmp's flags into Flags is not
  // visible in this chunk, so this `if` currently binds to the loop below —
  // confirm against the original source.
  if (const CmpInst *Cmp = dyn_cast<CmpInst>(SI.getCondition()))

  for (unsigned i = 0; i < ResRegs.size(); ++i) {
    MIRBuilder.buildInstr(TargetOpcode::G_SELECT, {ResRegs[i]},
                          {Tst, Op0Regs[i], Op1Regs[i]}, Flags);
  }

  return true;
}
617 
618 bool IRTranslator::translateBitCast(const User &U,
619  MachineIRBuilder &MIRBuilder) {
620  // If we're bitcasting to the source type, we can reuse the source vreg.
621  if (getLLTForType(*U.getOperand(0)->getType(), *DL) ==
622  getLLTForType(*U.getType(), *DL)) {
623  unsigned SrcReg = getOrCreateVReg(*U.getOperand(0));
624  auto &Regs = *VMap.getVRegs(U);
625  // If we already assigned a vreg for this bitcast, we can't change that.
626  // Emit a copy to satisfy the users we already emitted.
627  if (!Regs.empty())
628  MIRBuilder.buildCopy(Regs[0], SrcReg);
629  else {
630  Regs.push_back(SrcReg);
631  VMap.getOffsets(U)->push_back(0);
632  }
633  return true;
634  }
635  return translateCast(TargetOpcode::G_BITCAST, U, MIRBuilder);
636 }
637 
638 bool IRTranslator::translateCast(unsigned Opcode, const User &U,
639  MachineIRBuilder &MIRBuilder) {
640  unsigned Op = getOrCreateVReg(*U.getOperand(0));
641  unsigned Res = getOrCreateVReg(U);
642  MIRBuilder.buildInstr(Opcode).addDef(Res).addUse(Op);
643  return true;
644 }
645 
// Translate a GEP. Constant offsets are accumulated and folded into a
// single G_GEP; a dynamic index first flushes any pending constant offset,
// then contributes Idx * ElementSize through G_MUL + G_GEP.
bool IRTranslator::translateGetElementPtr(const User &U,
                                          MachineIRBuilder &MIRBuilder) {
  // FIXME: support vector GEPs.
  if (U.getType()->isVectorTy())
    return false;

  Value &Op0 = *U.getOperand(0);
  unsigned BaseReg = getOrCreateVReg(Op0);
  Type *PtrIRTy = Op0.getType();
  LLT PtrTy = getLLTForType(*PtrIRTy, *DL);
  // Offsets are computed in the pointer-sized integer type.
  Type *OffsetIRTy = DL->getIntPtrType(PtrIRTy);
  LLT OffsetTy = getLLTForType(*OffsetIRTy, *DL);

  // Running constant byte offset not yet materialized into a G_GEP.
  int64_t Offset = 0;
  for (gep_type_iterator GTI = gep_type_begin(&U), E = gep_type_end(&U);
       GTI != E; ++GTI) {
    const Value *Idx = GTI.getOperand();
    if (StructType *StTy = GTI.getStructTypeOrNull()) {
      // Struct field: always a constant offset taken from the layout.
      unsigned Field = cast<Constant>(Idx)->getUniqueInteger().getZExtValue();
      Offset += DL->getStructLayout(StTy)->getElementOffset(Field);
      continue;
    } else {
      uint64_t ElementSize = DL->getTypeAllocSize(GTI.getIndexedType());

      // If this is a scalar constant or a splat vector of constants,
      // handle it quickly.
      if (const auto *CI = dyn_cast<ConstantInt>(Idx)) {
        Offset += ElementSize * CI->getSExtValue();
        continue;
      }

      // Dynamic index: flush the pending constant offset first so the
      // multiply below applies to the right base.
      if (Offset != 0) {
        unsigned NewBaseReg = MRI->createGenericVirtualRegister(PtrTy);
        unsigned OffsetReg =
            getOrCreateVReg(*ConstantInt::get(OffsetIRTy, Offset));
        MIRBuilder.buildGEP(NewBaseReg, BaseReg, OffsetReg);

        BaseReg = NewBaseReg;
        Offset = 0;
      }

      // Normalize the index to the offset type (sext or trunc as needed).
      unsigned IdxReg = getOrCreateVReg(*Idx);
      if (MRI->getType(IdxReg) != OffsetTy) {
        unsigned NewIdxReg = MRI->createGenericVirtualRegister(OffsetTy);
        MIRBuilder.buildSExtOrTrunc(NewIdxReg, IdxReg);
        IdxReg = NewIdxReg;
      }

      // N = N + Idx * ElementSize;
      // Avoid doing it for ElementSize of 1.
      unsigned GepOffsetReg;
      if (ElementSize != 1) {
        unsigned ElementSizeReg =
            getOrCreateVReg(*ConstantInt::get(OffsetIRTy, ElementSize));

        GepOffsetReg = MRI->createGenericVirtualRegister(OffsetTy);
        MIRBuilder.buildMul(GepOffsetReg, ElementSizeReg, IdxReg);
      } else
        GepOffsetReg = IdxReg;

      unsigned NewBaseReg = MRI->createGenericVirtualRegister(PtrTy);
      MIRBuilder.buildGEP(NewBaseReg, BaseReg, GepOffsetReg);
      BaseReg = NewBaseReg;
    }
  }

  // Fold any remaining constant offset into the final result.
  if (Offset != 0) {
    unsigned OffsetReg = getOrCreateVReg(*ConstantInt::get(OffsetIRTy, Offset));
    MIRBuilder.buildGEP(getOrCreateVReg(U), BaseReg, OffsetReg);
    return true;
  }

  MIRBuilder.buildCopy(getOrCreateVReg(U), BaseReg);
  return true;
}
721 
// Lower a memcpy/memmove/memset intrinsic call \p CI (intrinsic id \p ID)
// into a call to the corresponding libc function. Returns false (aborting
// translation of this call) for non-zero address spaces or when the length
// argument's width differs from the pointer width.
bool IRTranslator::translateMemfunc(const CallInst &CI,
                                    MachineIRBuilder &MIRBuilder,
                                    unsigned ID) {
  LLT SizeTy = getLLTForType(*CI.getArgOperand(2)->getType(), *DL);
  Type *DstTy = CI.getArgOperand(0)->getType();
  // Only address space 0 and pointer-sized lengths are supported.
  if (cast<PointerType>(DstTy)->getAddressSpace() != 0 ||
      SizeTy.getSizeInBits() != DL->getPointerSizeInBits(0))
    return false;

  // NOTE(review): the declaration of Args (presumably a SmallVector of
  // CallLowering::ArgInfo) is not visible in this chunk.
  for (int i = 0; i < 3; ++i) {
    const auto &Arg = CI.getArgOperand(i);
    Args.emplace_back(getOrCreateVReg(*Arg), Arg->getType());
  }

  const char *Callee;
  switch (ID) {
  case Intrinsic::memmove:
  case Intrinsic::memcpy: {
    Type *SrcTy = CI.getArgOperand(1)->getType();
    // The source pointer must also be in address space 0.
    if (cast<PointerType>(SrcTy)->getAddressSpace() != 0)
      return false;
    Callee = ID == Intrinsic::memcpy ? "memcpy" : "memmove";
    break;
  }
  case Intrinsic::memset:
    Callee = "memset";
    break;
  default:
    return false;
  }

  // Result register 0: the libcall's return value is not used.
  return CLI->lowerCall(MIRBuilder, CI.getCallingConv(),
                        MachineOperand::CreateES(Callee),
                        CallLowering::ArgInfo(0, CI.getType()), Args);
}
758 
// Materialize the stack-guard value into \p DstReg via the target's
// LOAD_STACK_GUARD pseudo, attaching a memory operand for the IR
// stack-guard global when the target defines one.
void IRTranslator::getStackGuard(unsigned DstReg,
                                 MachineIRBuilder &MIRBuilder) {
  const TargetRegisterInfo *TRI = MF->getSubtarget().getRegisterInfo();
  // LOAD_STACK_GUARD is a target pseudo, so the result is constrained to
  // the pointer register class rather than a generic LLT.
  MRI->setRegClass(DstReg, TRI->getPointerRegClass(*MF));
  auto MIB = MIRBuilder.buildInstr(TargetOpcode::LOAD_STACK_GUARD);
  MIB.addDef(DstReg);

  auto &TLI = *MF->getSubtarget().getTargetLowering();
  Value *Global = TLI.getSDagStackGuard(*MF->getFunction().getParent());
  if (!Global)
    return;

  MachinePointerInfo MPInfo(Global);
  // NOTE(review): the declarations of Flags (the MachineMemOperand flag
  // set) and MemRef (the MachineMemOperand* initialized from the call
  // below) are not visible in this chunk — confirm against the original.
  MF->getMachineMemOperand(MPInfo, Flags, DL->getPointerSizeInBits() / 8,
                           DL->getPointerABIAlignment(0));
  MIB.setMemRefs({MemRef});
}
779 
780 bool IRTranslator::translateOverflowIntrinsic(const CallInst &CI, unsigned Op,
781  MachineIRBuilder &MIRBuilder) {
782  ArrayRef<unsigned> ResRegs = getOrCreateVRegs(CI);
783  MIRBuilder.buildInstr(Op)
784  .addDef(ResRegs[0])
785  .addDef(ResRegs[1])
786  .addUse(getOrCreateVReg(*CI.getOperand(0)))
787  .addUse(getOrCreateVReg(*CI.getOperand(1)));
788 
789  return true;
790 }
791 
// Map an intrinsic ID to its 1:1 "simple" generic opcode, when one exists.
// NOTE(review): the fall-through return after the switch (presumably
// `return Intrinsic::not_intrinsic;`, which translateSimpleIntrinsic tests
// against) is not visible in this chunk — confirm against the original.
unsigned IRTranslator::getSimpleIntrinsicOpcode(Intrinsic::ID ID) {
  switch (ID) {
  default:
    break;
  case Intrinsic::bswap:
    return TargetOpcode::G_BSWAP;
  case Intrinsic::ceil:
    return TargetOpcode::G_FCEIL;
  case Intrinsic::cos:
    return TargetOpcode::G_FCOS;
  case Intrinsic::ctpop:
    return TargetOpcode::G_CTPOP;
  case Intrinsic::exp:
    return TargetOpcode::G_FEXP;
  case Intrinsic::exp2:
    return TargetOpcode::G_FEXP2;
  case Intrinsic::fabs:
    return TargetOpcode::G_FABS;
  case Intrinsic::canonicalize:
    return TargetOpcode::G_FCANONICALIZE;
  case Intrinsic::floor:
    return TargetOpcode::G_FFLOOR;
  case Intrinsic::fma:
    return TargetOpcode::G_FMA;
  case Intrinsic::log:
    return TargetOpcode::G_FLOG;
  case Intrinsic::log2:
    return TargetOpcode::G_FLOG2;
  case Intrinsic::log10:
    return TargetOpcode::G_FLOG10;
  case Intrinsic::pow:
    return TargetOpcode::G_FPOW;
  case Intrinsic::round:
    return TargetOpcode::G_INTRINSIC_ROUND;
  case Intrinsic::sin:
    return TargetOpcode::G_FSIN;
  case Intrinsic::sqrt:
    return TargetOpcode::G_FSQRT;
  case Intrinsic::trunc:
    return TargetOpcode::G_INTRINSIC_TRUNC;
  }
}
835 
// If \p ID maps 1:1 onto a generic opcode (one def plus a use per argument),
// emit that instruction and return true; otherwise return false so the
// caller can handle the intrinsic specially.
bool IRTranslator::translateSimpleIntrinsic(const CallInst &CI,
                                            Intrinsic::ID ID,
                                            MachineIRBuilder &MIRBuilder) {

  unsigned Op = getSimpleIntrinsicOpcode(ID);

  // Is this a simple intrinsic?
  if (Op == Intrinsic::not_intrinsic)
    return false;

  // Yes. Let's translate it.
  // NOTE(review): the declaration of VRegs (presumably a
  // SmallVector<unsigned>) is not visible in this chunk.
  for (auto &Arg : CI.arg_operands())
    VRegs.push_back(getOrCreateVReg(*Arg));

  // NOTE(review): the trailing arguments of this buildInstr call (presumably
  // a flags operand and the closing paren) are not visible in this chunk.
  MIRBuilder.buildInstr(Op, {getOrCreateVReg(CI)}, VRegs,
  return true;
}
855 
856 bool IRTranslator::translateKnownIntrinsic(const CallInst &CI, Intrinsic::ID ID,
857  MachineIRBuilder &MIRBuilder) {
858 
859  // If this is a simple intrinsic (that is, we just need to add a def of
860  // a vreg, and uses for each arg operand, then translate it.
861  if (translateSimpleIntrinsic(CI, ID, MIRBuilder))
862  return true;
863 
864  switch (ID) {
865  default:
866  break;
867  case Intrinsic::lifetime_start:
868  case Intrinsic::lifetime_end: {
869  // No stack colouring in O0, discard region information.
870  if (MF->getTarget().getOptLevel() == CodeGenOpt::None)
871  return true;
872 
873  unsigned Op = ID == Intrinsic::lifetime_start ? TargetOpcode::LIFETIME_START
875 
876  // Get the underlying objects for the location passed on the lifetime
877  // marker.
878  SmallVector<Value *, 4> Allocas;
879  GetUnderlyingObjects(CI.getArgOperand(1), Allocas, *DL);
880 
881  // Iterate over each underlying object, creating lifetime markers for each
882  // static alloca. Quit if we find a non-static alloca.
883  for (Value *V : Allocas) {
884  AllocaInst *AI = dyn_cast<AllocaInst>(V);
885  if (!AI)
886  continue;
887 
888  if (!AI->isStaticAlloca())
889  return true;
890 
891  MIRBuilder.buildInstr(Op).addFrameIndex(getOrCreateFrameIndex(*AI));
892  }
893  return true;
894  }
895  case Intrinsic::dbg_declare: {
896  const DbgDeclareInst &DI = cast<DbgDeclareInst>(CI);
897  assert(DI.getVariable() && "Missing variable");
898 
899  const Value *Address = DI.getAddress();
900  if (!Address || isa<UndefValue>(Address)) {
901  LLVM_DEBUG(dbgs() << "Dropping debug info for " << DI << "\n");
902  return true;
903  }
904 
906  MIRBuilder.getDebugLoc()) &&
907  "Expected inlined-at fields to agree");
908  auto AI = dyn_cast<AllocaInst>(Address);
909  if (AI && AI->isStaticAlloca()) {
910  // Static allocas are tracked at the MF level, no need for DBG_VALUE
911  // instructions (in fact, they get ignored if they *do* exist).
912  MF->setVariableDbgInfo(DI.getVariable(), DI.getExpression(),
913  getOrCreateFrameIndex(*AI), DI.getDebugLoc());
914  } else {
915  // A dbg.declare describes the address of a source variable, so lower it
916  // into an indirect DBG_VALUE.
917  MIRBuilder.buildIndirectDbgValue(getOrCreateVReg(*Address),
918  DI.getVariable(), DI.getExpression());
919  }
920  return true;
921  }
922  case Intrinsic::dbg_label: {
923  const DbgLabelInst &DI = cast<DbgLabelInst>(CI);
924  assert(DI.getLabel() && "Missing label");
925 
927  MIRBuilder.getDebugLoc()) &&
928  "Expected inlined-at fields to agree");
929 
930  MIRBuilder.buildDbgLabel(DI.getLabel());
931  return true;
932  }
933  case Intrinsic::vaend:
934  // No target I know of cares about va_end. Certainly no in-tree target
935  // does. Simplest intrinsic ever!
936  return true;
937  case Intrinsic::vastart: {
938  auto &TLI = *MF->getSubtarget().getTargetLowering();
939  Value *Ptr = CI.getArgOperand(0);
940  unsigned ListSize = TLI.getVaListSizeInBits(*DL) / 8;
941 
942  // FIXME: Get alignment
943  MIRBuilder.buildInstr(TargetOpcode::G_VASTART)
944  .addUse(getOrCreateVReg(*Ptr))
945  .addMemOperand(MF->getMachineMemOperand(
946  MachinePointerInfo(Ptr), MachineMemOperand::MOStore, ListSize, 1));
947  return true;
948  }
949  case Intrinsic::dbg_value: {
950  // This form of DBG_VALUE is target-independent.
951  const DbgValueInst &DI = cast<DbgValueInst>(CI);
952  const Value *V = DI.getValue();
954  MIRBuilder.getDebugLoc()) &&
955  "Expected inlined-at fields to agree");
956  if (!V) {
957  // Currently the optimizer can produce this; insert an undef to
958  // help debugging. Probably the optimizer should not do this.
959  MIRBuilder.buildIndirectDbgValue(0, DI.getVariable(), DI.getExpression());
960  } else if (const auto *CI = dyn_cast<Constant>(V)) {
961  MIRBuilder.buildConstDbgValue(*CI, DI.getVariable(), DI.getExpression());
962  } else {
963  unsigned Reg = getOrCreateVReg(*V);
964  // FIXME: This does not handle register-indirect values at offset 0. The
965  // direct/indirect thing shouldn't really be handled by something as
966  // implicit as reg+noreg vs reg+imm in the first palce, but it seems
967  // pretty baked in right now.
968  MIRBuilder.buildDirectDbgValue(Reg, DI.getVariable(), DI.getExpression());
969  }
970  return true;
971  }
972  case Intrinsic::uadd_with_overflow:
973  return translateOverflowIntrinsic(CI, TargetOpcode::G_UADDO, MIRBuilder);
974  case Intrinsic::sadd_with_overflow:
975  return translateOverflowIntrinsic(CI, TargetOpcode::G_SADDO, MIRBuilder);
976  case Intrinsic::usub_with_overflow:
977  return translateOverflowIntrinsic(CI, TargetOpcode::G_USUBO, MIRBuilder);
978  case Intrinsic::ssub_with_overflow:
979  return translateOverflowIntrinsic(CI, TargetOpcode::G_SSUBO, MIRBuilder);
980  case Intrinsic::umul_with_overflow:
981  return translateOverflowIntrinsic(CI, TargetOpcode::G_UMULO, MIRBuilder);
982  case Intrinsic::smul_with_overflow:
983  return translateOverflowIntrinsic(CI, TargetOpcode::G_SMULO, MIRBuilder);
984  case Intrinsic::fmuladd: {
985  const TargetMachine &TM = MF->getTarget();
986  const TargetLowering &TLI = *MF->getSubtarget().getTargetLowering();
987  unsigned Dst = getOrCreateVReg(CI);
988  unsigned Op0 = getOrCreateVReg(*CI.getArgOperand(0));
989  unsigned Op1 = getOrCreateVReg(*CI.getArgOperand(1));
990  unsigned Op2 = getOrCreateVReg(*CI.getArgOperand(2));
992  TLI.isFMAFasterThanFMulAndFAdd(TLI.getValueType(*DL, CI.getType()))) {
993  // TODO: Revisit this to see if we should move this part of the
994  // lowering to the combiner.
995  MIRBuilder.buildInstr(TargetOpcode::G_FMA, {Dst}, {Op0, Op1, Op2},
997  } else {
998  LLT Ty = getLLTForType(*CI.getType(), *DL);
999  auto FMul = MIRBuilder.buildInstr(TargetOpcode::G_FMUL, {Ty}, {Op0, Op1},
1001  MIRBuilder.buildInstr(TargetOpcode::G_FADD, {Dst}, {FMul, Op2},
1003  }
1004  return true;
1005  }
1006  case Intrinsic::memcpy:
1007  case Intrinsic::memmove:
1008  case Intrinsic::memset:
1009  return translateMemfunc(CI, MIRBuilder, ID);
1010  case Intrinsic::eh_typeid_for: {
1012  unsigned Reg = getOrCreateVReg(CI);
1013  unsigned TypeID = MF->getTypeIDFor(GV);
1014  MIRBuilder.buildConstant(Reg, TypeID);
1015  return true;
1016  }
1017  case Intrinsic::objectsize: {
1018  // If we don't know by now, we're never going to know.
1019  const ConstantInt *Min = cast<ConstantInt>(CI.getArgOperand(1));
1020 
1021  MIRBuilder.buildConstant(getOrCreateVReg(CI), Min->isZero() ? -1ULL : 0);
1022  return true;
1023  }
1024  case Intrinsic::is_constant:
1025  // If this wasn't constant-folded away by now, then it's not a
1026  // constant.
1027  MIRBuilder.buildConstant(getOrCreateVReg(CI), 0);
1028  return true;
1029  case Intrinsic::stackguard:
1030  getStackGuard(getOrCreateVReg(CI), MIRBuilder);
1031  return true;
1032  case Intrinsic::stackprotector: {
1033  LLT PtrTy = getLLTForType(*CI.getArgOperand(0)->getType(), *DL);
1034  unsigned GuardVal = MRI->createGenericVirtualRegister(PtrTy);
1035  getStackGuard(GuardVal, MIRBuilder);
1036 
1037  AllocaInst *Slot = cast<AllocaInst>(CI.getArgOperand(1));
1038  int FI = getOrCreateFrameIndex(*Slot);
1039  MF->getFrameInfo().setStackProtectorIndex(FI);
1040 
1041  MIRBuilder.buildStore(
1042  GuardVal, getOrCreateVReg(*Slot),
1043  *MF->getMachineMemOperand(MachinePointerInfo::getFixedStack(*MF, FI),
1046  PtrTy.getSizeInBits() / 8, 8));
1047  return true;
1048  }
1049  case Intrinsic::cttz:
1050  case Intrinsic::ctlz: {
1051  ConstantInt *Cst = cast<ConstantInt>(CI.getArgOperand(1));
1052  bool isTrailing = ID == Intrinsic::cttz;
1053  unsigned Opcode = isTrailing
1054  ? Cst->isZero() ? TargetOpcode::G_CTTZ
1055  : TargetOpcode::G_CTTZ_ZERO_UNDEF
1056  : Cst->isZero() ? TargetOpcode::G_CTLZ
1057  : TargetOpcode::G_CTLZ_ZERO_UNDEF;
1058  MIRBuilder.buildInstr(Opcode)
1059  .addDef(getOrCreateVReg(CI))
1060  .addUse(getOrCreateVReg(*CI.getArgOperand(0)));
1061  return true;
1062  }
1063  case Intrinsic::invariant_start: {
1064  LLT PtrTy = getLLTForType(*CI.getArgOperand(0)->getType(), *DL);
1065  unsigned Undef = MRI->createGenericVirtualRegister(PtrTy);
1066  MIRBuilder.buildUndef(Undef);
1067  return true;
1068  }
1069  case Intrinsic::invariant_end:
1070  return true;
1071  }
1072  return false;
1073 }
1074 
1075 bool IRTranslator::translateInlineAsm(const CallInst &CI,
1076  MachineIRBuilder &MIRBuilder) {
1077  const InlineAsm &IA = cast<InlineAsm>(*CI.getCalledValue());
1078  if (!IA.getConstraintString().empty())
1079  return false;
1080 
1081  unsigned ExtraInfo = 0;
1082  if (IA.hasSideEffects())
1083  ExtraInfo |= InlineAsm::Extra_HasSideEffects;
1084  if (IA.getDialect() == InlineAsm::AD_Intel)
1085  ExtraInfo |= InlineAsm::Extra_AsmDialect;
1086 
1088  .addExternalSymbol(IA.getAsmString().c_str())
1089  .addImm(ExtraInfo);
1090 
1091  return true;
1092 }
1093 
1094 unsigned IRTranslator::packRegs(const Value &V,
1095  MachineIRBuilder &MIRBuilder) {
1096  ArrayRef<unsigned> Regs = getOrCreateVRegs(V);
1097  ArrayRef<uint64_t> Offsets = *VMap.getOffsets(V);
1098  LLT BigTy = getLLTForType(*V.getType(), *DL);
1099 
1100  if (Regs.size() == 1)
1101  return Regs[0];
1102 
1103  unsigned Dst = MRI->createGenericVirtualRegister(BigTy);
1104  MIRBuilder.buildUndef(Dst);
1105  for (unsigned i = 0; i < Regs.size(); ++i) {
1106  unsigned NewDst = MRI->createGenericVirtualRegister(BigTy);
1107  MIRBuilder.buildInsert(NewDst, Dst, Regs[i], Offsets[i]);
1108  Dst = NewDst;
1109  }
1110  return Dst;
1111 }
1112 
1113 void IRTranslator::unpackRegs(const Value &V, unsigned Src,
1114  MachineIRBuilder &MIRBuilder) {
1115  ArrayRef<unsigned> Regs = getOrCreateVRegs(V);
1116  ArrayRef<uint64_t> Offsets = *VMap.getOffsets(V);
1117 
1118  for (unsigned i = 0; i < Regs.size(); ++i)
1119  MIRBuilder.buildExtract(Regs[i], Src, Offsets[i]);
1120 }
1121 
1122 bool IRTranslator::translateCall(const User &U, MachineIRBuilder &MIRBuilder) {
1123  const CallInst &CI = cast<CallInst>(U);
1124  auto TII = MF->getTarget().getIntrinsicInfo();
1125  const Function *F = CI.getCalledFunction();
1126 
1127  // FIXME: support Windows dllimport function calls.
1128  if (F && F->hasDLLImportStorageClass())
1129  return false;
1130 
1131  if (CI.isInlineAsm())
1132  return translateInlineAsm(CI, MIRBuilder);
1133 
1135  if (F && F->isIntrinsic()) {
1136  ID = F->getIntrinsicID();
1137  if (TII && ID == Intrinsic::not_intrinsic)
1138  ID = static_cast<Intrinsic::ID>(TII->getIntrinsicID(F));
1139  }
1140 
1141  if (!F || !F->isIntrinsic() || ID == Intrinsic::not_intrinsic) {
1142  bool IsSplitType = valueIsSplit(CI);
1143  unsigned Res = IsSplitType ? MRI->createGenericVirtualRegister(
1144  getLLTForType(*CI.getType(), *DL))
1145  : getOrCreateVReg(CI);
1146 
1148  for (auto &Arg: CI.arg_operands())
1149  Args.push_back(packRegs(*Arg, MIRBuilder));
1150 
1151  MF->getFrameInfo().setHasCalls(true);
1152  bool Success = CLI->lowerCall(MIRBuilder, &CI, Res, Args, [&]() {
1153  return getOrCreateVReg(*CI.getCalledValue());
1154  });
1155 
1156  if (IsSplitType)
1157  unpackRegs(CI, Res, MIRBuilder);
1158  return Success;
1159  }
1160 
1161  assert(ID != Intrinsic::not_intrinsic && "unknown intrinsic");
1162 
1163  if (translateKnownIntrinsic(CI, ID, MIRBuilder))
1164  return true;
1165 
1166  ArrayRef<unsigned> ResultRegs;
1167  if (!CI.getType()->isVoidTy())
1168  ResultRegs = getOrCreateVRegs(CI);
1169 
1170  MachineInstrBuilder MIB =
1171  MIRBuilder.buildIntrinsic(ID, ResultRegs, !CI.doesNotAccessMemory());
1172 
1173  for (auto &Arg : CI.arg_operands()) {
1174  // Some intrinsics take metadata parameters. Reject them.
1175  if (isa<MetadataAsValue>(Arg))
1176  return false;
1177  MIB.addUse(packRegs(*Arg, MIRBuilder));
1178  }
1179 
1180  // Add a MachineMemOperand if it is a target mem intrinsic.
1181  const TargetLowering &TLI = *MF->getSubtarget().getTargetLowering();
1182  TargetLowering::IntrinsicInfo Info;
1183  // TODO: Add a GlobalISel version of getTgtMemIntrinsic.
1184  if (TLI.getTgtMemIntrinsic(Info, CI, *MF, ID)) {
1185  unsigned Align = Info.align;
1186  if (Align == 0)
1187  Align = DL->getABITypeAlignment(Info.memVT.getTypeForEVT(F->getContext()));
1188 
1189  uint64_t Size = Info.memVT.getStoreSize();
1190  MIB.addMemOperand(MF->getMachineMemOperand(MachinePointerInfo(Info.ptrVal),
1191  Info.flags, Size, Align));
1192  }
1193 
1194  return true;
1195 }
1196 
1197 bool IRTranslator::translateInvoke(const User &U,
1198  MachineIRBuilder &MIRBuilder) {
1199  const InvokeInst &I = cast<InvokeInst>(U);
1200  MCContext &Context = MF->getContext();
1201 
1202  const BasicBlock *ReturnBB = I.getSuccessor(0);
1203  const BasicBlock *EHPadBB = I.getSuccessor(1);
1204 
1205  const Value *Callee = I.getCalledValue();
1206  const Function *Fn = dyn_cast<Function>(Callee);
1207  if (isa<InlineAsm>(Callee))
1208  return false;
1209 
1210  // FIXME: support invoking patchpoint and statepoint intrinsics.
1211  if (Fn && Fn->isIntrinsic())
1212  return false;
1213 
1214  // FIXME: support whatever these are.
1216  return false;
1217 
1218  // FIXME: support Windows exception handling.
1219  if (!isa<LandingPadInst>(EHPadBB->front()))
1220  return false;
1221 
1222  // Emit the actual call, bracketed by EH_LABELs so that the MF knows about
1223  // the region covered by the try.
1224  MCSymbol *BeginSymbol = Context.createTempSymbol();
1225  MIRBuilder.buildInstr(TargetOpcode::EH_LABEL).addSym(BeginSymbol);
1226 
1227  unsigned Res =
1228  MRI->createGenericVirtualRegister(getLLTForType(*I.getType(), *DL));
1230  for (auto &Arg: I.arg_operands())
1231  Args.push_back(packRegs(*Arg, MIRBuilder));
1232 
1233  if (!CLI->lowerCall(MIRBuilder, &I, Res, Args,
1234  [&]() { return getOrCreateVReg(*I.getCalledValue()); }))
1235  return false;
1236 
1237  unpackRegs(I, Res, MIRBuilder);
1238 
1239  MCSymbol *EndSymbol = Context.createTempSymbol();
1240  MIRBuilder.buildInstr(TargetOpcode::EH_LABEL).addSym(EndSymbol);
1241 
1242  // FIXME: track probabilities.
1243  MachineBasicBlock &EHPadMBB = getMBB(*EHPadBB),
1244  &ReturnMBB = getMBB(*ReturnBB);
1245  MF->addInvoke(&EHPadMBB, BeginSymbol, EndSymbol);
1246  MIRBuilder.getMBB().addSuccessor(&ReturnMBB);
1247  MIRBuilder.getMBB().addSuccessor(&EHPadMBB);
1248  MIRBuilder.buildBr(ReturnMBB);
1249 
1250  return true;
1251 }
1252 
1253 bool IRTranslator::translateCallBr(const User &U,
1254  MachineIRBuilder &MIRBuilder) {
1255  // FIXME: Implement this.
1256  return false;
1257 }
1258 
1259 bool IRTranslator::translateLandingPad(const User &U,
1260  MachineIRBuilder &MIRBuilder) {
1261  const LandingPadInst &LP = cast<LandingPadInst>(U);
1262 
1263  MachineBasicBlock &MBB = MIRBuilder.getMBB();
1264 
1265  MBB.setIsEHPad();
1266 
1267  // If there aren't registers to copy the values into (e.g., during SjLj
1268  // exceptions), then don't bother.
1269  auto &TLI = *MF->getSubtarget().getTargetLowering();
1270  const Constant *PersonalityFn = MF->getFunction().getPersonalityFn();
1271  if (TLI.getExceptionPointerRegister(PersonalityFn) == 0 &&
1272  TLI.getExceptionSelectorRegister(PersonalityFn) == 0)
1273  return true;
1274 
1275  // If landingpad's return type is token type, we don't create DAG nodes
1276  // for its exception pointer and selector value. The extraction of exception
1277  // pointer or selector value from token type landingpads is not currently
1278  // supported.
1279  if (LP.getType()->isTokenTy())
1280  return true;
1281 
1282  // Add a label to mark the beginning of the landing pad. Deletion of the
1283  // landing pad can thus be detected via the MachineModuleInfo.
1285  .addSym(MF->addLandingPad(&MBB));
1286 
1287  LLT Ty = getLLTForType(*LP.getType(), *DL);
1288  unsigned Undef = MRI->createGenericVirtualRegister(Ty);
1289  MIRBuilder.buildUndef(Undef);
1290 
1291  SmallVector<LLT, 2> Tys;
1292  for (Type *Ty : cast<StructType>(LP.getType())->elements())
1293  Tys.push_back(getLLTForType(*Ty, *DL));
1294  assert(Tys.size() == 2 && "Only two-valued landingpads are supported");
1295 
1296  // Mark exception register as live in.
1297  unsigned ExceptionReg = TLI.getExceptionPointerRegister(PersonalityFn);
1298  if (!ExceptionReg)
1299  return false;
1300 
1301  MBB.addLiveIn(ExceptionReg);
1302  ArrayRef<unsigned> ResRegs = getOrCreateVRegs(LP);
1303  MIRBuilder.buildCopy(ResRegs[0], ExceptionReg);
1304 
1305  unsigned SelectorReg = TLI.getExceptionSelectorRegister(PersonalityFn);
1306  if (!SelectorReg)
1307  return false;
1308 
1309  MBB.addLiveIn(SelectorReg);
1310  unsigned PtrVReg = MRI->createGenericVirtualRegister(Tys[0]);
1311  MIRBuilder.buildCopy(PtrVReg, SelectorReg);
1312  MIRBuilder.buildCast(ResRegs[1], PtrVReg);
1313 
1314  return true;
1315 }
1316 
1317 bool IRTranslator::translateAlloca(const User &U,
1318  MachineIRBuilder &MIRBuilder) {
1319  auto &AI = cast<AllocaInst>(U);
1320 
1321  if (AI.isSwiftError())
1322  return false;
1323 
1324  if (AI.isStaticAlloca()) {
1325  unsigned Res = getOrCreateVReg(AI);
1326  int FI = getOrCreateFrameIndex(AI);
1327  MIRBuilder.buildFrameIndex(Res, FI);
1328  return true;
1329  }
1330 
1331  // FIXME: support stack probing for Windows.
1332  if (MF->getTarget().getTargetTriple().isOSWindows())
1333  return false;
1334 
1335  // Now we're in the harder dynamic case.
1336  Type *Ty = AI.getAllocatedType();
1337  unsigned Align =
1338  std::max((unsigned)DL->getPrefTypeAlignment(Ty), AI.getAlignment());
1339 
1340  unsigned NumElts = getOrCreateVReg(*AI.getArraySize());
1341 
1342  Type *IntPtrIRTy = DL->getIntPtrType(AI.getType());
1343  LLT IntPtrTy = getLLTForType(*IntPtrIRTy, *DL);
1344  if (MRI->getType(NumElts) != IntPtrTy) {
1345  unsigned ExtElts = MRI->createGenericVirtualRegister(IntPtrTy);
1346  MIRBuilder.buildZExtOrTrunc(ExtElts, NumElts);
1347  NumElts = ExtElts;
1348  }
1349 
1350  unsigned AllocSize = MRI->createGenericVirtualRegister(IntPtrTy);
1351  unsigned TySize =
1352  getOrCreateVReg(*ConstantInt::get(IntPtrIRTy, -DL->getTypeAllocSize(Ty)));
1353  MIRBuilder.buildMul(AllocSize, NumElts, TySize);
1354 
1355  LLT PtrTy = getLLTForType(*AI.getType(), *DL);
1356  auto &TLI = *MF->getSubtarget().getTargetLowering();
1357  unsigned SPReg = TLI.getStackPointerRegisterToSaveRestore();
1358 
1359  unsigned SPTmp = MRI->createGenericVirtualRegister(PtrTy);
1360  MIRBuilder.buildCopy(SPTmp, SPReg);
1361 
1362  unsigned AllocTmp = MRI->createGenericVirtualRegister(PtrTy);
1363  MIRBuilder.buildGEP(AllocTmp, SPTmp, AllocSize);
1364 
1365  // Handle alignment. We have to realign if the allocation granule was smaller
1366  // than stack alignment, or the specific alloca requires more than stack
1367  // alignment.
1368  unsigned StackAlign =
1369  MF->getSubtarget().getFrameLowering()->getStackAlignment();
1370  Align = std::max(Align, StackAlign);
1371  if (Align > StackAlign || DL->getTypeAllocSize(Ty) % StackAlign != 0) {
1372  // Round the size of the allocation up to the stack alignment size
1373  // by add SA-1 to the size. This doesn't overflow because we're computing
1374  // an address inside an alloca.
1375  unsigned AlignedAlloc = MRI->createGenericVirtualRegister(PtrTy);
1376  MIRBuilder.buildPtrMask(AlignedAlloc, AllocTmp, Log2_32(Align));
1377  AllocTmp = AlignedAlloc;
1378  }
1379 
1380  MIRBuilder.buildCopy(SPReg, AllocTmp);
1381  MIRBuilder.buildCopy(getOrCreateVReg(AI), AllocTmp);
1382 
1383  MF->getFrameInfo().CreateVariableSizedObject(Align ? Align : 1, &AI);
1384  assert(MF->getFrameInfo().hasVarSizedObjects());
1385  return true;
1386 }
1387 
1388 bool IRTranslator::translateVAArg(const User &U, MachineIRBuilder &MIRBuilder) {
1389  // FIXME: We may need more info about the type. Because of how LLT works,
1390  // we're completely discarding the i64/double distinction here (amongst
1391  // others). Fortunately the ABIs I know of where that matters don't use va_arg
1392  // anyway but that's not guaranteed.
1393  MIRBuilder.buildInstr(TargetOpcode::G_VAARG)
1394  .addDef(getOrCreateVReg(U))
1395  .addUse(getOrCreateVReg(*U.getOperand(0)))
1396  .addImm(DL->getABITypeAlignment(U.getType()));
1397  return true;
1398 }
1399 
1400 bool IRTranslator::translateInsertElement(const User &U,
1401  MachineIRBuilder &MIRBuilder) {
1402  // If it is a <1 x Ty> vector, use the scalar as it is
1403  // not a legal vector type in LLT.
1404  if (U.getType()->getVectorNumElements() == 1) {
1405  unsigned Elt = getOrCreateVReg(*U.getOperand(1));
1406  auto &Regs = *VMap.getVRegs(U);
1407  if (Regs.empty()) {
1408  Regs.push_back(Elt);
1409  VMap.getOffsets(U)->push_back(0);
1410  } else {
1411  MIRBuilder.buildCopy(Regs[0], Elt);
1412  }
1413  return true;
1414  }
1415 
1416  unsigned Res = getOrCreateVReg(U);
1417  unsigned Val = getOrCreateVReg(*U.getOperand(0));
1418  unsigned Elt = getOrCreateVReg(*U.getOperand(1));
1419  unsigned Idx = getOrCreateVReg(*U.getOperand(2));
1420  MIRBuilder.buildInsertVectorElement(Res, Val, Elt, Idx);
1421  return true;
1422 }
1423 
1424 bool IRTranslator::translateExtractElement(const User &U,
1425  MachineIRBuilder &MIRBuilder) {
1426  // If it is a <1 x Ty> vector, use the scalar as it is
1427  // not a legal vector type in LLT.
1428  if (U.getOperand(0)->getType()->getVectorNumElements() == 1) {
1429  unsigned Elt = getOrCreateVReg(*U.getOperand(0));
1430  auto &Regs = *VMap.getVRegs(U);
1431  if (Regs.empty()) {
1432  Regs.push_back(Elt);
1433  VMap.getOffsets(U)->push_back(0);
1434  } else {
1435  MIRBuilder.buildCopy(Regs[0], Elt);
1436  }
1437  return true;
1438  }
1439  unsigned Res = getOrCreateVReg(U);
1440  unsigned Val = getOrCreateVReg(*U.getOperand(0));
1441  const auto &TLI = *MF->getSubtarget().getTargetLowering();
1442  unsigned PreferredVecIdxWidth = TLI.getVectorIdxTy(*DL).getSizeInBits();
1443  unsigned Idx = 0;
1444  if (auto *CI = dyn_cast<ConstantInt>(U.getOperand(1))) {
1445  if (CI->getBitWidth() != PreferredVecIdxWidth) {
1446  APInt NewIdx = CI->getValue().sextOrTrunc(PreferredVecIdxWidth);
1447  auto *NewIdxCI = ConstantInt::get(CI->getContext(), NewIdx);
1448  Idx = getOrCreateVReg(*NewIdxCI);
1449  }
1450  }
1451  if (!Idx)
1452  Idx = getOrCreateVReg(*U.getOperand(1));
1453  if (MRI->getType(Idx).getSizeInBits() != PreferredVecIdxWidth) {
1454  const LLT &VecIdxTy = LLT::scalar(PreferredVecIdxWidth);
1455  Idx = MIRBuilder.buildSExtOrTrunc(VecIdxTy, Idx)->getOperand(0).getReg();
1456  }
1457  MIRBuilder.buildExtractVectorElement(Res, Val, Idx);
1458  return true;
1459 }
1460 
1461 bool IRTranslator::translateShuffleVector(const User &U,
1462  MachineIRBuilder &MIRBuilder) {
1463  MIRBuilder.buildInstr(TargetOpcode::G_SHUFFLE_VECTOR)
1464  .addDef(getOrCreateVReg(U))
1465  .addUse(getOrCreateVReg(*U.getOperand(0)))
1466  .addUse(getOrCreateVReg(*U.getOperand(1)))
1467  .addUse(getOrCreateVReg(*U.getOperand(2)));
1468  return true;
1469 }
1470 
1471 bool IRTranslator::translatePHI(const User &U, MachineIRBuilder &MIRBuilder) {
1472  const PHINode &PI = cast<PHINode>(U);
1473 
1475  for (auto Reg : getOrCreateVRegs(PI)) {
1476  auto MIB = MIRBuilder.buildInstr(TargetOpcode::G_PHI, {Reg}, {});
1477  Insts.push_back(MIB.getInstr());
1478  }
1479 
1480  PendingPHIs.emplace_back(&PI, std::move(Insts));
1481  return true;
1482 }
1483 
1484 bool IRTranslator::translateAtomicCmpXchg(const User &U,
1485  MachineIRBuilder &MIRBuilder) {
1486  const AtomicCmpXchgInst &I = cast<AtomicCmpXchgInst>(U);
1487 
1488  if (I.isWeak())
1489  return false;
1490 
1491  auto Flags = I.isVolatile() ? MachineMemOperand::MOVolatile
1494 
1495  Type *ResType = I.getType();
1496  Type *ValType = ResType->Type::getStructElementType(0);
1497 
1498  auto Res = getOrCreateVRegs(I);
1499  unsigned OldValRes = Res[0];
1500  unsigned SuccessRes = Res[1];
1501  unsigned Addr = getOrCreateVReg(*I.getPointerOperand());
1502  unsigned Cmp = getOrCreateVReg(*I.getCompareOperand());
1503  unsigned NewVal = getOrCreateVReg(*I.getNewValOperand());
1504 
1505  MIRBuilder.buildAtomicCmpXchgWithSuccess(
1506  OldValRes, SuccessRes, Addr, Cmp, NewVal,
1507  *MF->getMachineMemOperand(MachinePointerInfo(I.getPointerOperand()),
1508  Flags, DL->getTypeStoreSize(ValType),
1509  getMemOpAlignment(I), AAMDNodes(), nullptr,
1511  I.getFailureOrdering()));
1512  return true;
1513 }
1514 
1515 bool IRTranslator::translateAtomicRMW(const User &U,
1516  MachineIRBuilder &MIRBuilder) {
1517  const AtomicRMWInst &I = cast<AtomicRMWInst>(U);
1518 
1519  auto Flags = I.isVolatile() ? MachineMemOperand::MOVolatile
1522 
1523  Type *ResType = I.getType();
1524 
1525  unsigned Res = getOrCreateVReg(I);
1526  unsigned Addr = getOrCreateVReg(*I.getPointerOperand());
1527  unsigned Val = getOrCreateVReg(*I.getValOperand());
1528 
1529  unsigned Opcode = 0;
1530  switch (I.getOperation()) {
1531  default:
1532  llvm_unreachable("Unknown atomicrmw op");
1533  return false;
1534  case AtomicRMWInst::Xchg:
1535  Opcode = TargetOpcode::G_ATOMICRMW_XCHG;
1536  break;
1537  case AtomicRMWInst::Add:
1538  Opcode = TargetOpcode::G_ATOMICRMW_ADD;
1539  break;
1540  case AtomicRMWInst::Sub:
1541  Opcode = TargetOpcode::G_ATOMICRMW_SUB;
1542  break;
1543  case AtomicRMWInst::And:
1544  Opcode = TargetOpcode::G_ATOMICRMW_AND;
1545  break;
1546  case AtomicRMWInst::Nand:
1547  Opcode = TargetOpcode::G_ATOMICRMW_NAND;
1548  break;
1549  case AtomicRMWInst::Or:
1550  Opcode = TargetOpcode::G_ATOMICRMW_OR;
1551  break;
1552  case AtomicRMWInst::Xor:
1553  Opcode = TargetOpcode::G_ATOMICRMW_XOR;
1554  break;
1555  case AtomicRMWInst::Max:
1556  Opcode = TargetOpcode::G_ATOMICRMW_MAX;
1557  break;
1558  case AtomicRMWInst::Min:
1559  Opcode = TargetOpcode::G_ATOMICRMW_MIN;
1560  break;
1561  case AtomicRMWInst::UMax:
1562  Opcode = TargetOpcode::G_ATOMICRMW_UMAX;
1563  break;
1564  case AtomicRMWInst::UMin:
1565  Opcode = TargetOpcode::G_ATOMICRMW_UMIN;
1566  break;
1567  }
1568 
1569  MIRBuilder.buildAtomicRMW(
1570  Opcode, Res, Addr, Val,
1571  *MF->getMachineMemOperand(MachinePointerInfo(I.getPointerOperand()),
1572  Flags, DL->getTypeStoreSize(ResType),
1573  getMemOpAlignment(I), AAMDNodes(), nullptr,
1574  I.getSyncScopeID(), I.getOrdering()));
1575  return true;
1576 }
1577 
1578 void IRTranslator::finishPendingPhis() {
1579 #ifndef NDEBUG
1580  DILocationVerifier Verifier;
1581  GISelObserverWrapper WrapperObserver(&Verifier);
1582  RAIIDelegateInstaller DelInstall(*MF, &WrapperObserver);
1583 #endif // ifndef NDEBUG
1584  for (auto &Phi : PendingPHIs) {
1585  const PHINode *PI = Phi.first;
1586  ArrayRef<MachineInstr *> ComponentPHIs = Phi.second;
1587  EntryBuilder->setDebugLoc(PI->getDebugLoc());
1588 #ifndef NDEBUG
1589  Verifier.setCurrentInst(PI);
1590 #endif // ifndef NDEBUG
1591 
1592  // All MachineBasicBlocks exist, add them to the PHI. We assume IRTranslator
1593  // won't create extra control flow here, otherwise we need to find the
1594  // dominating predecessor here (or perhaps force the weirder IRTranslators
1595  // to provide a simple boundary).
1596  SmallSet<const BasicBlock *, 4> HandledPreds;
1597 
1598  for (unsigned i = 0; i < PI->getNumIncomingValues(); ++i) {
1599  auto IRPred = PI->getIncomingBlock(i);
1600  if (HandledPreds.count(IRPred))
1601  continue;
1602 
1603  HandledPreds.insert(IRPred);
1604  ArrayRef<unsigned> ValRegs = getOrCreateVRegs(*PI->getIncomingValue(i));
1605  for (auto Pred : getMachinePredBBs({IRPred, PI->getParent()})) {
1606  assert(Pred->isSuccessor(ComponentPHIs[0]->getParent()) &&
1607  "incorrect CFG at MachineBasicBlock level");
1608  for (unsigned j = 0; j < ValRegs.size(); ++j) {
1609  MachineInstrBuilder MIB(*MF, ComponentPHIs[j]);
1610  MIB.addUse(ValRegs[j]);
1611  MIB.addMBB(Pred);
1612  }
1613  }
1614  }
1615  }
1616 }
1617 
1618 bool IRTranslator::valueIsSplit(const Value &V,
1620  SmallVector<LLT, 4> SplitTys;
1621  if (Offsets && !Offsets->empty())
1622  Offsets->clear();
1623  computeValueLLTs(*DL, *V.getType(), SplitTys, Offsets);
1624  return SplitTys.size() > 1;
1625 }
1626 
bool IRTranslator::translate(const Instruction &Inst) {
  // Keep both builders' debug locations in sync with the instruction being
  // translated so any MI they emit is attributed correctly.
  CurBuilder->setDebugLoc(Inst.getDebugLoc());
  EntryBuilder->setDebugLoc(Inst.getDebugLoc());
  // Dispatch on the IR opcode: Instruction.def expands HANDLE_INST into one
  // case per opcode, each forwarding to the matching translate<Opcode>
  // method. Returns false for any opcode without a handler.
  switch(Inst.getOpcode()) {
#define HANDLE_INST(NUM, OPCODE, CLASS) \
  case Instruction::OPCODE: \
    return translate##OPCODE(Inst, *CurBuilder.get());
#include "llvm/IR/Instruction.def"
  default:
    return false;
  }
}
1639 
1640 bool IRTranslator::translate(const Constant &C, unsigned Reg) {
1641  if (auto CI = dyn_cast<ConstantInt>(&C))
1642  EntryBuilder->buildConstant(Reg, *CI);
1643  else if (auto CF = dyn_cast<ConstantFP>(&C))
1644  EntryBuilder->buildFConstant(Reg, *CF);
1645  else if (isa<UndefValue>(C))
1646  EntryBuilder->buildUndef(Reg);
1647  else if (isa<ConstantPointerNull>(C)) {
1648  // As we are trying to build a constant val of 0 into a pointer,
1649  // insert a cast to make them correct with respect to types.
1650  unsigned NullSize = DL->getTypeSizeInBits(C.getType());
1651  auto *ZeroTy = Type::getIntNTy(C.getContext(), NullSize);
1652  auto *ZeroVal = ConstantInt::get(ZeroTy, 0);
1653  unsigned ZeroReg = getOrCreateVReg(*ZeroVal);
1654  EntryBuilder->buildCast(Reg, ZeroReg);
1655  } else if (auto GV = dyn_cast<GlobalValue>(&C))
1656  EntryBuilder->buildGlobalValue(Reg, GV);
1657  else if (auto CAZ = dyn_cast<ConstantAggregateZero>(&C)) {
1658  if (!CAZ->getType()->isVectorTy())
1659  return false;
1660  // Return the scalar if it is a <1 x Ty> vector.
1661  if (CAZ->getNumElements() == 1)
1662  return translate(*CAZ->getElementValue(0u), Reg);
1664  for (unsigned i = 0; i < CAZ->getNumElements(); ++i) {
1665  Constant &Elt = *CAZ->getElementValue(i);
1666  Ops.push_back(getOrCreateVReg(Elt));
1667  }
1668  EntryBuilder->buildBuildVector(Reg, Ops);
1669  } else if (auto CV = dyn_cast<ConstantDataVector>(&C)) {
1670  // Return the scalar if it is a <1 x Ty> vector.
1671  if (CV->getNumElements() == 1)
1672  return translate(*CV->getElementAsConstant(0), Reg);
1674  for (unsigned i = 0; i < CV->getNumElements(); ++i) {
1675  Constant &Elt = *CV->getElementAsConstant(i);
1676  Ops.push_back(getOrCreateVReg(Elt));
1677  }
1678  EntryBuilder->buildBuildVector(Reg, Ops);
1679  } else if (auto CE = dyn_cast<ConstantExpr>(&C)) {
1680  switch(CE->getOpcode()) {
1681 #define HANDLE_INST(NUM, OPCODE, CLASS) \
1682  case Instruction::OPCODE: \
1683  return translate##OPCODE(*CE, *EntryBuilder.get());
1684 #include "llvm/IR/Instruction.def"
1685  default:
1686  return false;
1687  }
1688  } else if (auto CV = dyn_cast<ConstantVector>(&C)) {
1689  if (CV->getNumOperands() == 1)
1690  return translate(*CV->getOperand(0), Reg);
1692  for (unsigned i = 0; i < CV->getNumOperands(); ++i) {
1693  Ops.push_back(getOrCreateVReg(*CV->getOperand(i)));
1694  }
1695  EntryBuilder->buildBuildVector(Reg, Ops);
1696  } else if (auto *BA = dyn_cast<BlockAddress>(&C)) {
1697  EntryBuilder->buildBlockAddress(Reg, BA);
1698  } else
1699  return false;
1700 
1701  return true;
1702 }
1703 
1704 void IRTranslator::finalizeFunction() {
1705  // Release the memory used by the different maps we
1706  // needed during the translation.
1707  PendingPHIs.clear();
1708  VMap.reset();
1709  FrameIndices.clear();
1710  MachinePreds.clear();
1711  // MachineIRBuilder::DebugLoc can outlive the DILocation it holds. Clear it
1712  // to avoid accessing free’d memory (in runOnMachineFunction) and to avoid
1713  // destroying it twice (in ~IRTranslator() and ~LLVMContext())
1714  EntryBuilder.reset();
1715  CurBuilder.reset();
1716 }
1717 
1719  MF = &CurMF;
1720  const Function &F = MF->getFunction();
1721  if (F.empty())
1722  return false;
1724  getAnalysis<GISelCSEAnalysisWrapperPass>().getCSEWrapper();
1725  // Set the CSEConfig and run the analysis.
1726  GISelCSEInfo *CSEInfo = nullptr;
1727  TPC = &getAnalysis<TargetPassConfig>();
1728  bool EnableCSE = EnableCSEInIRTranslator.getNumOccurrences()
1730  : TPC->isGISelCSEEnabled();
1731 
1732  if (EnableCSE) {
1733  EntryBuilder = make_unique<CSEMIRBuilder>(CurMF);
1734  std::unique_ptr<CSEConfig> Config = make_unique<CSEConfig>();
1735  CSEInfo = &Wrapper.get(std::move(Config));
1736  EntryBuilder->setCSEInfo(CSEInfo);
1737  CurBuilder = make_unique<CSEMIRBuilder>(CurMF);
1738  CurBuilder->setCSEInfo(CSEInfo);
1739  } else {
1740  EntryBuilder = make_unique<MachineIRBuilder>();
1741  CurBuilder = make_unique<MachineIRBuilder>();
1742  }
1743  CLI = MF->getSubtarget().getCallLowering();
1744  CurBuilder->setMF(*MF);
1745  EntryBuilder->setMF(*MF);
1746  MRI = &MF->getRegInfo();
1747  DL = &F.getParent()->getDataLayout();
1748  ORE = llvm::make_unique<OptimizationRemarkEmitter>(&F);
1749 
1750  assert(PendingPHIs.empty() && "stale PHIs");
1751 
1752  if (!DL->isLittleEndian()) {
1753  // Currently we don't properly handle big endian code.
1754  OptimizationRemarkMissed R("gisel-irtranslator", "GISelFailure",
1755  F.getSubprogram(), &F.getEntryBlock());
1756  R << "unable to translate in big endian mode";
1757  reportTranslationError(*MF, *TPC, *ORE, R);
1758  }
1759 
1760  // Release the per-function state when we return, whether we succeeded or not.
1761  auto FinalizeOnReturn = make_scope_exit([this]() { finalizeFunction(); });
1762 
1763  // Setup a separate basic-block for the arguments and constants
1764  MachineBasicBlock *EntryBB = MF->CreateMachineBasicBlock();
1765  MF->push_back(EntryBB);
1766  EntryBuilder->setMBB(*EntryBB);
1767 
1768  // Create all blocks, in IR order, to preserve the layout.
1769  for (const BasicBlock &BB: F) {
1770  auto *&MBB = BBToMBB[&BB];
1771 
1772  MBB = MF->CreateMachineBasicBlock(&BB);
1773  MF->push_back(MBB);
1774 
1775  if (BB.hasAddressTaken())
1776  MBB->setHasAddressTaken();
1777  }
1778 
1779  // Make our arguments/constants entry block fallthrough to the IR entry block.
1780  EntryBB->addSuccessor(&getMBB(F.front()));
1781 
1782  // Lower the actual args into this basic block.
1783  SmallVector<unsigned, 8> VRegArgs;
1784  for (const Argument &Arg: F.args()) {
1785  if (DL->getTypeStoreSize(Arg.getType()) == 0)
1786  continue; // Don't handle zero sized types.
1787  VRegArgs.push_back(
1788  MRI->createGenericVirtualRegister(getLLTForType(*Arg.getType(), *DL)));
1789  }
1790 
1791  // We don't currently support translating swifterror or swiftself functions.
1792  for (auto &Arg : F.args()) {
1793  if (Arg.hasSwiftErrorAttr() || Arg.hasSwiftSelfAttr()) {
1794  OptimizationRemarkMissed R("gisel-irtranslator", "GISelFailure",
1795  F.getSubprogram(), &F.getEntryBlock());
1796  R << "unable to lower arguments due to swifterror/swiftself: "
1797  << ore::NV("Prototype", F.getType());
1798  reportTranslationError(*MF, *TPC, *ORE, R);
1799  return false;
1800  }
1801  }
1802 
1803  if (!CLI->lowerFormalArguments(*EntryBuilder.get(), F, VRegArgs)) {
1804  OptimizationRemarkMissed R("gisel-irtranslator", "GISelFailure",
1805  F.getSubprogram(), &F.getEntryBlock());
1806  R << "unable to lower arguments: " << ore::NV("Prototype", F.getType());
1807  reportTranslationError(*MF, *TPC, *ORE, R);
1808  return false;
1809  }
1810 
1811  auto ArgIt = F.arg_begin();
1812  for (auto &VArg : VRegArgs) {
1813  // If the argument is an unsplit scalar then don't use unpackRegs to avoid
1814  // creating redundant copies.
1815  if (!valueIsSplit(*ArgIt, VMap.getOffsets(*ArgIt))) {
1816  auto &VRegs = *VMap.getVRegs(cast<Value>(*ArgIt));
1817  assert(VRegs.empty() && "VRegs already populated?");
1818  VRegs.push_back(VArg);
1819  } else {
1820  unpackRegs(*ArgIt, VArg, *EntryBuilder.get());
1821  }
1822  ArgIt++;
1823  }
1824 
1825  // Need to visit defs before uses when translating instructions.
1826  GISelObserverWrapper WrapperObserver;
1827  if (EnableCSE && CSEInfo)
1828  WrapperObserver.addObserver(CSEInfo);
1829  {
1831 #ifndef NDEBUG
1832  DILocationVerifier Verifier;
1833  WrapperObserver.addObserver(&Verifier);
1834 #endif // ifndef NDEBUG
1835  RAIIDelegateInstaller DelInstall(*MF, &WrapperObserver);
1836  for (const BasicBlock *BB : RPOT) {
1837  MachineBasicBlock &MBB = getMBB(*BB);
1838  // Set the insertion point of all the following translations to
1839  // the end of this basic block.
1840  CurBuilder->setMBB(MBB);
1841 
1842  for (const Instruction &Inst : *BB) {
1843 #ifndef NDEBUG
1844  Verifier.setCurrentInst(&Inst);
1845 #endif // ifndef NDEBUG
1846  if (translate(Inst))
1847  continue;
1848 
1849  OptimizationRemarkMissed R("gisel-irtranslator", "GISelFailure",
1850  Inst.getDebugLoc(), BB);
1851  R << "unable to translate instruction: " << ore::NV("Opcode", &Inst);
1852 
1853  if (ORE->allowExtraAnalysis("gisel-irtranslator")) {
1854  std::string InstStrStorage;
1855  raw_string_ostream InstStr(InstStrStorage);
1856  InstStr << Inst;
1857 
1858  R << ": '" << InstStr.str() << "'";
1859  }
1860 
1861  reportTranslationError(*MF, *TPC, *ORE, R);
1862  return false;
1863  }
1864  }
1865 #ifndef NDEBUG
1866  WrapperObserver.removeObserver(&Verifier);
1867 #endif
1868  }
1869 
1870  finishPendingPhis();
1871 
1872  // Merge the argument lowering and constants block with its single
1873  // successor, the LLVM-IR entry block. We want the basic block to
1874  // be maximal.
1875  assert(EntryBB->succ_size() == 1 &&
1876  "Custom BB used for lowering should have only one successor");
1877  // Get the successor of the current entry block.
1878  MachineBasicBlock &NewEntryBB = **EntryBB->succ_begin();
1879  assert(NewEntryBB.pred_size() == 1 &&
1880  "LLVM-IR entry block has a predecessor!?");
1881  // Move all the instruction from the current entry block to the
1882  // new entry block.
1883  NewEntryBB.splice(NewEntryBB.begin(), EntryBB, EntryBB->begin(),
1884  EntryBB->end());
1885 
1886  // Update the live-in information for the new entry block.
1887  for (const MachineBasicBlock::RegisterMaskPair &LiveIn : EntryBB->liveins())
1888  NewEntryBB.addLiveIn(LiveIn);
1889  NewEntryBB.sortUniqueLiveIns();
1890 
1891  // Get rid of the now empty basic block.
1892  EntryBB->removeSuccessor(&NewEntryBB);
1893  MF->remove(EntryBB);
1894  MF->DeleteMachineBasicBlock(EntryBB);
1895 
1896  assert(&MF->front() == &NewEntryBB &&
1897  "New entry wasn't next in the list of basic block!");
1898 
1899  // Initialize stack protector information.
1900  StackProtector &SP = getAnalysis<StackProtector>();
1901  SP.copyToMachineFrameInfo(MF->getFrameInfo());
1902 
1903  return false;
1904 }
uint64_t CallInst * C
void initializeIRTranslatorPass(PassRegistry &)
Return a value (possibly void), from a function.
Value * getValueOperand()
Definition: Instructions.h:409
bool isIntrinsic() const
isIntrinsic - Returns true if the function's name starts with "llvm.".
Definition: Function.h:198
A simple RAII based CSEInfo installer.
virtual MachineInstrBuilder buildConstant(const DstOp &Res, const ConstantInt &Val)
Build and insert Res = G_CONSTANT Val.
A parsed version of the target data layout string in and methods for querying it. ...
Definition: DataLayout.h:110
constexpr char Align[]
Key for Kernel::Arg::Metadata::mAlign.
reference emplace_back(ArgTypes &&... Args)
Definition: SmallVector.h:645
This class is the base class for the comparison instructions.
Definition: InstrTypes.h:636
bool empty() const
Definition: Function.h:661
static IntegerType * getInt1Ty(LLVMContext &C)
Definition: Type.cpp:172
The CSE Analysis object.
Definition: CSEInfo.h:68
Diagnostic information for missed-optimization remarks.
This instruction extracts a struct member or array element value from an aggregate value...
static PassRegistry * getPassRegistry()
getPassRegistry - Access the global registry object, which is automatically initialized at applicatio...
MachineInstrBuilder buildZExtOrTrunc(const DstOp &Res, const SrcOp &Op)
Build and insert Res = G_ZEXT Op, Res = G_TRUNC Op, or Res = COPY Op depending on the differing sizes...
*p = old <signed v ? old : v
Definition: Instructions.h:721
iterator_range< CaseIt > cases()
Iteration adapter for range-for loops.
GCNRegPressure max(const GCNRegPressure &P1, const GCNRegPressure &P2)
This class represents an incoming formal argument to a Function.
Definition: Argument.h:29
LLVMContext & Context
bool doesNotAccessMemory(unsigned OpNo) const
Definition: InstrTypes.h:1465
MachineInstrBuilder buildGEP(unsigned Res, unsigned Op0, unsigned Op1)
Build and insert Res = G_GEP Op0, Op1.
DiagnosticInfoOptimizationBase::Argument NV
This represents the llvm.dbg.label instruction.
LLVM_ATTRIBUTE_NORETURN void report_fatal_error(Error Err, bool gen_crash_diag=true)
Report a serious error, calling any installed error handler.
Definition: Error.cpp:139
This class represents lattice values for constants.
Definition: AllocatorList.h:23
MachineInstrBuilder buildIndirectDbgValue(unsigned Reg, const MDNode *Variable, const MDNode *Expr)
Build and insert a DBG_VALUE instruction expressing the fact that the associated Variable lives in me...
void getSelectionDAGFallbackAnalysisUsage(AnalysisUsage &AU)
Modify analysis usage so it preserves passes required for the SelectionDAG fallback.
Definition: Utils.cpp:338
MachineInstrBuilder buildSExtOrTrunc(const DstOp &Res, const SrcOp &Op)
Build and insert Res = G_SEXT Op, Res = G_TRUNC Op, or Res = COPY Op depending on the differing sizes...
MCSymbol - Instances of this class represent a symbol name in the MC file, and MCSymbols are created ...
Definition: MCSymbol.h:41
bool isSized(SmallPtrSetImpl< Type *> *Visited=nullptr) const
Return true if it makes sense to take the size of this type.
Definition: Type.h:264
iterator begin() const
Definition: ArrayRef.h:136
void setIsEHPad(bool V=true)
Indicates the block is a landing pad.
an instruction that atomically checks whether a specified value is in a memory location, and, if it is, stores a new value there.
Definition: Instructions.h:528
const StructLayout * getStructLayout(StructType *Ty) const
Returns a StructLayout object, indicating the alignment of the struct, its size, and the offsets of i...
Definition: DataLayout.cpp:607
void push_back(const T &Elt)
Definition: SmallVector.h:211
MachineInstrBuilder buildIntrinsic(Intrinsic::ID ID, ArrayRef< unsigned > Res, bool HasSideEffects)
Build and insert either a G_INTRINSIC (if HasSideEffects is false) or G_INTRINSIC_W_SIDE_EFFECTS inst...
const DebugLoc & getDebugLoc() const
Returns the debug location id of this MachineInstr.
Definition: MachineInstr.h:382
unsigned getReg() const
getReg - Returns the register number.
MachineInstrBuilder buildCast(const DstOp &Dst, const SrcOp &Src)
Build and insert an appropriate cast between two registers of equal size.
IRTranslator LLVM IR static false void reportTranslationError(MachineFunction &MF, const TargetPassConfig &TPC, OptimizationRemarkEmitter &ORE, OptimizationRemarkMissed &R)
This class represents a function call, abstracting a target machine's calling convention.
unsigned Reg
This file contains the declarations for metadata subclasses.
Value * getCondition() const
static uint64_t round(uint64_t Acc, uint64_t Input)
Definition: xxhash.cpp:57
SyncScope::ID getSyncScopeID() const
Returns the synchronization scope ID of this store instruction.
Definition: Instructions.h:384
gep_type_iterator gep_type_end(const User *GEP)
const std::string & getAsmString() const
Definition: InlineAsm.h:80
*p = old <unsigned v ? old : v
Definition: Instructions.h:725
bool isSwiftError() const
Return true if this alloca is used as a swifterror argument to a call.
Definition: Instructions.h:135
AtomicOrdering getOrdering() const
Returns the ordering constraint of this load instruction.
Definition: Instructions.h:247
Offsets
Offsets in bytes from the start of the input buffer.
Definition: SIInstrInfo.h:1026
*p = old >unsigned v ? old : v
Definition: Instructions.h:723
LLVM_NODISCARD detail::scope_exit< typename std::decay< Callable >::type > make_scope_exit(Callable &&F)
Definition: ScopeExit.h:58
LLVMContext & getContext() const
All values hold a context through their type.
Definition: Value.cpp:709
bool hasDLLImportStorageClass() const
Definition: GlobalValue.h:261
BasicBlock * getSuccessor(unsigned i) const
unsigned const TargetRegisterInfo * TRI
F(f)
The actual analysis pass wrapper.
Definition: CSEInfo.h:212
An instruction for reading from memory.
Definition: Instructions.h:167
void setMF(MachineFunction &MF)
Definition: CSEInfo.cpp:66
an instruction that atomically reads a memory location, combines it with another value, and then stores the result back.
Definition: Instructions.h:691
Value * getCondition() const
bool isVectorTy() const
True if this is an instance of VectorType.
Definition: Type.h:229
GlobalValue * ExtractTypeInfo(Value *V)
ExtractTypeInfo - Returns the type info, possibly bitcast, encoded in V.
Definition: Analysis.cpp:117
*p = old >signed v ? old : v
Definition: Instructions.h:719
virtual bool getTgtMemIntrinsic(IntrinsicInfo &, const CallInst &, MachineFunction &, unsigned) const
Given an intrinsic, checks if on the target the intrinsic will need to map to a MemIntrinsicNode (tou...
bool runOnMachineFunction(MachineFunction &MF) override
runOnMachineFunction - This method must be overloaded to perform the desired machine code transformat...
static Constant * getNullValue(Type *Ty)
Constructor to create a '0' constant of arbitrary type.
Definition: Constants.cpp:274
AtomicOrdering getFailureOrdering() const
Returns the failure ordering constraint of this cmpxchg instruction.
Definition: Instructions.h:595
MachineInstrBuilder buildExtract(const DstOp &Res, const SrcOp &Src, uint64_t Index)
Build and insert `Res0, ...
bool hasSideEffects() const
Definition: InlineAsm.h:66
Value * getArgOperand(unsigned i) const
Definition: InstrTypes.h:1155
MachineInstrBuilder buildStore(unsigned Val, unsigned Addr, MachineMemOperand &MMO)
Build and insert G_STORE Val, Addr, MMO.
AnalysisUsage & addRequired()
Used to lazily calculate structure layout information for a target machine, based on the DataLayout s...
Definition: DataLayout.h:554
#define INITIALIZE_PASS_DEPENDENCY(depName)
Definition: PassSupport.h:50
bool isVolatile() const
Return true if this is a load from a volatile memory location.
Definition: Instructions.h:231
A description of a memory reference used in the backend.
GISelCSEInfo & get(std::unique_ptr< CSEConfig > CSEOpt, bool ReCompute=false)
Takes a CSEConfig object that defines what opcodes get CSEd.
Definition: CSEInfo.cpp:351
amdgpu aa AMDGPU Address space based Alias Analysis Wrapper
unsigned countOperandBundlesOfType(StringRef Name) const
Return the number of operand bundles with the tag Name attached to this instruction.
Definition: InstrTypes.h:1692
This class represents the LLVM 'select' instruction.
const DataLayout & getDataLayout() const
Get the data layout for the module's target platform.
Definition: Module.cpp:369
#define DEBUG_TYPE
MachineFunctionPass - This class adapts the FunctionPass interface to allow convenient creation of pa...
const HexagonInstrInfo * TII
unsigned getAlignment() const
Return the alignment of the memory that is being allocated by the instruction.
Definition: Instructions.h:112
PointerType * getType() const
Overload to return most specific pointer type.
Definition: Instructions.h:96
Class to represent struct types.
Definition: DerivedTypes.h:232
DILabel * getLabel() const
BinOp getOperation() const
Definition: Instructions.h:750
const MachineInstrBuilder & addUse(unsigned RegNo, unsigned Flags=0, unsigned SubReg=0) const
Add a virtual register use operand.
bool isWeak() const
Return true if this cmpxchg may spuriously fail.
Definition: Instructions.h:569
TypeID
Definitions of all of the base types for the Type system.
Definition: Type.h:54
The memory access is dereferenceable (i.e., doesn't trap).
bool isVolatile() const
Return true if this is a cmpxchg from a volatile memory location.
Definition: Instructions.h:557
This class defines information used to lower LLVM code to legal SelectionDAG operators that the targe...
Target-Independent Code Generator Pass Configuration Options.
INLINEASM - Represents an inline asm block.
Definition: ISDOpcodes.h:668
Context object for machine code objects.
Definition: MCContext.h:62
Type * getType() const
All values are typed, get the type of this value.
Definition: Value.h:244
Definition: Lint.cpp:83
AtomicOrdering getSuccessOrdering() const
Returns the success ordering constraint of this cmpxchg instruction.
Definition: Instructions.h:582
Class to represent array types.
Definition: DerivedTypes.h:400
MachineInstrBuilder buildAtomicRMW(unsigned Opcode, unsigned OldValRes, unsigned Addr, unsigned Val, MachineMemOperand &MMO)
Build and insert OldValRes<def> = G_ATOMICRMW_<Opcode> Addr, Val, MMO.
unsigned getOpcode() const
Returns a member of one of the enums like Instruction::Add.
Definition: Instruction.h:125
iterator_range< User::op_iterator > arg_operands()
Definition: InstrTypes.h:1147
auto lower_bound(R &&Range, ForwardIt I) -> decltype(adl_begin(Range))
Provide wrappers to std::lower_bound which take ranges instead of having to pass begin/end explicitly...
Definition: STLExtras.h:1281
An instruction for storing to memory.
Definition: Instructions.h:320
static LLT scalar(unsigned SizeInBits)
Get a low-level scalar or aggregate "bag of bits".
instr_iterator insert(instr_iterator I, MachineInstr *M)
Insert MI into the instruction list before I, possibly inside a bundle.
MachineInstrBuilder buildExtractVectorElement(const DstOp &Res, const SrcOp &Val, const SrcOp &Idx)
Build and insert Res = G_EXTRACT_VECTOR_ELT Val, Idx.
Value * getOperand(unsigned i) const
Definition: User.h:169
Analysis containing CSE Info
Definition: CSEInfo.cpp:20
This corresponds to the llvm.lifetime.
Definition: ISDOpcodes.h:850
MachineInstrBuilder buildDbgLabel(const MDNode *Label)
Build and insert a DBG_LABEL instructions specifying that Label is given.
bool isVoidTy() const
Return true if this is 'void'.
Definition: Type.h:140
The memory access is volatile.
bool isValidLocationForIntrinsic(const DILocation *DL) const
Check that a location is valid for this label.
const BasicBlock & getEntryBlock() const
Definition: Function.h:639
constexpr uint64_t MinAlign(uint64_t A, uint64_t B)
A and B are either alignments or offsets.
Definition: MathExtras.h:609
initializer< Ty > init(const Ty &Val)
Definition: CommandLine.h:423
Abstract class that contains various methods for clients to notify about changes. ...
FPOpFusion::FPOpFusionMode AllowFPOpFusion
AllowFPOpFusion - This flag is set by the -fuse-fp-ops=xxx option.
The landingpad instruction holds all of the information necessary to generate correct exception handl...
* if(!EatIfPresent(lltok::kw_thread_local)) return false
ParseOptionalThreadLocal := /*empty.
SyncScope::ID getSyncScopeID() const
Returns the synchronization scope ID of this rmw instruction.
Definition: Instructions.h:802
unsigned const MachineRegisterInfo * MRI
Value * getCalledValue() const
Definition: InstrTypes.h:1194
LLVM Basic Block Representation.
Definition: BasicBlock.h:57
The instances of the Type class are immutable: once they are created, they are never changed...
Definition: Type.h:45
void getAnalysisUsage(AnalysisUsage &AU) const override
getAnalysisUsage - Subclasses that override getAnalysisUsage must call this.
DISubprogram * getSubprogram() const
Get the attached subprogram.
Definition: Metadata.cpp:1507
Conditional or Unconditional Branch instruction.
MachineInstrBuilder buildInstr(unsigned Opcode)
Build and insert <empty> = Opcode <empty>.
Value * getAddress() const
size_t size() const
size - Get the array size.
Definition: ArrayRef.h:148
static GCRegistry::Add< CoreCLRGC > E("coreclr", "CoreCLR-compatible GC")
This is an important base class in LLVM.
Definition: Constant.h:41
bool isInlineAsm() const
Check if this call is an inline asm statement.
Definition: InstrTypes.h:1270
Value * getValue() const
MachineInstrBuilder buildPtrMask(unsigned Res, unsigned Op0, uint32_t NumBits)
Build and insert Res = G_PTR_MASK Op0, NumBits.
SmallSet - This maintains a set of unique values, optimizing for the case when the set is small (less...
Definition: SmallSet.h:134
This file contains the declarations for the subclasses of Constant, which represent the different fla...
const Instruction & front() const
Definition: BasicBlock.h:280
Indirect Branch Instruction.
Helper class to build MachineInstr.
BasicBlock * getDefaultDest() const
DIExpression * getExpression() const
bool isValidLocationForIntrinsic(const DILocation *DL) const
Check that a location is valid for this variable.
Represent the analysis usage information of a pass.
Predicate
This enumeration lists the possible predicates for CmpInst subclasses.
Definition: InstrTypes.h:646
MachineInstrBuilder buildInsert(unsigned Res, unsigned Src, unsigned Op, unsigned Index)
amdgpu Simplify well known AMD library false FunctionCallee Value * Arg
Value * getPointerOperand()
Definition: Instructions.h:284
self_iterator getIterator()
Definition: ilist_node.h:81
std::pair< NoneType, bool > insert(const T &V)
insert - Insert an element into the set if it isn&#39;t already there.
Definition: SmallSet.h:180
const MachineInstrBuilder & addSym(MCSymbol *Sym, unsigned char TargetFlags=0) const
const MachineInstrBuilder & addFrameIndex(int Idx) const
static double log2(double V)
static Constant * getAllOnesValue(Type *Ty)
Definition: Constants.cpp:328
1 1 1 1 Always true (always folded)
Definition: InstrTypes.h:663
LLVMContext & getContext() const
getContext - Return a reference to the LLVMContext associated with this function. ...
Definition: Function.cpp:192
MachineInstrBuilder buildBrIndirect(unsigned Tgt)
Build and insert G_BRINDIRECT Tgt.
MachineInstrBuilder buildCopy(const DstOp &Res, const SrcOp &Op)
Build and insert Res = COPY Op.
SyncScope::ID getSyncScopeID() const
Returns the synchronization scope ID of this cmpxchg instruction.
Definition: Instructions.h:608
BasicBlock * getSuccessor(unsigned i) const
TargetRegisterInfo base class - We assume that the target defines a static array of TargetRegisterDes...
const Value * getArraySize() const
Get the number of elements allocated.
Definition: Instructions.h:92
size_t size() const
Definition: SmallVector.h:52
Value * getIncomingValue(unsigned i) const
Return incoming value number x.
static uint16_t copyFlagsFromInstruction(const Instruction &I)
AtomicOrdering getOrdering() const
Returns the ordering constraint of this rmw instruction.
Definition: Instructions.h:789
Simple wrapper that does the following.
Definition: CSEInfo.h:195
This class contains a discriminated union of information about pointers in memory operands...
std::string & str()
Flushes the stream contents to the target string and returns the string's reference.
Definition: raw_ostream.h:498
INITIALIZE_PASS_END(RegBankSelect, DEBUG_TYPE, "Assign register bank of generic virtual registers", false, false) RegBankSelect
#define llvm_unreachable(msg)
Marks that the current location is not supposed to be reachable.
EVT getValueType(const DataLayout &DL, Type *Ty, bool AllowUnknown=false) const
Return the EVT corresponding to this LLVM type.
const std::string & getConstraintString() const
Definition: InlineAsm.h:81
Type * getAllocatedType() const
Return the type that is being allocated by the instruction.
Definition: Instructions.h:105
EH_LABEL - Represents a label in mid basic block used to track locations needed for debug and excepti...
Definition: ISDOpcodes.h:676
MachineInstrBuilder buildFrameIndex(unsigned Res, int Idx)
Build and insert Res = G_FRAME_INDEX Idx.
LLT getLLTForType(Type &Ty, const DataLayout &DL)
Construct a low-level type based on an LLVM type.
The memory access writes data.
MachineInstrBuilder buildBr(MachineBasicBlock &Dest)
Build and insert G_BR Dest.
bool hasAddressTaken() const
Returns true if there are any uses of this basic block other than direct branches, switches, etc.
Definition: BasicBlock.h:391
MachineInstrBuilder buildConstDbgValue(const Constant &C, const MDNode *Variable, const MDNode *Expr)
Build and insert a DBG_VALUE instructions specifying that Variable is given by C (suitably modified b...
Value * getValOperand()
Definition: Instructions.h:815
Predicate getPredicate(unsigned Condition, unsigned Hint)
Return predicate consisting of specified condition and hint bits.
Definition: PPCPredicates.h:87
unsigned getNumOperands() const
Definition: User.h:191
MachineInstrBuilder buildMul(const DstOp &Dst, const SrcOp &Src0, const SrcOp &Src1, Optional< unsigned > Flags=None)
Build and insert Res = G_MUL Op0, Op1.
void addSuccessor(MachineBasicBlock *Succ, BranchProbability Prob=BranchProbability::getUnknown())
Add Succ as a successor of this MachineBasicBlock.
MachineInstrBuilder buildICmp(CmpInst::Predicate Pred, const DstOp &Res, const SrcOp &Op0, const SrcOp &Op1)
Build and insert a Res = G_ICMP Pred, Op0, Op1.
This is the shared class of boolean and integer constants.
Definition: Constants.h:83
This is a &#39;vector&#39; (really, a variable-sized array), optimized for the case when the array is small...
Definition: SmallVector.h:841
iterator end() const
Definition: ArrayRef.h:137
bool isAggregateType() const
Return true if the type is an aggregate type.
Definition: Type.h:257
unsigned getSizeInBits() const
Returns the total size of the type. Must only be called on sized types.
static uint64_t getOffsetFromIndices(const User &U, const DataLayout &DL)
const MachineInstrBuilder & addMemOperand(MachineMemOperand *MMO) const
A collection of metadata nodes that might be associated with a memory access used by the alias-analys...
Definition: Metadata.h:643
MachineInstrBuilder buildBrCond(unsigned Tst, MachineBasicBlock &Dest)
Build and insert G_BRCOND Tst, Dest.
static IntegerType * getIntNTy(LLVMContext &C, unsigned N)
Definition: Type.cpp:179
static Constant * get(Type *Ty, uint64_t V, bool isSigned=false)
If Ty is a vector type, return a Constant with a splat of the given value.
Definition: Constants.cpp:631
DebugLoc getDebugLoc()
Get the current instruction&#39;s debug location.
unsigned getNumIncomingValues() const
Return the number of incoming edges.
bool isLayoutSuccessor(const MachineBasicBlock *MBB) const
Return true if the specified MBB will be emitted immediately after this block, such that if this bloc...
Intrinsic::ID getIntrinsicID() const LLVM_READONLY
getIntrinsicID - This method returns the ID number of the specified function, or Intrinsic::not_intri...
Definition: Function.h:193
raw_ostream & dbgs()
dbgs() - This returns a reference to a raw_ostream for debugging messages.
Definition: Debug.cpp:132
unsigned Log2_32(uint32_t Value)
Return the floor log base 2 of the specified value, -1 if the value is zero.
Definition: MathExtras.h:538
MachineInstrBuilder buildInsertVectorElement(const DstOp &Res, const SrcOp &Val, const SrcOp &Elt, const SrcOp &Idx)
Build and insert Res = G_INSERT_VECTOR_ELT Val, Elt, Idx.
unsigned getVectorNumElements() const
Definition: DerivedTypes.h:493
bool isIntPredicate() const
Definition: InstrTypes.h:739
const Module * getModule() const
Return the module owning the function this instruction belongs to or nullptr it the function does not...
Definition: Instruction.cpp:55
Class for arbitrary precision integers.
Definition: APInt.h:69
amdgpu Simplify well known AMD library false FunctionCallee Callee
MachineInstrBuilder buildAtomicCmpXchgWithSuccess(unsigned OldValRes, unsigned SuccessRes, unsigned Addr, unsigned CmpVal, unsigned NewVal, MachineMemOperand &MMO)
Build and insert OldValRes<def>, SuccessRes<def> = G_ATOMIC_CMPXCHG_WITH_SUCCESS Addr, CmpVal, NewVal, MMO.
static MachineOperand CreateES(const char *SymName, unsigned char TargetFlags=0)
static char ID
Definition: IRTranslator.h:60
virtual bool isFMAFasterThanFMulAndFAdd(EVT) const
Return true if an FMA operation is faster than a pair of fmul and fadd instructions.
The memory access reads data.
#define Success
static MachinePointerInfo getFixedStack(MachineFunction &MF, int FI, int64_t Offset=0)
Return a MachinePointerInfo record that refers to the specified FrameIndex.
uint64_t getTypeAllocSize(Type *Ty) const
Returns the offset in bytes between successive objects of the specified type, including alignment pad...
Definition: DataLayout.h:461
Representation of each machine instruction.
Definition: MachineInstr.h:63
Predicate getPredicate() const
Return the predicate for this instruction.
Definition: InstrTypes.h:721
This file provides various utilities for inspecting and working with the control flow graph in LLVM I...
void addObserver(GISelChangeObserver *O)
bool isVolatile() const
Return true if this is a store to a volatile memory location.
Definition: Instructions.h:353
MachineInstrBuilder buildDirectDbgValue(unsigned Reg, const MDNode *Variable, const MDNode *Expr)
Build and insert a DBG_VALUE instruction expressing the fact that the associated Variable lives in Re...
const DebugLoc & getDebugLoc() const
Return the debug location for this node as a DebugLoc.
Definition: Instruction.h:324
const MachineInstrBuilder & addImm(int64_t Val) const
Add a new immediate operand.
uint64_t getElementOffset(unsigned Idx) const
Definition: DataLayout.h:576
static IntegerType * getInt32Ty(LLVMContext &C)
Definition: Type.cpp:175
void removeObserver(GISelChangeObserver *O)
LLVM_NODISCARD bool empty() const
Definition: SmallVector.h:55
AtomicOrdering getOrdering() const
Returns the ordering constraint of this store instruction.
Definition: Instructions.h:372
This represents the llvm.dbg.value instruction.
bool isTokenTy() const
Return true if this is 'token'.
Definition: Type.h:193
CallingConv::ID getCallingConv() const
Definition: InstrTypes.h:1258
verify safepoint Safepoint IR Verifier
Value * getPointerOperand()
Definition: Instructions.h:811
TargetOptions Options
const MachineBasicBlock & getMBB() const
Getter for the basic block we currently build.
BasicBlock * getIncomingBlock(unsigned i) const
Return incoming basic block number i.
Function * getCalledFunction() const
Returns the function called, or null if this is an indirect function invocation.
Definition: InstrTypes.h:1201
static cl::opt< bool > EnableCSEInIRTranslator("enable-cse-in-irtranslator", cl::desc("Should enable CSE in irtranslator"), cl::Optional, cl::init(false))
SyncScope::ID getSyncScopeID() const
Returns the synchronization scope ID of this load instruction.
Definition: Instructions.h:259
void setMBB(MachineBasicBlock &MBB)
Set the insertion point to the end of MBB.
const MachineInstrBuilder & addExternalSymbol(const char *FnName, unsigned char TargetFlags=0) const
void push_back(MachineInstr *MI)
#define I(x, y, z)
Definition: MD5.cpp:58
static Constant * getZeroValueForNegation(Type *Ty)
Floating point negation must be implemented with f(x) = -0.0 - x.
Definition: Constants.cpp:780
Pair of physical register and lane mask.
The memory access always returns the same value (or traps).
bool isZero() const
This is just a convenience method to make client code smaller for a common code.
Definition: Constants.h:192
LLVM_NODISCARD std::enable_if<!is_simple_type< Y >::value, typename cast_retty< X, const Y >::ret_type >::type dyn_cast(const Y &Val)
Definition: Casting.h:322
uint32_t Size
Definition: Profile.cpp:46
DILocalVariable * getVariable() const
Value * getReturnValue() const
Convenience accessor. Returns null if there is no return value.
void getAnalysisUsage(AnalysisUsage &AU) const override
getAnalysisUsage - This function should be overriden by passes that need analysis information to do t...
bool isUnconditional() const
Optional< MachineInstrBuilder > materializeGEP(unsigned &Res, unsigned Op0, const LLT &ValueTy, uint64_t Value)
Materialize and insert Res = G_GEP Op0, (G_CONSTANT Value)
static void computeValueLLTs(const DataLayout &DL, Type &Ty, SmallVectorImpl< LLT > &ValueTys, SmallVectorImpl< uint64_t > *Offsets=nullptr, uint64_t StartingOffset=0)
AsmDialect getDialect() const
Definition: InlineAsm.h:68
void GetUnderlyingObjects(Value *V, SmallVectorImpl< Value *> &Objects, const DataLayout &DL, LoopInfo *LI=nullptr, unsigned MaxLookup=6)
This method is similar to GetUnderlyingObject except that it can look through phi and select instruct...
Multiway switch.
This file declares the IRTranslator pass.
assert(ImpDefSCC.getReg()==AMDGPU::SCC &&ImpDefSCC.isDef())
A raw_ostream that writes to an std::string.
Definition: raw_ostream.h:482
aarch64 promote const
Module * getParent()
Get the module that this global value is contained inside of...
Definition: GlobalValue.h:565
LLVM Value Representation.
Definition: Value.h:72
uint64_t getTypeStoreSize(Type *Ty) const
Returns the maximum number of bytes that may be overwritten by storing the specified type...
Definition: DataLayout.h:444
succ_range successors(Instruction *I)
Definition: CFG.h:259
This file describes how to lower LLVM calls to machine code calls.
MachineInstrBuilder buildLoad(unsigned Res, unsigned Addr, MachineMemOperand &MMO)
Build and insert Res = G_LOAD Addr, MMO.
INITIALIZE_PASS_BEGIN(IRTranslator, DEBUG_TYPE, "IRTranslator LLVM IR -> MI", false, false) INITIALIZE_PASS_END(IRTranslator
Invoke instruction.
Primary interface to the complete machine description for the target machine.
Definition: TargetMachine.h:65
IRTranslator LLVM IR MI
const MachineInstrBuilder & addDef(unsigned RegNo, unsigned Flags=0, unsigned SubReg=0) const
Add a virtual register definition operand.
MachineInstrBuilder buildUndef(const DstOp &Res)
Build and insert Res = IMPLICIT_DEF.
Simple wrapper observer that takes several observers, and calls each one for each event...
bool isStaticAlloca() const
Return true if this alloca is in the entry block of the function and is a constant size...
const MachineInstrBuilder & addMBB(MachineBasicBlock *MBB, unsigned char TargetFlags=0) const
#define LLVM_DEBUG(X)
Definition: Debug.h:122
const MachineOperand & getOperand(unsigned i) const
Definition: MachineInstr.h:413
OutputIt copy(R &&Range, OutputIt Out)
Definition: STLExtras.h:1237
This represents the llvm.dbg.declare instruction.
Value * getPointerOperand()
Definition: Instructions.h:412
The optimization diagnostic interface.
Statically lint checks LLVM IR
Definition: Lint.cpp:192
constexpr char Args[]
Key for Kernel::Metadata::mArgs.
int64_t getIndexedOffsetInType(Type *ElemTy, ArrayRef< Value *> Indices) const
Returns the offset from the beginning of the type for the specified indices.
Definition: DataLayout.cpp:806
bool isVolatile() const
Return true if this is a RMW on a volatile memory location.
Definition: Instructions.h:774
0 0 0 0 Always false (always folded)
Definition: InstrTypes.h:648
IntegerType * Int32Ty
This file describes how to lower LLVM code to machine code.
const BasicBlock * getParent() const
Definition: Instruction.h:66
virtual const TargetRegisterClass * getPointerRegClass(const MachineFunction &MF, unsigned Kind=0) const
Returns a TargetRegisterClass used for pointer values.
an instruction to allocate memory on the stack
Definition: Instructions.h:59
This instruction inserts a struct field of array element value into an aggregate value.
gep_type_iterator gep_type_begin(const User *GEP)
size_type count(const T &V) const
count - Return 1 if the element is in the set, 0 otherwise.
Definition: SmallSet.h:164