//===- llvm/CodeGen/GlobalISel/IRTranslator.cpp - IRTranslator ---*- C++ -*-==//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
/// \file
/// This file implements the IRTranslator class.
//===----------------------------------------------------------------------===//

#include "llvm/CodeGen/GlobalISel/IRTranslator.h"
#include "llvm/ADT/PostOrderIterator.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/ScopeExit.h"
#include "llvm/ADT/SmallSet.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/Analysis/OptimizationRemarkEmitter.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/CodeGen/Analysis.h"
#include "llvm/CodeGen/GlobalISel/CSEInfo.h"
#include "llvm/CodeGen/GlobalISel/CallLowering.h"
#include "llvm/CodeGen/GlobalISel/GISelChangeObserver.h"
#include "llvm/CodeGen/LowLevelType.h"
#include "llvm/CodeGen/MachineBasicBlock.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineMemOperand.h"
#include "llvm/CodeGen/MachineOperand.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/StackProtector.h"
#include "llvm/CodeGen/TargetFrameLowering.h"
#include "llvm/CodeGen/TargetLowering.h"
#include "llvm/CodeGen/TargetPassConfig.h"
#include "llvm/CodeGen/TargetRegisterInfo.h"
#include "llvm/CodeGen/TargetSubtargetInfo.h"
#include "llvm/IR/BasicBlock.h"
#include "llvm/IR/CFG.h"
#include "llvm/IR/Constant.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DebugInfo.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/GetElementPtrTypeIterator.h"
#include "llvm/IR/InlineAsm.h"
#include "llvm/IR/InstrTypes.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/Metadata.h"
#include "llvm/IR/Type.h"
#include "llvm/IR/User.h"
#include "llvm/IR/Value.h"
#include "llvm/MC/MCContext.h"
#include "llvm/Pass.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/CodeGen.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/LowLevelTypeImpl.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Target/TargetIntrinsicInfo.h"
#include "llvm/Target/TargetMachine.h"
#include <algorithm>
#include <cassert>
#include <cstdint>
#include <iterator>
#include <string>
#include <utility>
#include <vector>

#define DEBUG_TYPE "irtranslator"

using namespace llvm;

static cl::opt<bool>
    EnableCSEInIRTranslator("enable-cse-in-irtranslator",
                            cl::desc("Should enable CSE in irtranslator"),
                            cl::Optional, cl::init(false));
char IRTranslator::ID = 0;

INITIALIZE_PASS_BEGIN(IRTranslator, DEBUG_TYPE, "IRTranslator LLVM IR -> MI",
                false, false)
INITIALIZE_PASS_DEPENDENCY(TargetPassConfig)
INITIALIZE_PASS_DEPENDENCY(GISelCSEAnalysisWrapperPass)
INITIALIZE_PASS_END(IRTranslator, DEBUG_TYPE, "IRTranslator LLVM IR -> MI",
                false, false)

static void reportTranslationError(MachineFunction &MF,
                                   const TargetPassConfig &TPC,
                                   OptimizationRemarkEmitter &ORE,
                                   OptimizationRemarkMissed &R) {
  MF.getProperties().set(MachineFunctionProperties::Property::FailedISel);

  // Print the function name explicitly if we don't have a debug location
  // (which makes the diagnostic less useful) or if we're going to emit a raw
  // error.
  if (!R.getLocation().isValid() || TPC.isGlobalISelAbortEnabled())
    R << (" (in function: " + MF.getName() + ")").str();

  if (TPC.isGlobalISelAbortEnabled())
    report_fatal_error(R.getMsg());
  else
    ORE.emit(R);
}

IRTranslator::IRTranslator() : MachineFunctionPass(ID) {
  initializeIRTranslatorPass(*PassRegistry::getPassRegistry());
}

#ifndef NDEBUG
namespace {
/// Verify that every instruction created has the same DILocation as the
/// instruction being translated.
class DILocationVerifier : public GISelChangeObserver {
  const Instruction *CurrInst = nullptr;

public:
  DILocationVerifier() = default;
  ~DILocationVerifier() = default;

  const Instruction *getCurrentInst() const { return CurrInst; }
  void setCurrentInst(const Instruction *Inst) { CurrInst = Inst; }

  void erasingInstr(MachineInstr &MI) override {}
  void changingInstr(MachineInstr &MI) override {}
  void changedInstr(MachineInstr &MI) override {}

  void createdInstr(MachineInstr &MI) override {
    assert(getCurrentInst() && "Inserted instruction without a current MI");

    // Only print the check message if we're actually checking it.
#ifndef NDEBUG
    LLVM_DEBUG(dbgs() << "Checking DILocation from " << *CurrInst
                      << " was copied to " << MI);
#endif
    assert(CurrInst->getDebugLoc() == MI.getDebugLoc() &&
           "Line info was not transferred to all instructions");
  }
};
} // namespace
#endif // ifndef NDEBUG

void IRTranslator::getAnalysisUsage(AnalysisUsage &AU) const {
  AU.addRequired<StackProtector>();
  AU.addRequired<TargetPassConfig>();
  AU.addRequired<GISelCSEAnalysisWrapperPass>();
  getSelectionDAGFallbackAnalysisUsage(AU);
  MachineFunctionPass::getAnalysisUsage(AU);
}

ArrayRef<unsigned>
IRTranslator::allocateVRegs(const Value &Val) {
  assert(!VMap.contains(Val) && "Value already allocated in VMap");
  auto *Regs = VMap.getVRegs(Val);
  auto *Offsets = VMap.getOffsets(Val);
  SmallVector<LLT, 4> SplitTys;
  computeValueLLTs(*DL, *Val.getType(), SplitTys,
                   Offsets->empty() ? Offsets : nullptr);
  for (unsigned i = 0; i < SplitTys.size(); ++i)
    Regs->push_back(0);
  return *Regs;
}

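// Note: values whose type splits into several LLTs occupy one vreg per
// component, with bit offsets recorded alongside them in VMap. A rough
// illustration (schematic, for a hypothetical 32-bit target):
//
//   // %v : {i8, i32}  -->  two vregs, recorded at offsets 0 and 32 bits.
//   ArrayRef<unsigned> Parts = getOrCreateVRegs(*V);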
ArrayRef<unsigned> IRTranslator::getOrCreateVRegs(const Value &Val) {
  auto VRegsIt = VMap.findVRegs(Val);
  if (VRegsIt != VMap.vregs_end())
    return *VRegsIt->second;

  if (Val.getType()->isVoidTy())
    return *VMap.getVRegs(Val);

  // Create entry for this type.
  auto *VRegs = VMap.getVRegs(Val);
  auto *Offsets = VMap.getOffsets(Val);

  assert(Val.getType()->isSized() &&
         "Don't know how to create an empty vreg");

  SmallVector<LLT, 4> SplitTys;
  computeValueLLTs(*DL, *Val.getType(), SplitTys,
                   Offsets->empty() ? Offsets : nullptr);

  if (!isa<Constant>(Val)) {
    for (auto Ty : SplitTys)
      VRegs->push_back(MRI->createGenericVirtualRegister(Ty));
    return *VRegs;
  }

  if (Val.getType()->isAggregateType()) {
    // UndefValue, ConstantAggregateZero
    auto &C = cast<Constant>(Val);
    unsigned Idx = 0;
    while (auto Elt = C.getAggregateElement(Idx++)) {
      auto EltRegs = getOrCreateVRegs(*Elt);
      llvm::copy(EltRegs, std::back_inserter(*VRegs));
    }
  } else {
    assert(SplitTys.size() == 1 && "unexpectedly split LLT");
    VRegs->push_back(MRI->createGenericVirtualRegister(SplitTys[0]));
    bool Success = translate(cast<Constant>(Val), VRegs->front());
    if (!Success) {
      OptimizationRemarkMissed R("gisel-irtranslator", "GISelFailure",
                                 MF->getFunction().getSubprogram(),
                                 &MF->getFunction().getEntryBlock());
      R << "unable to translate constant: " << ore::NV("Type", Val.getType());
      reportTranslationError(*MF, *TPC, *ORE, R);
      return *VRegs;
    }
  }

  return *VRegs;
}

int IRTranslator::getOrCreateFrameIndex(const AllocaInst &AI) {
  if (FrameIndices.find(&AI) != FrameIndices.end())
    return FrameIndices[&AI];

  unsigned ElementSize = DL->getTypeAllocSize(AI.getAllocatedType());
  unsigned Size =
      ElementSize * cast<ConstantInt>(AI.getArraySize())->getZExtValue();

  // Always allocate at least one byte.
  Size = std::max(Size, 1u);

  unsigned Alignment = AI.getAlignment();
  if (!Alignment)
    Alignment = DL->getABITypeAlignment(AI.getAllocatedType());

  int &FI = FrameIndices[&AI];
  FI = MF->getFrameInfo().CreateStackObject(Size, Alignment, false, &AI);
  return FI;
}

unsigned IRTranslator::getMemOpAlignment(const Instruction &I) {
  unsigned Alignment = 0;
  Type *ValTy = nullptr;
  if (const StoreInst *SI = dyn_cast<StoreInst>(&I)) {
    Alignment = SI->getAlignment();
    ValTy = SI->getValueOperand()->getType();
  } else if (const LoadInst *LI = dyn_cast<LoadInst>(&I)) {
    Alignment = LI->getAlignment();
    ValTy = LI->getType();
  } else if (const AtomicCmpXchgInst *AI = dyn_cast<AtomicCmpXchgInst>(&I)) {
    // TODO(PR27168): This instruction has no alignment attribute, but unlike
    // the default alignment for load/store, the default here is to assume
    // it has NATURAL alignment, not DataLayout-specified alignment.
    const DataLayout &DL = AI->getModule()->getDataLayout();
    Alignment = DL.getTypeStoreSize(AI->getCompareOperand()->getType());
    ValTy = AI->getCompareOperand()->getType();
  } else if (const AtomicRMWInst *AI = dyn_cast<AtomicRMWInst>(&I)) {
    // TODO(PR27168): This instruction has no alignment attribute, but unlike
    // the default alignment for load/store, the default here is to assume
    // it has NATURAL alignment, not DataLayout-specified alignment.
    const DataLayout &DL = AI->getModule()->getDataLayout();
    Alignment = DL.getTypeStoreSize(AI->getValOperand()->getType());
    ValTy = AI->getType();
  } else {
    OptimizationRemarkMissed R("gisel-irtranslator", "", &I);
    R << "unable to translate memop: " << ore::NV("Opcode", &I);
    reportTranslationError(*MF, *TPC, *ORE, R);
    return 1;
  }

  return Alignment ? Alignment : DL->getABITypeAlignment(ValTy);
}

MachineBasicBlock &IRTranslator::getMBB(const BasicBlock &BB) {
  MachineBasicBlock *&MBB = BBToMBB[&BB];
  assert(MBB && "BasicBlock was not encountered before");
  return *MBB;
}

void IRTranslator::addMachineCFGPred(CFGEdge Edge, MachineBasicBlock *NewPred) {
  assert(NewPred && "new predecessor must be a real MachineBasicBlock");
  MachinePreds[Edge].push_back(NewPred);
}

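// Binary operators map one-to-one onto generic opcodes; for example, the IR
// instruction "%res = add i32 %a, %b" becomes roughly
// "%res:_(s32) = G_ADD %a, %b" (MIR shown schematically, not verbatim output).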
bool IRTranslator::translateBinaryOp(unsigned Opcode, const User &U,
                                     MachineIRBuilder &MIRBuilder) {
  // FIXME: handle signed/unsigned wrapping flags.

  // Get or create a virtual register for each value.
  // Unless the value is a Constant => loadimm cst?
  // or inline constant each time?
  // Creation of a virtual register needs to have a size.
  unsigned Op0 = getOrCreateVReg(*U.getOperand(0));
  unsigned Op1 = getOrCreateVReg(*U.getOperand(1));
  unsigned Res = getOrCreateVReg(U);
  uint16_t Flags = 0;
  if (isa<Instruction>(U)) {
    const Instruction &I = cast<Instruction>(U);
    Flags = MachineInstr::copyFlagsFromInstruction(I);
  }

  MIRBuilder.buildInstr(Opcode, {Res}, {Op0, Op1}, Flags);
  return true;
}

bool IRTranslator::translateFSub(const User &U, MachineIRBuilder &MIRBuilder) {
  // -0.0 - X --> G_FNEG
  if (isa<Constant>(U.getOperand(0)) &&
      U.getOperand(0) == ConstantFP::getZeroValueForNegation(U.getType())) {
    MIRBuilder.buildInstr(TargetOpcode::G_FNEG)
        .addDef(getOrCreateVReg(U))
        .addUse(getOrCreateVReg(*U.getOperand(1)));
    return true;
  }
  return translateBinaryOp(TargetOpcode::G_FSUB, U, MIRBuilder);
}

bool IRTranslator::translateFNeg(const User &U, MachineIRBuilder &MIRBuilder) {
  MIRBuilder.buildInstr(TargetOpcode::G_FNEG)
      .addDef(getOrCreateVReg(U))
      .addUse(getOrCreateVReg(*U.getOperand(0)));
  return true;
}

bool IRTranslator::translateCompare(const User &U,
                                    MachineIRBuilder &MIRBuilder) {
  const CmpInst *CI = dyn_cast<CmpInst>(&U);
  unsigned Op0 = getOrCreateVReg(*U.getOperand(0));
  unsigned Op1 = getOrCreateVReg(*U.getOperand(1));
  unsigned Res = getOrCreateVReg(U);
  CmpInst::Predicate Pred =
      CI ? CI->getPredicate() : static_cast<CmpInst::Predicate>(
                                    cast<ConstantExpr>(U).getPredicate());
  if (CmpInst::isIntPredicate(Pred))
    MIRBuilder.buildICmp(Pred, Res, Op0, Op1);
  else if (Pred == CmpInst::FCMP_FALSE)
    MIRBuilder.buildCopy(
        Res, getOrCreateVReg(*Constant::getNullValue(CI->getType())));
  else if (Pred == CmpInst::FCMP_TRUE)
    MIRBuilder.buildCopy(
        Res, getOrCreateVReg(*Constant::getAllOnesValue(CI->getType())));
  else {
    MIRBuilder.buildInstr(TargetOpcode::G_FCMP, {Res}, {Pred, Op0, Op1},
                          MachineInstr::copyFlagsFromInstruction(*CI));
  }

  return true;
}

bool IRTranslator::translateRet(const User &U, MachineIRBuilder &MIRBuilder) {
  const ReturnInst &RI = cast<ReturnInst>(U);
  const Value *Ret = RI.getReturnValue();
  if (Ret && DL->getTypeStoreSize(Ret->getType()) == 0)
    Ret = nullptr;

  ArrayRef<unsigned> VRegs;
  if (Ret)
    VRegs = getOrCreateVRegs(*Ret);

  unsigned SwiftErrorVReg = 0;
  if (CLI->supportSwiftError() && SwiftError.getFunctionArg()) {
    SwiftErrorVReg = SwiftError.getOrCreateVRegUseAt(
        &RI, &MIRBuilder.getMBB(), SwiftError.getFunctionArg());
  }

  // The target may mess up the insertion point, but that is not important
  // since a return is the last instruction of the block anyway.
  return CLI->lowerReturn(MIRBuilder, Ret, VRegs, SwiftErrorVReg);
}

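// A conditional IR "br i1 %c, label %t, label %f" is lowered as a G_BRCOND
// to %t followed by an unconditional branch to %f (the latter elided when %f
// is the layout successor); this sketch reflects the code below.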
bool IRTranslator::translateBr(const User &U, MachineIRBuilder &MIRBuilder) {
  const BranchInst &BrInst = cast<BranchInst>(U);
  unsigned Succ = 0;
  if (!BrInst.isUnconditional()) {
    // We want a G_BRCOND to the true BB followed by an unconditional branch.
    unsigned Tst = getOrCreateVReg(*BrInst.getCondition());
    const BasicBlock &TrueTgt = *cast<BasicBlock>(BrInst.getSuccessor(Succ++));
    MachineBasicBlock &TrueBB = getMBB(TrueTgt);
    MIRBuilder.buildBrCond(Tst, TrueBB);
  }

  const BasicBlock &BrTgt = *cast<BasicBlock>(BrInst.getSuccessor(Succ));
  MachineBasicBlock &TgtBB = getMBB(BrTgt);
  MachineBasicBlock &CurBB = MIRBuilder.getMBB();

  // If the unconditional target is the layout successor, fallthrough.
  if (!CurBB.isLayoutSuccessor(&TgtBB))
    MIRBuilder.buildBr(TgtBB);

  // Link successors.
  for (const BasicBlock *Succ : successors(&BrInst))
    CurBB.addSuccessor(&getMBB(*Succ));
  return true;
}

bool IRTranslator::translateSwitch(const User &U,
                                   MachineIRBuilder &MIRBuilder) {
  // For now, just translate as a chain of conditional branches.
  // FIXME: could we share most of the logic/code in
  // SelectionDAGBuilder::visitSwitch between SelectionDAG and GlobalISel?
  // At first sight, it seems most of the logic in there is independent of
  // SelectionDAG-specifics and a lot of work went in to optimize switch
  // lowering in there.

  const SwitchInst &SwInst = cast<SwitchInst>(U);
  const unsigned SwCondValue = getOrCreateVReg(*SwInst.getCondition());
  const BasicBlock *OrigBB = SwInst.getParent();

  LLT LLTi1 = getLLTForType(*Type::getInt1Ty(U.getContext()), *DL);
  for (auto &CaseIt : SwInst.cases()) {
    const unsigned CaseValueReg = getOrCreateVReg(*CaseIt.getCaseValue());
    const unsigned Tst = MRI->createGenericVirtualRegister(LLTi1);
    MIRBuilder.buildICmp(CmpInst::ICMP_EQ, Tst, CaseValueReg, SwCondValue);
    MachineBasicBlock &CurMBB = MIRBuilder.getMBB();
    const BasicBlock *TrueBB = CaseIt.getCaseSuccessor();
    MachineBasicBlock &TrueMBB = getMBB(*TrueBB);

    MIRBuilder.buildBrCond(Tst, TrueMBB);
    CurMBB.addSuccessor(&TrueMBB);
    addMachineCFGPred({OrigBB, TrueBB}, &CurMBB);

    MachineBasicBlock *FalseMBB =
        MF->CreateMachineBasicBlock(SwInst.getParent());
    // Insert the comparison blocks one after the other.
    MF->insert(std::next(CurMBB.getIterator()), FalseMBB);
    MIRBuilder.buildBr(*FalseMBB);
    CurMBB.addSuccessor(FalseMBB);

    MIRBuilder.setMBB(*FalseMBB);
  }
  // handle default case
  const BasicBlock *DefaultBB = SwInst.getDefaultDest();
  MachineBasicBlock &DefaultMBB = getMBB(*DefaultBB);
  MIRBuilder.buildBr(DefaultMBB);
  MachineBasicBlock &CurMBB = MIRBuilder.getMBB();
  CurMBB.addSuccessor(&DefaultMBB);
  addMachineCFGPred({OrigBB, DefaultBB}, &CurMBB);

  return true;
}

bool IRTranslator::translateIndirectBr(const User &U,
                                       MachineIRBuilder &MIRBuilder) {
  const IndirectBrInst &BrInst = cast<IndirectBrInst>(U);

  const unsigned Tgt = getOrCreateVReg(*BrInst.getAddress());
  MIRBuilder.buildBrIndirect(Tgt);

  // Link successors.
  MachineBasicBlock &CurBB = MIRBuilder.getMBB();
  for (const BasicBlock *Succ : successors(&BrInst))
    CurBB.addSuccessor(&getMBB(*Succ));

  return true;
}

static bool isSwiftError(const Value *V) {
  if (auto Arg = dyn_cast<Argument>(V))
    return Arg->hasSwiftErrorAttr();
  if (auto AI = dyn_cast<AllocaInst>(V))
    return AI->isSwiftError();
  return false;
}

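// Loads of split types become one G_LOAD per component: the base pointer is
// offset with materializeGEP and each piece gets its own MachineMemOperand
// whose alignment is MinAlign(base alignment, component offset).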
bool IRTranslator::translateLoad(const User &U, MachineIRBuilder &MIRBuilder) {
  const LoadInst &LI = cast<LoadInst>(U);

  auto Flags = LI.isVolatile() ? MachineMemOperand::MOVolatile
                               : MachineMemOperand::MONone;
  Flags |= MachineMemOperand::MOLoad;

  if (DL->getTypeStoreSize(LI.getType()) == 0)
    return true;

  ArrayRef<unsigned> Regs = getOrCreateVRegs(LI);
  ArrayRef<uint64_t> Offsets = *VMap.getOffsets(LI);
  unsigned Base = getOrCreateVReg(*LI.getPointerOperand());

  Type *OffsetIRTy = DL->getIntPtrType(LI.getPointerOperandType());
  LLT OffsetTy = getLLTForType(*OffsetIRTy, *DL);

  if (CLI->supportSwiftError() && isSwiftError(LI.getPointerOperand())) {
    assert(Regs.size() == 1 && "swifterror should be single pointer");
    unsigned VReg = SwiftError.getOrCreateVRegUseAt(&LI, &MIRBuilder.getMBB(),
                                                    LI.getPointerOperand());
    MIRBuilder.buildCopy(Regs[0], VReg);
    return true;
  }

  for (unsigned i = 0; i < Regs.size(); ++i) {
    unsigned Addr = 0;
    MIRBuilder.materializeGEP(Addr, Base, OffsetTy, Offsets[i] / 8);

    MachinePointerInfo Ptr(LI.getPointerOperand(), Offsets[i] / 8);
    unsigned BaseAlign = getMemOpAlignment(LI);
    auto MMO = MF->getMachineMemOperand(
        Ptr, Flags, (MRI->getType(Regs[i]).getSizeInBits() + 7) / 8,
        MinAlign(BaseAlign, Offsets[i] / 8), AAMDNodes(), nullptr,
        LI.getSyncScopeID(), LI.getOrdering());
    MIRBuilder.buildLoad(Regs[i], Addr, *MMO);
  }

  return true;
}

bool IRTranslator::translateStore(const User &U, MachineIRBuilder &MIRBuilder) {
  const StoreInst &SI = cast<StoreInst>(U);
  auto Flags = SI.isVolatile() ? MachineMemOperand::MOVolatile
                               : MachineMemOperand::MONone;
  Flags |= MachineMemOperand::MOStore;

  if (DL->getTypeStoreSize(SI.getValueOperand()->getType()) == 0)
    return true;

  ArrayRef<unsigned> Vals = getOrCreateVRegs(*SI.getValueOperand());
  ArrayRef<uint64_t> Offsets = *VMap.getOffsets(*SI.getValueOperand());
  unsigned Base = getOrCreateVReg(*SI.getPointerOperand());

  Type *OffsetIRTy = DL->getIntPtrType(SI.getPointerOperandType());
  LLT OffsetTy = getLLTForType(*OffsetIRTy, *DL);

  if (CLI->supportSwiftError() && isSwiftError(SI.getPointerOperand())) {
    assert(Vals.size() == 1 && "swifterror should be single pointer");

    unsigned VReg = SwiftError.getOrCreateVRegDefAt(&SI, &MIRBuilder.getMBB(),
                                                    SI.getPointerOperand());
    MIRBuilder.buildCopy(VReg, Vals[0]);
    return true;
  }

  for (unsigned i = 0; i < Vals.size(); ++i) {
    unsigned Addr = 0;
    MIRBuilder.materializeGEP(Addr, Base, OffsetTy, Offsets[i] / 8);

    MachinePointerInfo Ptr(SI.getPointerOperand(), Offsets[i] / 8);
    unsigned BaseAlign = getMemOpAlignment(SI);
    auto MMO = MF->getMachineMemOperand(
        Ptr, Flags, (MRI->getType(Vals[i]).getSizeInBits() + 7) / 8,
        MinAlign(BaseAlign, Offsets[i] / 8), AAMDNodes(), nullptr,
        SI.getSyncScopeID(), SI.getOrdering());
    MIRBuilder.buildStore(Vals[i], Addr, *MMO);
  }
  return true;
}

static uint64_t getOffsetFromIndices(const User &U, const DataLayout &DL) {
  const Value *Src = U.getOperand(0);
  Type *Int32Ty = Type::getInt32Ty(U.getContext());

  // getIndexedOffsetInType is designed for GEPs, so the first index is the
  // usual array element rather than looking into the actual aggregate.
  SmallVector<Value *, 1> Indices;
  Indices.push_back(ConstantInt::get(Int32Ty, 0));

  if (const ExtractValueInst *EVI = dyn_cast<ExtractValueInst>(&U)) {
    for (auto Idx : EVI->indices())
      Indices.push_back(ConstantInt::get(Int32Ty, Idx));
  } else if (const InsertValueInst *IVI = dyn_cast<InsertValueInst>(&U)) {
    for (auto Idx : IVI->indices())
      Indices.push_back(ConstantInt::get(Int32Ty, Idx));
  } else {
    for (unsigned i = 1; i < U.getNumOperands(); ++i)
      Indices.push_back(U.getOperand(i));
  }

  return 8 * static_cast<uint64_t>(
                 DL.getIndexedOffsetInType(Src->getType(), Indices));
}

bool IRTranslator::translateExtractValue(const User &U,
                                         MachineIRBuilder &MIRBuilder) {
  const Value *Src = U.getOperand(0);
  uint64_t Offset = getOffsetFromIndices(U, *DL);
  ArrayRef<unsigned> SrcRegs = getOrCreateVRegs(*Src);
  ArrayRef<uint64_t> Offsets = *VMap.getOffsets(*Src);
  unsigned Idx = llvm::lower_bound(Offsets, Offset) - Offsets.begin();
  auto &DstRegs = allocateVRegs(U);

  for (unsigned i = 0; i < DstRegs.size(); ++i)
    DstRegs[i] = SrcRegs[Idx++];

  return true;
}

bool IRTranslator::translateInsertValue(const User &U,
                                        MachineIRBuilder &MIRBuilder) {
  const Value *Src = U.getOperand(0);
  uint64_t Offset = getOffsetFromIndices(U, *DL);
  auto &DstRegs = allocateVRegs(U);
  ArrayRef<uint64_t> DstOffsets = *VMap.getOffsets(U);
  ArrayRef<unsigned> SrcRegs = getOrCreateVRegs(*Src);
  ArrayRef<unsigned> InsertedRegs = getOrCreateVRegs(*U.getOperand(1));
  auto InsertedIt = InsertedRegs.begin();

  for (unsigned i = 0; i < DstRegs.size(); ++i) {
    if (DstOffsets[i] >= Offset && InsertedIt != InsertedRegs.end())
      DstRegs[i] = *InsertedIt++;
    else
      DstRegs[i] = SrcRegs[i];
  }

  return true;
}

bool IRTranslator::translateSelect(const User &U,
                                   MachineIRBuilder &MIRBuilder) {
  unsigned Tst = getOrCreateVReg(*U.getOperand(0));
  ArrayRef<unsigned> ResRegs = getOrCreateVRegs(U);
  ArrayRef<unsigned> Op0Regs = getOrCreateVRegs(*U.getOperand(1));
  ArrayRef<unsigned> Op1Regs = getOrCreateVRegs(*U.getOperand(2));

  const SelectInst &SI = cast<SelectInst>(U);
  uint16_t Flags = 0;
  if (const CmpInst *Cmp = dyn_cast<CmpInst>(SI.getCondition()))
    Flags = MachineInstr::copyFlagsFromInstruction(*Cmp);

  for (unsigned i = 0; i < ResRegs.size(); ++i) {
    MIRBuilder.buildInstr(TargetOpcode::G_SELECT, {ResRegs[i]},
                          {Tst, Op0Regs[i], Op1Regs[i]}, Flags);
  }

  return true;
}

bool IRTranslator::translateBitCast(const User &U,
                                    MachineIRBuilder &MIRBuilder) {
  // If we're bitcasting to the source type, we can reuse the source vreg.
  if (getLLTForType(*U.getOperand(0)->getType(), *DL) ==
      getLLTForType(*U.getType(), *DL)) {
    unsigned SrcReg = getOrCreateVReg(*U.getOperand(0));
    auto &Regs = *VMap.getVRegs(U);
    // If we already assigned a vreg for this bitcast, we can't change that.
    // Emit a copy to satisfy the users we already emitted.
    if (!Regs.empty())
      MIRBuilder.buildCopy(Regs[0], SrcReg);
    else {
      Regs.push_back(SrcReg);
      VMap.getOffsets(U)->push_back(0);
    }
    return true;
  }
  return translateCast(TargetOpcode::G_BITCAST, U, MIRBuilder);
}

bool IRTranslator::translateCast(unsigned Opcode, const User &U,
                                 MachineIRBuilder &MIRBuilder) {
  unsigned Op = getOrCreateVReg(*U.getOperand(0));
  unsigned Res = getOrCreateVReg(U);
  MIRBuilder.buildInstr(Opcode, {Res}, {Op});
  return true;
}

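// GEPs are lowered to pointer arithmetic: constant indices are folded into a
// running byte offset, while variable indices emit a G_MUL (index * element
// size) followed by a G_GEP. For example, "getelementptr i32, i32* %p, i64 %i"
// becomes roughly a G_MUL by 4 and a G_GEP (schematic, not verbatim MIR).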
bool IRTranslator::translateGetElementPtr(const User &U,
                                          MachineIRBuilder &MIRBuilder) {
  // FIXME: support vector GEPs.
  if (U.getType()->isVectorTy())
    return false;

  Value &Op0 = *U.getOperand(0);
  unsigned BaseReg = getOrCreateVReg(Op0);
  Type *PtrIRTy = Op0.getType();
  LLT PtrTy = getLLTForType(*PtrIRTy, *DL);
  Type *OffsetIRTy = DL->getIntPtrType(PtrIRTy);
  LLT OffsetTy = getLLTForType(*OffsetIRTy, *DL);

  int64_t Offset = 0;
  for (gep_type_iterator GTI = gep_type_begin(&U), E = gep_type_end(&U);
       GTI != E; ++GTI) {
    const Value *Idx = GTI.getOperand();
    if (StructType *StTy = GTI.getStructTypeOrNull()) {
      unsigned Field = cast<Constant>(Idx)->getUniqueInteger().getZExtValue();
      Offset += DL->getStructLayout(StTy)->getElementOffset(Field);
      continue;
    } else {
      uint64_t ElementSize = DL->getTypeAllocSize(GTI.getIndexedType());

      // If this is a scalar constant or a splat vector of constants,
      // handle it quickly.
      if (const auto *CI = dyn_cast<ConstantInt>(Idx)) {
        Offset += ElementSize * CI->getSExtValue();
        continue;
      }

      if (Offset != 0) {
        unsigned NewBaseReg = MRI->createGenericVirtualRegister(PtrTy);
        LLT OffsetTy = getLLTForType(*OffsetIRTy, *DL);
        auto OffsetMIB = MIRBuilder.buildConstant({OffsetTy}, Offset);
        MIRBuilder.buildGEP(NewBaseReg, BaseReg, OffsetMIB.getReg(0));

        BaseReg = NewBaseReg;
        Offset = 0;
      }

      unsigned IdxReg = getOrCreateVReg(*Idx);
      if (MRI->getType(IdxReg) != OffsetTy) {
        unsigned NewIdxReg = MRI->createGenericVirtualRegister(OffsetTy);
        MIRBuilder.buildSExtOrTrunc(NewIdxReg, IdxReg);
        IdxReg = NewIdxReg;
      }

      // N = N + Idx * ElementSize;
      // Avoid doing it for ElementSize of 1.
      unsigned GepOffsetReg;
      if (ElementSize != 1) {
        GepOffsetReg = MRI->createGenericVirtualRegister(OffsetTy);
        auto ElementSizeMIB = MIRBuilder.buildConstant(
            getLLTForType(*OffsetIRTy, *DL), ElementSize);
        MIRBuilder.buildMul(GepOffsetReg, ElementSizeMIB.getReg(0), IdxReg);
      } else
        GepOffsetReg = IdxReg;

      unsigned NewBaseReg = MRI->createGenericVirtualRegister(PtrTy);
      MIRBuilder.buildGEP(NewBaseReg, BaseReg, GepOffsetReg);
      BaseReg = NewBaseReg;
    }
  }

  if (Offset != 0) {
    auto OffsetMIB =
        MIRBuilder.buildConstant(getLLTForType(*OffsetIRTy, *DL), Offset);
    MIRBuilder.buildGEP(getOrCreateVReg(U), BaseReg, OffsetMIB.getReg(0));
    return true;
  }

  MIRBuilder.buildCopy(getOrCreateVReg(U), BaseReg);
  return true;
}

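// memcpy/memmove/memset intrinsics are lowered to plain libcalls here
// (address space 0 only); at this point there is no dedicated generic memcpy
// opcode, so CallLowering::lowerCall does the work.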
bool IRTranslator::translateMemfunc(const CallInst &CI,
                                    MachineIRBuilder &MIRBuilder,
                                    unsigned ID) {
  LLT SizeTy = getLLTForType(*CI.getArgOperand(2)->getType(), *DL);
  Type *DstTy = CI.getArgOperand(0)->getType();
  if (cast<PointerType>(DstTy)->getAddressSpace() != 0 ||
      SizeTy.getSizeInBits() != DL->getPointerSizeInBits(0))
    return false;

  SmallVector<CallLowering::ArgInfo, 8> Args;
  for (int i = 0; i < 3; ++i) {
    const auto &Arg = CI.getArgOperand(i);
    Args.emplace_back(getOrCreateVReg(*Arg), Arg->getType());
  }

  const char *Callee;
  switch (ID) {
  case Intrinsic::memmove:
  case Intrinsic::memcpy: {
    Type *SrcTy = CI.getArgOperand(1)->getType();
    if (cast<PointerType>(SrcTy)->getAddressSpace() != 0)
      return false;
    Callee = ID == Intrinsic::memcpy ? "memcpy" : "memmove";
    break;
  }
  case Intrinsic::memset:
    Callee = "memset";
    break;
  default:
    return false;
  }

  return CLI->lowerCall(MIRBuilder, CI.getCallingConv(),
                        MachineOperand::CreateES(Callee),
                        CallLowering::ArgInfo(0, CI.getType()), Args);
}

void IRTranslator::getStackGuard(unsigned DstReg,
                                 MachineIRBuilder &MIRBuilder) {
  const TargetRegisterInfo *TRI = MF->getSubtarget().getRegisterInfo();
  MRI->setRegClass(DstReg, TRI->getPointerRegClass(*MF));
  auto MIB = MIRBuilder.buildInstr(TargetOpcode::LOAD_STACK_GUARD);
  MIB.addDef(DstReg);

  auto &TLI = *MF->getSubtarget().getTargetLowering();
  Value *Global = TLI.getSDagStackGuard(*MF->getFunction().getParent());
  if (!Global)
    return;

  MachinePointerInfo MPInfo(Global);
  auto Flags = MachineMemOperand::MOLoad | MachineMemOperand::MOInvariant |
               MachineMemOperand::MODereferenceable;
  MachineMemOperand *MemRef =
      MF->getMachineMemOperand(MPInfo, Flags, DL->getPointerSizeInBits() / 8,
                               DL->getPointerABIAlignment(0));
  MIB.setMemRefs({MemRef});
}

bool IRTranslator::translateOverflowIntrinsic(const CallInst &CI, unsigned Op,
                                              MachineIRBuilder &MIRBuilder) {
  ArrayRef<unsigned> ResRegs = getOrCreateVRegs(CI);
  MIRBuilder.buildInstr(Op)
      .addDef(ResRegs[0])
      .addDef(ResRegs[1])
      .addUse(getOrCreateVReg(*CI.getOperand(0)))
      .addUse(getOrCreateVReg(*CI.getOperand(1)));

  return true;
}

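// "Simple" intrinsics are those that translate directly to a single generic
// opcode with one def for the result and one use per argument, e.g.
// llvm.sqrt.f32 -> G_FSQRT; everything else is handled case by case in
// translateKnownIntrinsic below.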
unsigned IRTranslator::getSimpleIntrinsicOpcode(Intrinsic::ID ID) {
  switch (ID) {
  default:
    break;
  case Intrinsic::bswap:
    return TargetOpcode::G_BSWAP;
  case Intrinsic::ceil:
    return TargetOpcode::G_FCEIL;
  case Intrinsic::cos:
    return TargetOpcode::G_FCOS;
  case Intrinsic::ctpop:
    return TargetOpcode::G_CTPOP;
  case Intrinsic::exp:
    return TargetOpcode::G_FEXP;
  case Intrinsic::exp2:
    return TargetOpcode::G_FEXP2;
  case Intrinsic::fabs:
    return TargetOpcode::G_FABS;
  case Intrinsic::copysign:
    return TargetOpcode::G_FCOPYSIGN;
  case Intrinsic::canonicalize:
    return TargetOpcode::G_FCANONICALIZE;
  case Intrinsic::floor:
    return TargetOpcode::G_FFLOOR;
  case Intrinsic::fma:
    return TargetOpcode::G_FMA;
  case Intrinsic::log:
    return TargetOpcode::G_FLOG;
  case Intrinsic::log2:
    return TargetOpcode::G_FLOG2;
  case Intrinsic::log10:
    return TargetOpcode::G_FLOG10;
  case Intrinsic::nearbyint:
    return TargetOpcode::G_FNEARBYINT;
  case Intrinsic::pow:
    return TargetOpcode::G_FPOW;
  case Intrinsic::rint:
    return TargetOpcode::G_FRINT;
  case Intrinsic::round:
    return TargetOpcode::G_INTRINSIC_ROUND;
  case Intrinsic::sin:
    return TargetOpcode::G_FSIN;
  case Intrinsic::sqrt:
    return TargetOpcode::G_FSQRT;
  case Intrinsic::trunc:
    return TargetOpcode::G_INTRINSIC_TRUNC;
  }
  return Intrinsic::not_intrinsic;
}

bool IRTranslator::translateSimpleIntrinsic(const CallInst &CI,
                                            Intrinsic::ID ID,
                                            MachineIRBuilder &MIRBuilder) {

  unsigned Op = getSimpleIntrinsicOpcode(ID);

  // Is this a simple intrinsic?
  if (Op == Intrinsic::not_intrinsic)
    return false;

  // Yes. Let's translate it.
  SmallVector<unsigned, 8> VRegs;
  for (auto &Arg : CI.arg_operands())
    VRegs.push_back(getOrCreateVReg(*Arg));

  MIRBuilder.buildInstr(Op, {getOrCreateVReg(CI)}, VRegs,
                        MachineInstr::copyFlagsFromInstruction(CI));
  return true;
}

bool IRTranslator::translateKnownIntrinsic(const CallInst &CI, Intrinsic::ID ID,
                                           MachineIRBuilder &MIRBuilder) {

  // If this is a simple intrinsic (that is, we just need to add a def of
  // a vreg and uses for each arg operand), then translate it.
  if (translateSimpleIntrinsic(CI, ID, MIRBuilder))
    return true;

  switch (ID) {
  default:
    break;
  case Intrinsic::lifetime_start:
  case Intrinsic::lifetime_end: {
    // No stack colouring in O0, discard region information.
    if (MF->getTarget().getOptLevel() == CodeGenOpt::None)
      return true;

    unsigned Op = ID == Intrinsic::lifetime_start ? TargetOpcode::LIFETIME_START
                                                  : TargetOpcode::LIFETIME_END;

    // Get the underlying objects for the location passed on the lifetime
    // marker.
    SmallVector<const Value *, 4> Allocas;
    GetUnderlyingObjects(CI.getArgOperand(1), Allocas, *DL);

    // Iterate over each underlying object, creating lifetime markers for each
    // static alloca. Quit if we find a non-static alloca.
    for (const Value *V : Allocas) {
      const AllocaInst *AI = dyn_cast<AllocaInst>(V);
      if (!AI)
        continue;

      if (!AI->isStaticAlloca())
        return true;

      MIRBuilder.buildInstr(Op).addFrameIndex(getOrCreateFrameIndex(*AI));
    }
    return true;
  }
  case Intrinsic::dbg_declare: {
    const DbgDeclareInst &DI = cast<DbgDeclareInst>(CI);
    assert(DI.getVariable() && "Missing variable");

    const Value *Address = DI.getAddress();
    if (!Address || isa<UndefValue>(Address)) {
      LLVM_DEBUG(dbgs() << "Dropping debug info for " << DI << "\n");
      return true;
    }

    assert(DI.getVariable()->isValidLocationForIntrinsic(
               MIRBuilder.getDebugLoc()) &&
           "Expected inlined-at fields to agree");
    auto AI = dyn_cast<AllocaInst>(Address);
    if (AI && AI->isStaticAlloca()) {
      // Static allocas are tracked at the MF level, no need for DBG_VALUE
      // instructions (in fact, they get ignored if they *do* exist).
      MF->setVariableDbgInfo(DI.getVariable(), DI.getExpression(),
                             getOrCreateFrameIndex(*AI), DI.getDebugLoc());
    } else {
      // A dbg.declare describes the address of a source variable, so lower it
      // into an indirect DBG_VALUE.
      MIRBuilder.buildIndirectDbgValue(getOrCreateVReg(*Address),
                                       DI.getVariable(), DI.getExpression());
    }
    return true;
  }
  case Intrinsic::dbg_label: {
    const DbgLabelInst &DI = cast<DbgLabelInst>(CI);
    assert(DI.getLabel() && "Missing label");

    assert(DI.getLabel()->isValidLocationForIntrinsic(
               MIRBuilder.getDebugLoc()) &&
           "Expected inlined-at fields to agree");

    MIRBuilder.buildDbgLabel(DI.getLabel());
    return true;
  }
  case Intrinsic::vaend:
    // No target I know of cares about va_end. Certainly no in-tree target
    // does. Simplest intrinsic ever!
    return true;
  case Intrinsic::vastart: {
    auto &TLI = *MF->getSubtarget().getTargetLowering();
    Value *Ptr = CI.getArgOperand(0);
    unsigned ListSize = TLI.getVaListSizeInBits(*DL) / 8;

    // FIXME: Get alignment
    MIRBuilder.buildInstr(TargetOpcode::G_VASTART)
        .addUse(getOrCreateVReg(*Ptr))
        .addMemOperand(MF->getMachineMemOperand(
            MachinePointerInfo(Ptr), MachineMemOperand::MOStore, ListSize, 1));
    return true;
  }
  case Intrinsic::dbg_value: {
    // This form of DBG_VALUE is target-independent.
    const DbgValueInst &DI = cast<DbgValueInst>(CI);
    const Value *V = DI.getValue();
    assert(DI.getVariable()->isValidLocationForIntrinsic(
               MIRBuilder.getDebugLoc()) &&
           "Expected inlined-at fields to agree");
    if (!V) {
      // Currently the optimizer can produce this; insert an undef to
      // help debugging. Probably the optimizer should not do this.
      MIRBuilder.buildIndirectDbgValue(0, DI.getVariable(), DI.getExpression());
    } else if (const auto *CI = dyn_cast<Constant>(V)) {
      MIRBuilder.buildConstDbgValue(*CI, DI.getVariable(), DI.getExpression());
    } else {
      unsigned Reg = getOrCreateVReg(*V);
      // FIXME: This does not handle register-indirect values at offset 0. The
      // direct/indirect thing shouldn't really be handled by something as
      // implicit as reg+noreg vs reg+imm in the first place, but it seems
      // pretty baked in right now.
      MIRBuilder.buildDirectDbgValue(Reg, DI.getVariable(), DI.getExpression());
    }
    return true;
  }
  case Intrinsic::uadd_with_overflow:
    return translateOverflowIntrinsic(CI, TargetOpcode::G_UADDO, MIRBuilder);
  case Intrinsic::sadd_with_overflow:
    return translateOverflowIntrinsic(CI, TargetOpcode::G_SADDO, MIRBuilder);
  case Intrinsic::usub_with_overflow:
    return translateOverflowIntrinsic(CI, TargetOpcode::G_USUBO, MIRBuilder);
  case Intrinsic::ssub_with_overflow:
    return translateOverflowIntrinsic(CI, TargetOpcode::G_SSUBO, MIRBuilder);
  case Intrinsic::umul_with_overflow:
    return translateOverflowIntrinsic(CI, TargetOpcode::G_UMULO, MIRBuilder);
  case Intrinsic::smul_with_overflow:
    return translateOverflowIntrinsic(CI, TargetOpcode::G_SMULO, MIRBuilder);
  case Intrinsic::fmuladd: {
    const TargetMachine &TM = MF->getTarget();
    const TargetLowering &TLI = *MF->getSubtarget().getTargetLowering();
    unsigned Dst = getOrCreateVReg(CI);
    unsigned Op0 = getOrCreateVReg(*CI.getArgOperand(0));
    unsigned Op1 = getOrCreateVReg(*CI.getArgOperand(1));
    unsigned Op2 = getOrCreateVReg(*CI.getArgOperand(2));
    if (TM.Options.AllowFPOpFusion != FPOpFusion::Strict &&
        TLI.isFMAFasterThanFMulAndFAdd(TLI.getValueType(*DL, CI.getType()))) {
      // TODO: Revisit this to see if we should move this part of the
      // lowering to the combiner.
      MIRBuilder.buildInstr(TargetOpcode::G_FMA, {Dst}, {Op0, Op1, Op2},
                            MachineInstr::copyFlagsFromInstruction(CI));
    } else {
      LLT Ty = getLLTForType(*CI.getType(), *DL);
      auto FMul = MIRBuilder.buildInstr(TargetOpcode::G_FMUL, {Ty}, {Op0, Op1},
                                        MachineInstr::copyFlagsFromInstruction(CI));
      MIRBuilder.buildInstr(TargetOpcode::G_FADD, {Dst}, {FMul, Op2},
                            MachineInstr::copyFlagsFromInstruction(CI));
    }
    return true;
  }
  case Intrinsic::memcpy:
  case Intrinsic::memmove:
  case Intrinsic::memset:
    return translateMemfunc(CI, MIRBuilder, ID);
  case Intrinsic::eh_typeid_for: {
    GlobalValue *GV = ExtractTypeInfo(CI.getArgOperand(0));
    unsigned Reg = getOrCreateVReg(CI);
    unsigned TypeID = MF->getTypeIDFor(GV);
    MIRBuilder.buildConstant(Reg, TypeID);
    return true;
  }
  case Intrinsic::objectsize: {
    // If we don't know by now, we're never going to know.
    const ConstantInt *Min = cast<ConstantInt>(CI.getArgOperand(1));

    MIRBuilder.buildConstant(getOrCreateVReg(CI), Min->isZero() ? -1ULL : 0);
    return true;
  }
  case Intrinsic::is_constant:
    // If this wasn't constant-folded away by now, then it's not a
    // constant.
    MIRBuilder.buildConstant(getOrCreateVReg(CI), 0);
    return true;
  case Intrinsic::stackguard:
    getStackGuard(getOrCreateVReg(CI), MIRBuilder);
    return true;
  case Intrinsic::stackprotector: {
    LLT PtrTy = getLLTForType(*CI.getArgOperand(0)->getType(), *DL);
    unsigned GuardVal = MRI->createGenericVirtualRegister(PtrTy);
    getStackGuard(GuardVal, MIRBuilder);

    AllocaInst *Slot = cast<AllocaInst>(CI.getArgOperand(1));
    int FI = getOrCreateFrameIndex(*Slot);
    MF->getFrameInfo().setStackProtectorIndex(FI);

    MIRBuilder.buildStore(
        GuardVal, getOrCreateVReg(*Slot),
        *MF->getMachineMemOperand(MachinePointerInfo::getFixedStack(*MF, FI),
                                  MachineMemOperand::MOStore |
                                      MachineMemOperand::MOVolatile,
                                  PtrTy.getSizeInBits() / 8, 8));
    return true;
  }
  case Intrinsic::stacksave: {
    // Save the stack pointer to the location provided by the intrinsic.
    unsigned Reg = getOrCreateVReg(CI);
    unsigned StackPtr = MF->getSubtarget()
                            .getTargetLowering()
                            ->getStackPointerRegisterToSaveRestore();

    // If the target doesn't specify a stack pointer, then fall back.
    if (!StackPtr)
      return false;

    MIRBuilder.buildCopy(Reg, StackPtr);
    return true;
  }
  case Intrinsic::stackrestore: {
    // Restore the stack pointer from the location provided by the intrinsic.
    unsigned Reg = getOrCreateVReg(*CI.getArgOperand(0));
    unsigned StackPtr = MF->getSubtarget()
                            .getTargetLowering()
                            ->getStackPointerRegisterToSaveRestore();

    // If the target doesn't specify a stack pointer, then fall back.
    if (!StackPtr)
      return false;

    MIRBuilder.buildCopy(StackPtr, Reg);
    return true;
  }
  case Intrinsic::cttz:
  case Intrinsic::ctlz: {
    ConstantInt *Cst = cast<ConstantInt>(CI.getArgOperand(1));
    bool isTrailing = ID == Intrinsic::cttz;
    unsigned Opcode = isTrailing
                          ? Cst->isZero() ? TargetOpcode::G_CTTZ
                                          : TargetOpcode::G_CTTZ_ZERO_UNDEF
                          : Cst->isZero() ? TargetOpcode::G_CTLZ
                                          : TargetOpcode::G_CTLZ_ZERO_UNDEF;
    MIRBuilder.buildInstr(Opcode)
        .addDef(getOrCreateVReg(CI))
        .addUse(getOrCreateVReg(*CI.getArgOperand(0)));
    return true;
  }
  case Intrinsic::invariant_start: {
    LLT PtrTy = getLLTForType(*CI.getArgOperand(0)->getType(), *DL);
    unsigned Undef = MRI->createGenericVirtualRegister(PtrTy);
    MIRBuilder.buildUndef(Undef);
    return true;
  }
  case Intrinsic::invariant_end:
    return true;
  }
  return false;
}

bool IRTranslator::translateInlineAsm(const CallInst &CI,
                                      MachineIRBuilder &MIRBuilder) {
  const InlineAsm &IA = cast<InlineAsm>(*CI.getCalledValue());
  if (!IA.getConstraintString().empty())
    return false;

  unsigned ExtraInfo = 0;
  if (IA.hasSideEffects())
    ExtraInfo |= InlineAsm::Extra_HasSideEffects;
  if (IA.getDialect() == InlineAsm::AD_Intel)
    ExtraInfo |= InlineAsm::Extra_AsmDialect;

  MIRBuilder.buildInstr(TargetOpcode::INLINEASM)
      .addExternalSymbol(IA.getAsmString().c_str())
      .addImm(ExtraInfo);

  return true;
}

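// packRegs/unpackRegs bridge split values across call boundaries: the pieces
// of an aggregate are merged into one big vreg with a chain of G_INSERTs
// before the call, and split back out with G_EXTRACTs afterwards.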
unsigned IRTranslator::packRegs(const Value &V,
                                MachineIRBuilder &MIRBuilder) {
  ArrayRef<unsigned> Regs = getOrCreateVRegs(V);
  ArrayRef<uint64_t> Offsets = *VMap.getOffsets(V);
  LLT BigTy = getLLTForType(*V.getType(), *DL);

  if (Regs.size() == 1)
    return Regs[0];

  unsigned Dst = MRI->createGenericVirtualRegister(BigTy);
  MIRBuilder.buildUndef(Dst);
  for (unsigned i = 0; i < Regs.size(); ++i) {
    unsigned NewDst = MRI->createGenericVirtualRegister(BigTy);
    MIRBuilder.buildInsert(NewDst, Dst, Regs[i], Offsets[i]);
    Dst = NewDst;
  }
  return Dst;
}

void IRTranslator::unpackRegs(const Value &V, unsigned Src,
                              MachineIRBuilder &MIRBuilder) {
  ArrayRef<unsigned> Regs = getOrCreateVRegs(V);
  ArrayRef<uint64_t> Offsets = *VMap.getOffsets(V);

  for (unsigned i = 0; i < Regs.size(); ++i)
    MIRBuilder.buildExtract(Regs[i], Src, Offsets[i]);
}

bool IRTranslator::translateCall(const User &U, MachineIRBuilder &MIRBuilder) {
  const CallInst &CI = cast<CallInst>(U);
  auto TII = MF->getTarget().getIntrinsicInfo();
  const Function *F = CI.getCalledFunction();

  // FIXME: support Windows dllimport function calls.
  if (F && F->hasDLLImportStorageClass())
    return false;

  if (CI.isInlineAsm())
    return translateInlineAsm(CI, MIRBuilder);

  Intrinsic::ID ID = Intrinsic::not_intrinsic;
  if (F && F->isIntrinsic()) {
    ID = F->getIntrinsicID();
    if (TII && ID == Intrinsic::not_intrinsic)
      ID = static_cast<Intrinsic::ID>(TII->getIntrinsicID(F));
  }

  if (!F || !F->isIntrinsic() || ID == Intrinsic::not_intrinsic) {
    bool IsSplitType = valueIsSplit(CI);
    unsigned Res = IsSplitType ? MRI->createGenericVirtualRegister(
                                     getLLTForType(*CI.getType(), *DL))
                               : getOrCreateVReg(CI);

    SmallVector<unsigned, 8> Args;
    unsigned SwiftErrorVReg = 0;
    for (auto &Arg : CI.arg_operands()) {
      if (CLI->supportSwiftError() && isSwiftError(Arg)) {
        LLT Ty = getLLTForType(*Arg->getType(), *DL);
        unsigned InVReg = MRI->createGenericVirtualRegister(Ty);
        MIRBuilder.buildCopy(InVReg, SwiftError.getOrCreateVRegUseAt(
                                         &CI, &MIRBuilder.getMBB(), Arg));
        Args.push_back(InVReg);
        SwiftErrorVReg =
            SwiftError.getOrCreateVRegDefAt(&CI, &MIRBuilder.getMBB(), Arg);
        continue;
      }
      Args.push_back(packRegs(*Arg, MIRBuilder));
    }

    MF->getFrameInfo().setHasCalls(true);
    bool Success =
        CLI->lowerCall(MIRBuilder, &CI, Res, Args, SwiftErrorVReg,
                       [&]() { return getOrCreateVReg(*CI.getCalledValue()); });

    if (IsSplitType)
      unpackRegs(CI, Res, MIRBuilder);

    return Success;
  }

  assert(ID != Intrinsic::not_intrinsic && "unknown intrinsic");

  if (translateKnownIntrinsic(CI, ID, MIRBuilder))
    return true;

  ArrayRef<unsigned> ResultRegs;
  if (!CI.getType()->isVoidTy())
    ResultRegs = getOrCreateVRegs(CI);

  MachineInstrBuilder MIB =
      MIRBuilder.buildIntrinsic(ID, ResultRegs, !CI.doesNotAccessMemory());
  if (isa<FPMathOperator>(CI))
    MIB->copyIRFlags(CI);

  for (auto &Arg : CI.arg_operands()) {
    // Some intrinsics take metadata parameters. Reject them.
    if (isa<MetadataAsValue>(Arg))
      return false;
    MIB.addUse(packRegs(*Arg, MIRBuilder));
  }

  // Add a MachineMemOperand if it is a target mem intrinsic.
  const TargetLowering &TLI = *MF->getSubtarget().getTargetLowering();
  TargetLowering::IntrinsicInfo Info;
  // TODO: Add a GlobalISel version of getTgtMemIntrinsic.
  if (TLI.getTgtMemIntrinsic(Info, CI, *MF, ID)) {
    unsigned Align = Info.align;
    if (Align == 0)
      Align =
          DL->getABITypeAlignment(Info.memVT.getTypeForEVT(F->getContext()));

    uint64_t Size = Info.memVT.getStoreSize();
    MIB.addMemOperand(MF->getMachineMemOperand(MachinePointerInfo(Info.ptrVal),
                                               Info.flags, Size, Align));
  }

  return true;
}

bool IRTranslator::translateInvoke(const User &U,
                                   MachineIRBuilder &MIRBuilder) {
  const InvokeInst &I = cast<InvokeInst>(U);
  MCContext &Context = MF->getContext();

  const BasicBlock *ReturnBB = I.getSuccessor(0);
  const BasicBlock *EHPadBB = I.getSuccessor(1);

  const Value *Callee = I.getCalledValue();
  const Function *Fn = dyn_cast<Function>(Callee);
  if (isa<InlineAsm>(Callee))
    return false;

  // FIXME: support invoking patchpoint and statepoint intrinsics.
  if (Fn && Fn->isIntrinsic())
    return false;

  // FIXME: support whatever these are.
  if (I.countOperandBundlesOfType(LLVMContext::OB_deopt))
    return false;

  // FIXME: support Windows exception handling.
  if (!isa<LandingPadInst>(EHPadBB->front()))
    return false;

  // Emit the actual call, bracketed by EH_LABELs so that the MF knows about
  // the region covered by the try.
  MCSymbol *BeginSymbol = Context.createTempSymbol();
  MIRBuilder.buildInstr(TargetOpcode::EH_LABEL).addSym(BeginSymbol);

  unsigned Res = 0;
  if (!I.getType()->isVoidTy())
    Res = MRI->createGenericVirtualRegister(getLLTForType(*I.getType(), *DL));
  SmallVector<unsigned, 8> Args;
  unsigned SwiftErrorVReg = 0;
  for (auto &Arg : I.arg_operands()) {
    if (CLI->supportSwiftError() && isSwiftError(Arg)) {
      LLT Ty = getLLTForType(*Arg->getType(), *DL);
      unsigned InVReg = MRI->createGenericVirtualRegister(Ty);
      MIRBuilder.buildCopy(InVReg, SwiftError.getOrCreateVRegUseAt(
                                       &I, &MIRBuilder.getMBB(), Arg));
      Args.push_back(InVReg);
      SwiftErrorVReg =
          SwiftError.getOrCreateVRegDefAt(&I, &MIRBuilder.getMBB(), Arg);
      continue;
    }

    Args.push_back(packRegs(*Arg, MIRBuilder));
  }

  if (!CLI->lowerCall(MIRBuilder, &I, Res, Args, SwiftErrorVReg,
                      [&]() { return getOrCreateVReg(*I.getCalledValue()); }))
    return false;

  unpackRegs(I, Res, MIRBuilder);

  MCSymbol *EndSymbol = Context.createTempSymbol();
  MIRBuilder.buildInstr(TargetOpcode::EH_LABEL).addSym(EndSymbol);

  // FIXME: track probabilities.
  MachineBasicBlock &EHPadMBB = getMBB(*EHPadBB),
                    &ReturnMBB = getMBB(*ReturnBB);
  MF->addInvoke(&EHPadMBB, BeginSymbol, EndSymbol);
  MIRBuilder.getMBB().addSuccessor(&ReturnMBB);
  MIRBuilder.getMBB().addSuccessor(&EHPadMBB);
  MIRBuilder.buildBr(ReturnMBB);

  return true;
}

bool IRTranslator::translateCallBr(const User &U,
                                   MachineIRBuilder &MIRBuilder) {
  // FIXME: Implement this.
  return false;
}

bool IRTranslator::translateLandingPad(const User &U,
                                       MachineIRBuilder &MIRBuilder) {
  const LandingPadInst &LP = cast<LandingPadInst>(U);

  MachineBasicBlock &MBB = MIRBuilder.getMBB();

  MBB.setIsEHPad();

  // If there aren't registers to copy the values into (e.g., during SjLj
  // exceptions), then don't bother.
  auto &TLI = *MF->getSubtarget().getTargetLowering();
  const Constant *PersonalityFn = MF->getFunction().getPersonalityFn();
  if (TLI.getExceptionPointerRegister(PersonalityFn) == 0 &&
      TLI.getExceptionSelectorRegister(PersonalityFn) == 0)
    return true;

  // If landingpad's return type is token type, we don't create DAG nodes
  // for its exception pointer and selector value. The extraction of exception
  // pointer or selector value from token type landingpads is not currently
  // supported.
  if (LP.getType()->isTokenTy())
    return true;

  // Add a label to mark the beginning of the landing pad. Deletion of the
  // landing pad can thus be detected via the MachineModuleInfo.
  MIRBuilder.buildInstr(TargetOpcode::EH_LABEL)
      .addSym(MF->addLandingPad(&MBB));

  LLT Ty = getLLTForType(*LP.getType(), *DL);
  unsigned Undef = MRI->createGenericVirtualRegister(Ty);
  MIRBuilder.buildUndef(Undef);

  SmallVector<LLT, 2> Tys;
  for (Type *Ty : cast<StructType>(LP.getType())->elements())
    Tys.push_back(getLLTForType(*Ty, *DL));
  assert(Tys.size() == 2 && "Only two-valued landingpads are supported");

  // Mark exception register as live in.
  unsigned ExceptionReg = TLI.getExceptionPointerRegister(PersonalityFn);
  if (!ExceptionReg)
    return false;

  MBB.addLiveIn(ExceptionReg);
  ArrayRef<unsigned> ResRegs = getOrCreateVRegs(LP);
  MIRBuilder.buildCopy(ResRegs[0], ExceptionReg);

  unsigned SelectorReg = TLI.getExceptionSelectorRegister(PersonalityFn);
  if (!SelectorReg)
    return false;

  MBB.addLiveIn(SelectorReg);
  unsigned PtrVReg = MRI->createGenericVirtualRegister(Tys[0]);
  MIRBuilder.buildCopy(PtrVReg, SelectorReg);
  MIRBuilder.buildCast(ResRegs[1], PtrVReg);

  return true;
}

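// Static allocas become frame indices; dynamic ones are expanded inline:
// size = zext(NumElts) * alloc-size, the stack pointer is bumped with G_GEP
// (the negated type size makes the multiply produce a downward adjustment),
// and the result is re-aligned with G_PTR_MASK when needed.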
bool IRTranslator::translateAlloca(const User &U,
                                   MachineIRBuilder &MIRBuilder) {
  auto &AI = cast<AllocaInst>(U);

  if (AI.isSwiftError())
    return true;

  if (AI.isStaticAlloca()) {
    unsigned Res = getOrCreateVReg(AI);
    int FI = getOrCreateFrameIndex(AI);
    MIRBuilder.buildFrameIndex(Res, FI);
    return true;
  }

  // FIXME: support stack probing for Windows.
  if (MF->getTarget().getTargetTriple().isOSWindows())
    return false;

  // Now we're in the harder dynamic case.
  Type *Ty = AI.getAllocatedType();
  unsigned Align =
      std::max((unsigned)DL->getPrefTypeAlignment(Ty), AI.getAlignment());

  unsigned NumElts = getOrCreateVReg(*AI.getArraySize());

  Type *IntPtrIRTy = DL->getIntPtrType(AI.getType());
  LLT IntPtrTy = getLLTForType(*IntPtrIRTy, *DL);
  if (MRI->getType(NumElts) != IntPtrTy) {
    unsigned ExtElts = MRI->createGenericVirtualRegister(IntPtrTy);
    MIRBuilder.buildZExtOrTrunc(ExtElts, NumElts);
    NumElts = ExtElts;
  }

  unsigned AllocSize = MRI->createGenericVirtualRegister(IntPtrTy);
  unsigned TySize =
      getOrCreateVReg(*ConstantInt::get(IntPtrIRTy, -DL->getTypeAllocSize(Ty)));
  MIRBuilder.buildMul(AllocSize, NumElts, TySize);

  LLT PtrTy = getLLTForType(*AI.getType(), *DL);
  auto &TLI = *MF->getSubtarget().getTargetLowering();
  unsigned SPReg = TLI.getStackPointerRegisterToSaveRestore();

  unsigned SPTmp = MRI->createGenericVirtualRegister(PtrTy);
  MIRBuilder.buildCopy(SPTmp, SPReg);

  unsigned AllocTmp = MRI->createGenericVirtualRegister(PtrTy);
  MIRBuilder.buildGEP(AllocTmp, SPTmp, AllocSize);

  // Handle alignment. We have to realign if the allocation granule was smaller
  // than stack alignment, or the specific alloca requires more than stack
  // alignment.
  unsigned StackAlign =
      MF->getSubtarget().getFrameLowering()->getStackAlignment();
  Align = std::max(Align, StackAlign);
  if (Align > StackAlign || DL->getTypeAllocSize(Ty) % StackAlign != 0) {
    // Round the size of the allocation up to the stack alignment size
    // by adding SA-1 to the size. This doesn't overflow because we're
    // computing an address inside an alloca.
    unsigned AlignedAlloc = MRI->createGenericVirtualRegister(PtrTy);
    MIRBuilder.buildPtrMask(AlignedAlloc, AllocTmp, Log2_32(Align));
    AllocTmp = AlignedAlloc;
  }

  MIRBuilder.buildCopy(SPReg, AllocTmp);
  MIRBuilder.buildCopy(getOrCreateVReg(AI), AllocTmp);

  MF->getFrameInfo().CreateVariableSizedObject(Align ? Align : 1, &AI);
  assert(MF->getFrameInfo().hasVarSizedObjects());
  return true;
}

bool IRTranslator::translateVAArg(const User &U, MachineIRBuilder &MIRBuilder) {
  // FIXME: We may need more info about the type. Because of how LLT works,
  // we're completely discarding the i64/double distinction here (amongst
  // others). Fortunately the ABIs I know of where that matters don't use
  // va_arg anyway but that's not guaranteed.
  MIRBuilder.buildInstr(TargetOpcode::G_VAARG)
      .addDef(getOrCreateVReg(U))
      .addUse(getOrCreateVReg(*U.getOperand(0)))
      .addImm(DL->getABITypeAlignment(U.getType()));
  return true;
}

bool IRTranslator::translateInsertElement(const User &U,
                                          MachineIRBuilder &MIRBuilder) {
  // If it is a <1 x Ty> vector, use the scalar as it is
  // not a legal vector type in LLT.
  if (U.getType()->getVectorNumElements() == 1) {
    unsigned Elt = getOrCreateVReg(*U.getOperand(1));
    auto &Regs = *VMap.getVRegs(U);
    if (Regs.empty()) {
      Regs.push_back(Elt);
      VMap.getOffsets(U)->push_back(0);
    } else {
      MIRBuilder.buildCopy(Regs[0], Elt);
    }
    return true;
  }

  unsigned Res = getOrCreateVReg(U);
  unsigned Val = getOrCreateVReg(*U.getOperand(0));
  unsigned Elt = getOrCreateVReg(*U.getOperand(1));
  unsigned Idx = getOrCreateVReg(*U.getOperand(2));
  MIRBuilder.buildInsertVectorElement(Res, Val, Elt, Idx);
  return true;
}

bool IRTranslator::translateExtractElement(const User &U,
                                           MachineIRBuilder &MIRBuilder) {
  // If it is a <1 x Ty> vector, use the scalar as it is
  // not a legal vector type in LLT.
  if (U.getOperand(0)->getType()->getVectorNumElements() == 1) {
    unsigned Elt = getOrCreateVReg(*U.getOperand(0));
    auto &Regs = *VMap.getVRegs(U);
    if (Regs.empty()) {
      Regs.push_back(Elt);
      VMap.getOffsets(U)->push_back(0);
    } else {
      MIRBuilder.buildCopy(Regs[0], Elt);
    }
    return true;
  }
  unsigned Res = getOrCreateVReg(U);
  unsigned Val = getOrCreateVReg(*U.getOperand(0));
  const auto &TLI = *MF->getSubtarget().getTargetLowering();
  unsigned PreferredVecIdxWidth = TLI.getVectorIdxTy(*DL).getSizeInBits();
  unsigned Idx = 0;
  if (auto *CI = dyn_cast<ConstantInt>(U.getOperand(1))) {
    if (CI->getBitWidth() != PreferredVecIdxWidth) {
      APInt NewIdx = CI->getValue().sextOrTrunc(PreferredVecIdxWidth);
      auto *NewIdxCI = ConstantInt::get(CI->getContext(), NewIdx);
      Idx = getOrCreateVReg(*NewIdxCI);
    }
  }
  if (!Idx)
    Idx = getOrCreateVReg(*U.getOperand(1));
  if (MRI->getType(Idx).getSizeInBits() != PreferredVecIdxWidth) {
    const LLT &VecIdxTy = LLT::scalar(PreferredVecIdxWidth);
    Idx = MIRBuilder.buildSExtOrTrunc(VecIdxTy, Idx)->getOperand(0).getReg();
  }
  MIRBuilder.buildExtractVectorElement(Res, Val, Idx);
  return true;
}

bool IRTranslator::translateShuffleVector(const User &U,
                                          MachineIRBuilder &MIRBuilder) {
  MIRBuilder.buildInstr(TargetOpcode::G_SHUFFLE_VECTOR)
      .addDef(getOrCreateVReg(U))
      .addUse(getOrCreateVReg(*U.getOperand(0)))
      .addUse(getOrCreateVReg(*U.getOperand(1)))
      .addUse(getOrCreateVReg(*U.getOperand(2)));
  return true;
}

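// PHIs are created empty here (one G_PHI per component vreg) and their
// incoming (value, block) operands are filled in later by
// finishPendingPhis(), once every predecessor block has been translated.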
bool IRTranslator::translatePHI(const User &U, MachineIRBuilder &MIRBuilder) {
  const PHINode &PI = cast<PHINode>(U);

  SmallVector<MachineInstr *, 4> Insts;
  for (auto Reg : getOrCreateVRegs(PI)) {
    auto MIB = MIRBuilder.buildInstr(TargetOpcode::G_PHI, {Reg}, {});
    Insts.push_back(MIB.getInstr());
  }

  PendingPHIs.emplace_back(&PI, std::move(Insts));
  return true;
}

bool IRTranslator::translateAtomicCmpXchg(const User &U,
                                          MachineIRBuilder &MIRBuilder) {
  const AtomicCmpXchgInst &I = cast<AtomicCmpXchgInst>(U);

  if (I.isWeak())
    return false;

  auto Flags = I.isVolatile() ? MachineMemOperand::MOVolatile
                              : MachineMemOperand::MONone;
  Flags |= MachineMemOperand::MOLoad | MachineMemOperand::MOStore;

  Type *ResType = I.getType();
  Type *ValType = ResType->Type::getStructElementType(0);

  auto Res = getOrCreateVRegs(I);
  unsigned OldValRes = Res[0];
  unsigned SuccessRes = Res[1];
  unsigned Addr = getOrCreateVReg(*I.getPointerOperand());
  unsigned Cmp = getOrCreateVReg(*I.getCompareOperand());
  unsigned NewVal = getOrCreateVReg(*I.getNewValOperand());

  MIRBuilder.buildAtomicCmpXchgWithSuccess(
      OldValRes, SuccessRes, Addr, Cmp, NewVal,
      *MF->getMachineMemOperand(MachinePointerInfo(I.getPointerOperand()),
                                Flags, DL->getTypeStoreSize(ValType),
                                getMemOpAlignment(I), AAMDNodes(), nullptr,
                                I.getSyncScopeID(), I.getSuccessOrdering(),
                                I.getFailureOrdering()));
  return true;
}

1584 bool IRTranslator::translateAtomicRMW(const User &U,
1585  MachineIRBuilder &MIRBuilder) {
1586  const AtomicRMWInst &I = cast<AtomicRMWInst>(U);
1587 
1588  auto Flags = I.isVolatile() ? MachineMemOperand::MOVolatile
1591 
1592  Type *ResType = I.getType();
1593 
1594  unsigned Res = getOrCreateVReg(I);
1595  unsigned Addr = getOrCreateVReg(*I.getPointerOperand());
1596  unsigned Val = getOrCreateVReg(*I.getValOperand());
1597 
1598  unsigned Opcode = 0;
1599  switch (I.getOperation()) {
1600  default:
1601  llvm_unreachable("Unknown atomicrmw op");
1602  return false;
1603  case AtomicRMWInst::Xchg:
1604  Opcode = TargetOpcode::G_ATOMICRMW_XCHG;
1605  break;
1606  case AtomicRMWInst::Add:
1607  Opcode = TargetOpcode::G_ATOMICRMW_ADD;
1608  break;
1609  case AtomicRMWInst::Sub:
1610  Opcode = TargetOpcode::G_ATOMICRMW_SUB;
1611  break;
1612  case AtomicRMWInst::And:
1613  Opcode = TargetOpcode::G_ATOMICRMW_AND;
1614  break;
1615  case AtomicRMWInst::Nand:
1616  Opcode = TargetOpcode::G_ATOMICRMW_NAND;
1617  break;
1618  case AtomicRMWInst::Or:
1619  Opcode = TargetOpcode::G_ATOMICRMW_OR;
1620  break;
1621  case AtomicRMWInst::Xor:
1622  Opcode = TargetOpcode::G_ATOMICRMW_XOR;
1623  break;
1624  case AtomicRMWInst::Max:
1625  Opcode = TargetOpcode::G_ATOMICRMW_MAX;
1626  break;
1627  case AtomicRMWInst::Min:
1628  Opcode = TargetOpcode::G_ATOMICRMW_MIN;
1629  break;
1630  case AtomicRMWInst::UMax:
1631  Opcode = TargetOpcode::G_ATOMICRMW_UMAX;
1632  break;
1633  case AtomicRMWInst::UMin:
1634  Opcode = TargetOpcode::G_ATOMICRMW_UMIN;
1635  break;
1636  }
1637 
1638  MIRBuilder.buildAtomicRMW(
1639  Opcode, Res, Addr, Val,
1640  *MF->getMachineMemOperand(MachinePointerInfo(I.getPointerOperand()),
1641  Flags, DL->getTypeStoreSize(ResType),
1642  getMemOpAlignment(I), AAMDNodes(), nullptr,
1643  I.getSyncScopeID(), I.getOrdering()));
1644  return true;
1645 }
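
// A sketch: `%old = atomicrmw add i32* %p, i32 %v seq_cst` maps to roughly
//   %old:_(s32) = G_ATOMICRMW_ADD %p(p0), %v(s32)
// with a MachineMemOperand recording the ordering, sync scope, size and
// alignment.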
1646 
1647 void IRTranslator::finishPendingPhis() {
1648 #ifndef NDEBUG
1649  DILocationVerifier Verifier;
1650  GISelObserverWrapper WrapperObserver(&Verifier);
1651  RAIIDelegateInstaller DelInstall(*MF, &WrapperObserver);
1652 #endif // ifndef NDEBUG
1653  for (auto &Phi : PendingPHIs) {
1654  const PHINode *PI = Phi.first;
1655  ArrayRef<MachineInstr *> ComponentPHIs = Phi.second;
1656  EntryBuilder->setDebugLoc(PI->getDebugLoc());
1657 #ifndef NDEBUG
1658  Verifier.setCurrentInst(PI);
1659 #endif // ifndef NDEBUG
1660 
1661  // All MachineBasicBlocks exist, add them to the PHI. We assume IRTranslator
1662  // won't create extra control flow here, otherwise we need to find the
1663  // dominating predecessor here (or perhaps force the weirder IRTranslators
1664  // to provide a simple boundary).
1665  SmallSet<const BasicBlock *, 4> HandledPreds;
1666 
1667  for (unsigned i = 0; i < PI->getNumIncomingValues(); ++i) {
1668  auto IRPred = PI->getIncomingBlock(i);
1669  if (HandledPreds.count(IRPred))
1670  continue;
1671 
1672  HandledPreds.insert(IRPred);
1673  ArrayRef<unsigned> ValRegs = getOrCreateVRegs(*PI->getIncomingValue(i));
1674  for (auto Pred : getMachinePredBBs({IRPred, PI->getParent()})) {
1675  assert(Pred->isSuccessor(ComponentPHIs[0]->getParent()) &&
1676  "incorrect CFG at MachineBasicBlock level");
1677  for (unsigned j = 0; j < ValRegs.size(); ++j) {
1678  MachineInstrBuilder MIB(*MF, ComponentPHIs[j]);
1679  MIB.addUse(ValRegs[j]);
1680  MIB.addMBB(Pred);
1681  }
1682  }
1683  }
1684  }
1685 }
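
// A sketch: if one IR predecessor was lowered to two machine blocks %bb.1 and
// %bb.2 (getMachinePredBBs returns both for that edge), the same value
// register is appended once per machine predecessor, e.g.
//   %x:_(s32) = G_PHI %a(s32), %bb.1, %a(s32), %bb.2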
1686 
1687 bool IRTranslator::valueIsSplit(const Value &V,
1688                                 SmallVectorImpl<uint64_t> *Offsets) {
1689  SmallVector<LLT, 4> SplitTys;
1690  if (Offsets && !Offsets->empty())
1691  Offsets->clear();
1692  computeValueLLTs(*DL, *V.getType(), SplitTys, Offsets);
1693  return SplitTys.size() > 1;
1694 }
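
// A sketch: for an aggregate such as {i64, i32}, computeValueLLTs produces two
// LLTs (s64 and s32, at bit offsets 0 and 64), so the value counts as split;
// a plain i64 produces the single LLT s64 and does not.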
1695 
1696 bool IRTranslator::translate(const Instruction &Inst) {
1697  CurBuilder->setDebugLoc(Inst.getDebugLoc());
1698  EntryBuilder->setDebugLoc(Inst.getDebugLoc());
1699  switch(Inst.getOpcode()) {
1700 #define HANDLE_INST(NUM, OPCODE, CLASS) \
1701  case Instruction::OPCODE: \
1702  return translate##OPCODE(Inst, *CurBuilder.get());
1703 #include "llvm/IR/Instruction.def"
1704  default:
1705  return false;
1706  }
1707 }
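
// For reference, each HANDLE_INST expansion above yields a case of the form
//   case Instruction::Add: return translateAdd(Inst, *CurBuilder.get());
// where translateAdd is one of the per-opcode helpers declared in
// IRTranslator.h.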
1708 
1709 bool IRTranslator::translate(const Constant &C, unsigned Reg) {
1710  if (auto CI = dyn_cast<ConstantInt>(&C))
1711  EntryBuilder->buildConstant(Reg, *CI);
1712  else if (auto CF = dyn_cast<ConstantFP>(&C))
1713  EntryBuilder->buildFConstant(Reg, *CF);
1714  else if (isa<UndefValue>(C))
1715  EntryBuilder->buildUndef(Reg);
1716  else if (isa<ConstantPointerNull>(C)) {
1717  // As we are trying to build a constant val of 0 into a pointer,
1718  // insert a cast to make them correct with respect to types.
1719  unsigned NullSize = DL->getTypeSizeInBits(C.getType());
1720  auto *ZeroTy = Type::getIntNTy(C.getContext(), NullSize);
1721  auto *ZeroVal = ConstantInt::get(ZeroTy, 0);
1722  unsigned ZeroReg = getOrCreateVReg(*ZeroVal);
1723  EntryBuilder->buildCast(Reg, ZeroReg);
1724  } else if (auto GV = dyn_cast<GlobalValue>(&C))
1725  EntryBuilder->buildGlobalValue(Reg, GV);
1726  else if (auto CAZ = dyn_cast<ConstantAggregateZero>(&C)) {
1727  if (!CAZ->getType()->isVectorTy())
1728  return false;
1729  // Return the scalar if it is a <1 x Ty> vector.
1730  if (CAZ->getNumElements() == 1)
1731  return translate(*CAZ->getElementValue(0u), Reg);
1732  SmallVector<unsigned, 4> Ops;
1733  for (unsigned i = 0; i < CAZ->getNumElements(); ++i) {
1734  Constant &Elt = *CAZ->getElementValue(i);
1735  Ops.push_back(getOrCreateVReg(Elt));
1736  }
1737  EntryBuilder->buildBuildVector(Reg, Ops);
1738  } else if (auto CV = dyn_cast<ConstantDataVector>(&C)) {
1739  // Return the scalar if it is a <1 x Ty> vector.
1740  if (CV->getNumElements() == 1)
1741  return translate(*CV->getElementAsConstant(0), Reg);
1742  SmallVector<unsigned, 4> Ops;
1743  for (unsigned i = 0; i < CV->getNumElements(); ++i) {
1744  Constant &Elt = *CV->getElementAsConstant(i);
1745  Ops.push_back(getOrCreateVReg(Elt));
1746  }
1747  EntryBuilder->buildBuildVector(Reg, Ops);
1748  } else if (auto CE = dyn_cast<ConstantExpr>(&C)) {
1749  switch(CE->getOpcode()) {
1750 #define HANDLE_INST(NUM, OPCODE, CLASS) \
1751  case Instruction::OPCODE: \
1752  return translate##OPCODE(*CE, *EntryBuilder.get());
1753 #include "llvm/IR/Instruction.def"
1754  default:
1755  return false;
1756  }
1757  } else if (auto CV = dyn_cast<ConstantVector>(&C)) {
1758  if (CV->getNumOperands() == 1)
1759  return translate(*CV->getOperand(0), Reg);
1760  SmallVector<unsigned, 4> Ops;
1761  for (unsigned i = 0; i < CV->getNumOperands(); ++i) {
1762  Ops.push_back(getOrCreateVReg(*CV->getOperand(i)));
1763  }
1764  EntryBuilder->buildBuildVector(Reg, Ops);
1765  } else if (auto *BA = dyn_cast<BlockAddress>(&C)) {
1766  EntryBuilder->buildBlockAddress(Reg, BA);
1767  } else
1768  return false;
1769 
1770  return true;
1771 }
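
// A sketch of the ConstantPointerNull path above, assuming 64-bit pointers:
//   %zero:_(s64) = G_CONSTANT i64 0
//   %null:_(p0) = G_INTTOPTR %zero(s64)
// (buildCast picks G_INTTOPTR because the sizes match but only one side is a
// pointer).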
1772 
1773 void IRTranslator::finalizeFunction() {
1774  // Release the memory used by the different maps we
1775  // needed during the translation.
1776  PendingPHIs.clear();
1777  VMap.reset();
1778  FrameIndices.clear();
1779  MachinePreds.clear();
1780  // MachineIRBuilder::DebugLoc can outlive the DILocation it holds. Clear it
1781  // to avoid accessing free'd memory (in runOnMachineFunction) and to avoid
1782  // destroying it twice (in ~IRTranslator() and ~LLVMContext())
1783  EntryBuilder.reset();
1784  CurBuilder.reset();
1785 }
1786 
1787 bool IRTranslator::runOnMachineFunction(MachineFunction &CurMF) {
1788  MF = &CurMF;
1789  const Function &F = MF->getFunction();
1790  if (F.empty())
1791  return false;
1792  GISelCSEAnalysisWrapper &Wrapper =
1793      getAnalysis<GISelCSEAnalysisWrapperPass>().getCSEWrapper();
1794  // Set the CSEConfig and run the analysis.
1795  GISelCSEInfo *CSEInfo = nullptr;
1796  TPC = &getAnalysis<TargetPassConfig>();
1797  bool EnableCSE = EnableCSEInIRTranslator.getNumOccurrences()
1798  ? EnableCSEInIRTranslator
1799  : TPC->isGISelCSEEnabled();
1800 
1801  if (EnableCSE) {
1802  EntryBuilder = make_unique<CSEMIRBuilder>(CurMF);
1803  CSEInfo = &Wrapper.get(TPC->getCSEConfig());
1804  EntryBuilder->setCSEInfo(CSEInfo);
1805  CurBuilder = make_unique<CSEMIRBuilder>(CurMF);
1806  CurBuilder->setCSEInfo(CSEInfo);
1807  } else {
1808  EntryBuilder = make_unique<MachineIRBuilder>();
1809  CurBuilder = make_unique<MachineIRBuilder>();
1810  }
1811  CLI = MF->getSubtarget().getCallLowering();
1812  CurBuilder->setMF(*MF);
1813  EntryBuilder->setMF(*MF);
1814  MRI = &MF->getRegInfo();
1815  DL = &F.getParent()->getDataLayout();
1816  ORE = llvm::make_unique<OptimizationRemarkEmitter>(&F);
1817 
1818  assert(PendingPHIs.empty() && "stale PHIs");
1819 
1820  if (!DL->isLittleEndian()) {
1821  // Currently we don't properly handle big endian code.
1822  OptimizationRemarkMissed R("gisel-irtranslator", "GISelFailure",
1823  F.getSubprogram(), &F.getEntryBlock());
1824  R << "unable to translate in big endian mode";
1825  reportTranslationError(*MF, *TPC, *ORE, R);
1826  }
1827 
1828  // Release the per-function state when we return, whether we succeeded or not.
1829  auto FinalizeOnReturn = make_scope_exit([this]() { finalizeFunction(); });
1830 
1831  // Setup a separate basic-block for the arguments and constants
1832  MachineBasicBlock *EntryBB = MF->CreateMachineBasicBlock();
1833  MF->push_back(EntryBB);
1834  EntryBuilder->setMBB(*EntryBB);
1835 
1836  DebugLoc DbgLoc = F.getEntryBlock().front().getDebugLoc();
1837  SwiftError.setFunction(CurMF);
1838  SwiftError.createEntriesInEntryBlock(DbgLoc);
1839 
1840  // Create all blocks, in IR order, to preserve the layout.
1841  for (const BasicBlock &BB: F) {
1842  auto *&MBB = BBToMBB[&BB];
1843 
1844  MBB = MF->CreateMachineBasicBlock(&BB);
1845  MF->push_back(MBB);
1846 
1847  if (BB.hasAddressTaken())
1848  MBB->setHasAddressTaken();
1849  }
1850 
1851  // Make our arguments/constants entry block fallthrough to the IR entry block.
1852  EntryBB->addSuccessor(&getMBB(F.front()));
1853 
1854  // Lower the actual args into this basic block.
1855  SmallVector<unsigned, 8> VRegArgs;
1856  for (const Argument &Arg: F.args()) {
1857  if (DL->getTypeStoreSize(Arg.getType()) == 0)
1858  continue; // Don't handle zero sized types.
1859  VRegArgs.push_back(
1860  MRI->createGenericVirtualRegister(getLLTForType(*Arg.getType(), *DL)));
1861 
1862  if (Arg.hasSwiftErrorAttr())
1863  SwiftError.setCurrentVReg(EntryBB, SwiftError.getFunctionArg(),
1864  VRegArgs.back());
1865  }
1866 
1867  // We don't currently support translating swifterror or swiftself functions.
1868  for (auto &Arg : F.args()) {
1869  if (Arg.hasSwiftSelfAttr()) {
1870  OptimizationRemarkMissed R("gisel-irtranslator", "GISelFailure",
1871  F.getSubprogram(), &F.getEntryBlock());
1872  R << "unable to lower arguments due to swiftself: "
1873  << ore::NV("Prototype", F.getType());
1874  reportTranslationError(*MF, *TPC, *ORE, R);
1875  return false;
1876  }
1877  }
1878 
1879  if (!CLI->lowerFormalArguments(*EntryBuilder.get(), F, VRegArgs)) {
1880  OptimizationRemarkMissed R("gisel-irtranslator", "GISelFailure",
1881  F.getSubprogram(), &F.getEntryBlock());
1882  R << "unable to lower arguments: " << ore::NV("Prototype", F.getType());
1883  reportTranslationError(*MF, *TPC, *ORE, R);
1884  return false;
1885  }
1886 
1887  auto ArgIt = F.arg_begin();
1888  for (auto &VArg : VRegArgs) {
1889  // If the argument is an unsplit scalar then don't use unpackRegs to avoid
1890  // creating redundant copies.
1891  if (!valueIsSplit(*ArgIt, VMap.getOffsets(*ArgIt))) {
1892  auto &VRegs = *VMap.getVRegs(cast<Value>(*ArgIt));
1893  assert(VRegs.empty() && "VRegs already populated?");
1894  VRegs.push_back(VArg);
1895  } else {
1896  unpackRegs(*ArgIt, VArg, *EntryBuilder.get());
1897  }
1898  ArgIt++;
1899  }
1900 
1901  // Need to visit defs before uses when translating instructions.
1902  GISelObserverWrapper WrapperObserver;
1903  if (EnableCSE && CSEInfo)
1904  WrapperObserver.addObserver(CSEInfo);
1905  {
1906  ReversePostOrderTraversal<const Function *> RPOT(&F);
1907 #ifndef NDEBUG
1908  DILocationVerifier Verifier;
1909  WrapperObserver.addObserver(&Verifier);
1910 #endif // ifndef NDEBUG
1911  RAIIDelegateInstaller DelInstall(*MF, &WrapperObserver);
1912  for (const BasicBlock *BB : RPOT) {
1913  MachineBasicBlock &MBB = getMBB(*BB);
1914  // Set the insertion point of all the following translations to
1915  // the end of this basic block.
1916  CurBuilder->setMBB(MBB);
1917 
1918  for (const Instruction &Inst : *BB) {
1919 #ifndef NDEBUG
1920  Verifier.setCurrentInst(&Inst);
1921 #endif // ifndef NDEBUG
1922  if (translate(Inst))
1923  continue;
1924 
1925  OptimizationRemarkMissed R("gisel-irtranslator", "GISelFailure",
1926  Inst.getDebugLoc(), BB);
1927  R << "unable to translate instruction: " << ore::NV("Opcode", &Inst);
1928 
1929  if (ORE->allowExtraAnalysis("gisel-irtranslator")) {
1930  std::string InstStrStorage;
1931  raw_string_ostream InstStr(InstStrStorage);
1932  InstStr << Inst;
1933 
1934  R << ": '" << InstStr.str() << "'";
1935  }
1936 
1937  reportTranslationError(*MF, *TPC, *ORE, R);
1938  return false;
1939  }
1940  }
1941 #ifndef NDEBUG
1942  WrapperObserver.removeObserver(&Verifier);
1943 #endif
1944  }
1945 
1946  finishPendingPhis();
1947 
1948  SwiftError.propagateVRegs();
1949 
1950  // Merge the argument lowering and constants block with its single
1951  // successor, the LLVM-IR entry block. We want the basic block to
1952  // be maximal.
1953  assert(EntryBB->succ_size() == 1 &&
1954  "Custom BB used for lowering should have only one successor");
1955  // Get the successor of the current entry block.
1956  MachineBasicBlock &NewEntryBB = **EntryBB->succ_begin();
1957  assert(NewEntryBB.pred_size() == 1 &&
1958  "LLVM-IR entry block has a predecessor!?");
1959  // Move all the instruction from the current entry block to the
1960  // new entry block.
1961  NewEntryBB.splice(NewEntryBB.begin(), EntryBB, EntryBB->begin(),
1962  EntryBB->end());
1963 
1964  // Update the live-in information for the new entry block.
1965  for (const MachineBasicBlock::RegisterMaskPair &LiveIn : EntryBB->liveins())
1966  NewEntryBB.addLiveIn(LiveIn);
1967  NewEntryBB.sortUniqueLiveIns();
1968 
1969  // Get rid of the now empty basic block.
1970  EntryBB->removeSuccessor(&NewEntryBB);
1971  MF->remove(EntryBB);
1972  MF->DeleteMachineBasicBlock(EntryBB);
1973 
1974  assert(&MF->front() == &NewEntryBB &&
1975  "New entry wasn't next in the list of basic block!");
1976 
1977  // Initialize stack protector information.
1978  StackProtector &SP = getAnalysis<StackProtector>();
1979  SP.copyToMachineFrameInfo(MF->getFrameInfo());
1980 
1981  return false;
1982 }
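
// Usage sketch: on a GlobalISel-enabled target, the generic MIR this pass
// produces can be inspected with, e.g.
//   llc -global-isel -stop-after=irtranslator -o - input.ll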