LLVM  9.0.0svn
IRTranslator.cpp
Go to the documentation of this file.
1 //===- llvm/CodeGen/GlobalISel/IRTranslator.cpp - IRTranslator ---*- C++ -*-==//
2 //
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 //
7 //===----------------------------------------------------------------------===//
8 /// \file
9 /// This file implements the IRTranslator class.
10 //===----------------------------------------------------------------------===//
11 
14 #include "llvm/ADT/STLExtras.h"
15 #include "llvm/ADT/ScopeExit.h"
16 #include "llvm/ADT/SmallSet.h"
17 #include "llvm/ADT/SmallVector.h"
20 #include "llvm/CodeGen/Analysis.h"
37 #include "llvm/IR/BasicBlock.h"
38 #include "llvm/IR/CFG.h"
39 #include "llvm/IR/Constant.h"
40 #include "llvm/IR/Constants.h"
41 #include "llvm/IR/DataLayout.h"
42 #include "llvm/IR/DebugInfo.h"
43 #include "llvm/IR/DerivedTypes.h"
44 #include "llvm/IR/Function.h"
46 #include "llvm/IR/InlineAsm.h"
47 #include "llvm/IR/InstrTypes.h"
48 #include "llvm/IR/Instructions.h"
49 #include "llvm/IR/IntrinsicInst.h"
50 #include "llvm/IR/Intrinsics.h"
51 #include "llvm/IR/LLVMContext.h"
52 #include "llvm/IR/Metadata.h"
53 #include "llvm/IR/Type.h"
54 #include "llvm/IR/User.h"
55 #include "llvm/IR/Value.h"
56 #include "llvm/MC/MCContext.h"
57 #include "llvm/Pass.h"
58 #include "llvm/Support/Casting.h"
59 #include "llvm/Support/CodeGen.h"
60 #include "llvm/Support/Debug.h"
67 #include <algorithm>
68 #include <cassert>
69 #include <cstdint>
70 #include <iterator>
71 #include <string>
72 #include <utility>
73 #include <vector>
74 
#define DEBUG_TYPE "irtranslator"

using namespace llvm;

// Command-line override: when set, the IRTranslator builds machine
// instructions through a CSE-ing builder instead of the plain one.
static cl::opt<bool>
    EnableCSEInIRTranslator("enable-cse-in-irtranslator",
                            cl::desc("Should enable CSE in irtranslator"),
                            cl::Optional, cl::init(false));
// Pass identification: the address of ID uniquely identifies the pass.
char IRTranslator::ID = 0;

// NOTE(review): the INITIALIZE_PASS_DEPENDENCY line(s) between BEGIN and END
// (original lines 87-88) are missing from this listing — confirm against the
// original source.
INITIALIZE_PASS_BEGIN(IRTranslator, DEBUG_TYPE, "IRTranslator LLVM IR -> MI",
                      false, false)
INITIALIZE_PASS_END(IRTranslator, DEBUG_TYPE, "IRTranslator LLVM IR -> MI",
                    false, false)
91 
97 
98  // Print the function name explicitly if we don't have a debug location (which
99  // makes the diagnostic less useful) or if we're going to emit a raw error.
100  if (!R.getLocation().isValid() || TPC.isGlobalISelAbortEnabled())
101  R << (" (in function: " + MF.getName() + ")").str();
102 
103  if (TPC.isGlobalISelAbortEnabled())
104  report_fatal_error(R.getMsg());
105  else
106  ORE.emit(R);
107 }
108 
111 }
112 
#ifndef NDEBUG
namespace {
/// Verify that every instruction created has the same DILocation as the
/// instruction being translated.
///
/// Debug-build-only GISelChangeObserver: the translator points it at the IR
/// instruction currently being translated (setCurrentInst), and every
/// MachineInstr created while that instruction is current must carry the
/// same DebugLoc.
class DILocationVerifier : public GISelChangeObserver {
  // The IR instruction currently being translated; set by the translator
  // before it emits any machine instructions for it.
  const Instruction *CurrInst = nullptr;

public:
  DILocationVerifier() = default;
  ~DILocationVerifier() = default;

  const Instruction *getCurrentInst() const { return CurrInst; }
  void setCurrentInst(const Instruction *Inst) { CurrInst = Inst; }

  // Only creation matters for this check; the other observer hooks are
  // deliberate no-ops.
  void erasingInstr(MachineInstr &MI) override {}
  void changingInstr(MachineInstr &MI) override {}
  void changedInstr(MachineInstr &MI) override {}

  void createdInstr(MachineInstr &MI) override {
    assert(getCurrentInst() && "Inserted instruction without a current MI")(

    // Only print the check message if we're actually checking it.
#ifndef NDEBUG
    LLVM_DEBUG(dbgs() << "Checking DILocation from " << *CurrInst
                      << " was copied to " << MI);
#endif
    assert(CurrInst->getDebugLoc() == MI.getDebugLoc() &&
           "Line info was not transferred to all instructions");
  }
};
} // namespace
#endif // ifndef NDEBUG
145 
146 
153 }
154 
// NOTE(review): the return-type line of this definition (original line 155)
// is missing from this listing — confirm against the original source.
IRTranslator::allocateVRegs(const Value &Val) {
  // Pre-allocate one (still-unassigned, i.e. 0) vreg slot per scalar leaf of
  // Val's type; the caller fills the registers in afterwards.
  assert(!VMap.contains(Val) && "Value already allocated in VMap");
  auto *Regs = VMap.getVRegs(Val);
  auto *Offsets = VMap.getOffsets(Val);
  SmallVector<LLT, 4> SplitTys;
  // Split the (possibly aggregate) IR type into scalar LLTs; only ask for
  // offsets if they have not been computed for this value yet.
  computeValueLLTs(*DL, *Val.getType(), SplitTys,
                   Offsets->empty() ? Offsets : nullptr);
  for (unsigned i = 0; i < SplitTys.size(); ++i)
    Regs->push_back(0); // 0 == no register assigned yet
  return *Regs;
}
167 
ArrayRef<unsigned> IRTranslator::getOrCreateVRegs(const Value &Val) {
  // Fast path: registers were already created for this value.
  auto VRegsIt = VMap.findVRegs(Val);
  if (VRegsIt != VMap.vregs_end())
    return *VRegsIt->second;

  // Void values get an (empty) entry so later lookups succeed.
  if (Val.getType()->isVoidTy())
    return *VMap.getVRegs(Val);

  // Create entry for this type.
  auto *VRegs = VMap.getVRegs(Val);
  auto *Offsets = VMap.getOffsets(Val);

  assert(Val.getType()->isSized() &&
         "Don't know how to create an empty vreg");

  // Split the value's IR type into scalar leaves; fill Offsets only on the
  // first computation for this value.
  SmallVector<LLT, 4> SplitTys;
  computeValueLLTs(*DL, *Val.getType(), SplitTys,
                   Offsets->empty() ? Offsets : nullptr);

  // Non-constants: allocate one fresh generic vreg per leaf and we're done.
  if (!isa<Constant>(Val)) {
    for (auto Ty : SplitTys)
      VRegs->push_back(MRI->createGenericVirtualRegister(Ty));
    return *VRegs;
  }

  if (Val.getType()->isAggregateType()) {
    // UndefValue, ConstantAggregateZero: translate each aggregate element
    // recursively and concatenate the resulting register lists.
    auto &C = cast<Constant>(Val);
    unsigned Idx = 0;
    while (auto Elt = C.getAggregateElement(Idx++)) {
      auto EltRegs = getOrCreateVRegs(*Elt);
      llvm::copy(EltRegs, std::back_inserter(*VRegs));
    }
  } else {
    // Scalar constant: materialize it into a single new vreg.
    assert(SplitTys.size() == 1 && "unexpectedly split LLT");
    VRegs->push_back(MRI->createGenericVirtualRegister(SplitTys[0]));
    bool Success = translate(cast<Constant>(Val), VRegs->front());
    if (!Success) {
      // Report (fatal when global-isel abort is enabled) and still return
      // the partially-filled entry so callers don't crash.
      OptimizationRemarkMissed R("gisel-irtranslator", "GISelFailure",
                                 MF->getFunction().getSubprogram(),
                                 &MF->getFunction().getEntryBlock());
      R << "unable to translate constant: " << ore::NV("Type", Val.getType());
      reportTranslationError(*MF, *TPC, *ORE, R);
      return *VRegs;
    }
  }

  return *VRegs;
}
217 
218 int IRTranslator::getOrCreateFrameIndex(const AllocaInst &AI) {
219  if (FrameIndices.find(&AI) != FrameIndices.end())
220  return FrameIndices[&AI];
221 
222  unsigned ElementSize = DL->getTypeStoreSize(AI.getAllocatedType());
223  unsigned Size =
224  ElementSize * cast<ConstantInt>(AI.getArraySize())->getZExtValue();
225 
226  // Always allocate at least one byte.
227  Size = std::max(Size, 1u);
228 
229  unsigned Alignment = AI.getAlignment();
230  if (!Alignment)
231  Alignment = DL->getABITypeAlignment(AI.getAllocatedType());
232 
233  int &FI = FrameIndices[&AI];
234  FI = MF->getFrameInfo().CreateStackObject(Size, Alignment, false, &AI);
235  return FI;
236 }
237 
238 unsigned IRTranslator::getMemOpAlignment(const Instruction &I) {
239  unsigned Alignment = 0;
240  Type *ValTy = nullptr;
241  if (const StoreInst *SI = dyn_cast<StoreInst>(&I)) {
242  Alignment = SI->getAlignment();
243  ValTy = SI->getValueOperand()->getType();
244  } else if (const LoadInst *LI = dyn_cast<LoadInst>(&I)) {
245  Alignment = LI->getAlignment();
246  ValTy = LI->getType();
247  } else if (const AtomicCmpXchgInst *AI = dyn_cast<AtomicCmpXchgInst>(&I)) {
248  // TODO(PR27168): This instruction has no alignment attribute, but unlike
249  // the default alignment for load/store, the default here is to assume
250  // it has NATURAL alignment, not DataLayout-specified alignment.
251  const DataLayout &DL = AI->getModule()->getDataLayout();
252  Alignment = DL.getTypeStoreSize(AI->getCompareOperand()->getType());
253  ValTy = AI->getCompareOperand()->getType();
254  } else if (const AtomicRMWInst *AI = dyn_cast<AtomicRMWInst>(&I)) {
255  // TODO(PR27168): This instruction has no alignment attribute, but unlike
256  // the default alignment for load/store, the default here is to assume
257  // it has NATURAL alignment, not DataLayout-specified alignment.
258  const DataLayout &DL = AI->getModule()->getDataLayout();
259  Alignment = DL.getTypeStoreSize(AI->getValOperand()->getType());
260  ValTy = AI->getType();
261  } else {
262  OptimizationRemarkMissed R("gisel-irtranslator", "", &I);
263  R << "unable to translate memop: " << ore::NV("Opcode", &I);
264  reportTranslationError(*MF, *TPC, *ORE, R);
265  return 1;
266  }
267 
268  return Alignment ? Alignment : DL->getABITypeAlignment(ValTy);
269 }
270 
271 MachineBasicBlock &IRTranslator::getMBB(const BasicBlock &BB) {
272  MachineBasicBlock *&MBB = BBToMBB[&BB];
273  assert(MBB && "BasicBlock was not encountered before");
274  return *MBB;
275 }
276 
277 void IRTranslator::addMachineCFGPred(CFGEdge Edge, MachineBasicBlock *NewPred) {
278  assert(NewPred && "new predecessor must be a real MachineBasicBlock");
279  MachinePreds[Edge].push_back(NewPred);
280 }
281 
bool IRTranslator::translateBinaryOp(unsigned Opcode, const User &U,
                                     MachineIRBuilder &MIRBuilder) {
  // FIXME: handle signed/unsigned wrapping flags.

  // Get or create a virtual register for each value.
  // Unless the value is a Constant => loadimm cst?
  // or inline constant each time?
  // Creation of a virtual register needs to have a size.
  unsigned Op0 = getOrCreateVReg(*U.getOperand(0));
  unsigned Op1 = getOrCreateVReg(*U.getOperand(1));
  unsigned Res = getOrCreateVReg(U);
  uint16_t Flags = 0;
  if (isa<Instruction>(U)) {
    const Instruction &I = cast<Instruction>(U);
    // NOTE(review): as rendered here, I is computed but never used and Flags
    // stays 0; a line copying I's instruction flags into Flags (original
    // line 296) appears to be missing from this listing — confirm.
  }

  MIRBuilder.buildInstr(Opcode, {Res}, {Op0, Op1}, Flags);
  return true;
}
302 
bool IRTranslator::translateFSub(const User &U, MachineIRBuilder &MIRBuilder) {
  // -0.0 - X --> G_FNEG
  // NOTE(review): the remainder of this condition (original line 306,
  // presumably checking that operand 0 is the constant -0.0) is missing from
  // this listing — confirm against the original source.
  if (isa<Constant>(U.getOperand(0)) &&
    MIRBuilder.buildInstr(TargetOpcode::G_FNEG)
        .addDef(getOrCreateVReg(U))
        .addUse(getOrCreateVReg(*U.getOperand(1)));
    return true;
  }
  // General case: lower as an ordinary G_FSUB binary op.
  return translateBinaryOp(TargetOpcode::G_FSUB, U, MIRBuilder);
}
314 
315 bool IRTranslator::translateFNeg(const User &U, MachineIRBuilder &MIRBuilder) {
316  MIRBuilder.buildInstr(TargetOpcode::G_FNEG)
317  .addDef(getOrCreateVReg(U))
318  .addUse(getOrCreateVReg(*U.getOperand(0)));
319  return true;
320 }
321 
bool IRTranslator::translateCompare(const User &U,
                                    MachineIRBuilder &MIRBuilder) {
  // U is either a CmpInst or a compare ConstantExpr; CI is null for the
  // ConstantExpr case.
  const CmpInst *CI = dyn_cast<CmpInst>(&U);
  unsigned Op0 = getOrCreateVReg(*U.getOperand(0));
  unsigned Op1 = getOrCreateVReg(*U.getOperand(1));
  unsigned Res = getOrCreateVReg(U);
  CmpInst::Predicate Pred =
      CI ? CI->getPredicate() : static_cast<CmpInst::Predicate>(
                                    cast<ConstantExpr>(U).getPredicate());
  if (CmpInst::isIntPredicate(Pred))
    MIRBuilder.buildICmp(Pred, Res, Op0, Op1);
  else if (Pred == CmpInst::FCMP_FALSE)
    // FCMP_FALSE is constant-false: just copy a zero of the result type.
    // NOTE(review): CI may be null here (ConstantExpr case) yet is
    // dereferenced for its type — looks like a latent null-deref; confirm.
    MIRBuilder.buildCopy(
        Res, getOrCreateVReg(*Constant::getNullValue(CI->getType())));
  else if (Pred == CmpInst::FCMP_TRUE)
    // FCMP_TRUE is constant-true: copy an all-ones value of the result type.
    MIRBuilder.buildCopy(
        Res, getOrCreateVReg(*Constant::getAllOnesValue(CI->getType())));
  else {
    // NOTE(review): the flags argument and closing paren of this buildInstr
    // call (original line 341) are missing from this listing — confirm.
    MIRBuilder.buildInstr(TargetOpcode::G_FCMP, {Res}, {Pred, Op0, Op1},
  }

  return true;
}
346 
347 bool IRTranslator::translateRet(const User &U, MachineIRBuilder &MIRBuilder) {
348  const ReturnInst &RI = cast<ReturnInst>(U);
349  const Value *Ret = RI.getReturnValue();
350  if (Ret && DL->getTypeStoreSize(Ret->getType()) == 0)
351  Ret = nullptr;
352 
353  ArrayRef<unsigned> VRegs;
354  if (Ret)
355  VRegs = getOrCreateVRegs(*Ret);
356 
357  // The target may mess up with the insertion point, but
358  // this is not important as a return is the last instruction
359  // of the block anyway.
360 
361  return CLI->lowerReturn(MIRBuilder, Ret, VRegs);
362 }
363 
364 bool IRTranslator::translateBr(const User &U, MachineIRBuilder &MIRBuilder) {
365  const BranchInst &BrInst = cast<BranchInst>(U);
366  unsigned Succ = 0;
367  if (!BrInst.isUnconditional()) {
368  // We want a G_BRCOND to the true BB followed by an unconditional branch.
369  unsigned Tst = getOrCreateVReg(*BrInst.getCondition());
370  const BasicBlock &TrueTgt = *cast<BasicBlock>(BrInst.getSuccessor(Succ++));
371  MachineBasicBlock &TrueBB = getMBB(TrueTgt);
372  MIRBuilder.buildBrCond(Tst, TrueBB);
373  }
374 
375  const BasicBlock &BrTgt = *cast<BasicBlock>(BrInst.getSuccessor(Succ));
376  MachineBasicBlock &TgtBB = getMBB(BrTgt);
377  MachineBasicBlock &CurBB = MIRBuilder.getMBB();
378 
379  // If the unconditional target is the layout successor, fallthrough.
380  if (!CurBB.isLayoutSuccessor(&TgtBB))
381  MIRBuilder.buildBr(TgtBB);
382 
383  // Link successors.
384  for (const BasicBlock *Succ : successors(&BrInst))
385  CurBB.addSuccessor(&getMBB(*Succ));
386  return true;
387 }
388 
389 bool IRTranslator::translateSwitch(const User &U,
390  MachineIRBuilder &MIRBuilder) {
391  // For now, just translate as a chain of conditional branches.
392  // FIXME: could we share most of the logic/code in
393  // SelectionDAGBuilder::visitSwitch between SelectionDAG and GlobalISel?
394  // At first sight, it seems most of the logic in there is independent of
395  // SelectionDAG-specifics and a lot of work went in to optimize switch
396  // lowering in there.
397 
398  const SwitchInst &SwInst = cast<SwitchInst>(U);
399  const unsigned SwCondValue = getOrCreateVReg(*SwInst.getCondition());
400  const BasicBlock *OrigBB = SwInst.getParent();
401 
402  LLT LLTi1 = getLLTForType(*Type::getInt1Ty(U.getContext()), *DL);
403  for (auto &CaseIt : SwInst.cases()) {
404  const unsigned CaseValueReg = getOrCreateVReg(*CaseIt.getCaseValue());
405  const unsigned Tst = MRI->createGenericVirtualRegister(LLTi1);
406  MIRBuilder.buildICmp(CmpInst::ICMP_EQ, Tst, CaseValueReg, SwCondValue);
407  MachineBasicBlock &CurMBB = MIRBuilder.getMBB();
408  const BasicBlock *TrueBB = CaseIt.getCaseSuccessor();
409  MachineBasicBlock &TrueMBB = getMBB(*TrueBB);
410 
411  MIRBuilder.buildBrCond(Tst, TrueMBB);
412  CurMBB.addSuccessor(&TrueMBB);
413  addMachineCFGPred({OrigBB, TrueBB}, &CurMBB);
414 
415  MachineBasicBlock *FalseMBB =
416  MF->CreateMachineBasicBlock(SwInst.getParent());
417  // Insert the comparison blocks one after the other.
418  MF->insert(std::next(CurMBB.getIterator()), FalseMBB);
419  MIRBuilder.buildBr(*FalseMBB);
420  CurMBB.addSuccessor(FalseMBB);
421 
422  MIRBuilder.setMBB(*FalseMBB);
423  }
424  // handle default case
425  const BasicBlock *DefaultBB = SwInst.getDefaultDest();
426  MachineBasicBlock &DefaultMBB = getMBB(*DefaultBB);
427  MIRBuilder.buildBr(DefaultMBB);
428  MachineBasicBlock &CurMBB = MIRBuilder.getMBB();
429  CurMBB.addSuccessor(&DefaultMBB);
430  addMachineCFGPred({OrigBB, DefaultBB}, &CurMBB);
431 
432  return true;
433 }
434 
435 bool IRTranslator::translateIndirectBr(const User &U,
436  MachineIRBuilder &MIRBuilder) {
437  const IndirectBrInst &BrInst = cast<IndirectBrInst>(U);
438 
439  const unsigned Tgt = getOrCreateVReg(*BrInst.getAddress());
440  MIRBuilder.buildBrIndirect(Tgt);
441 
442  // Link successors.
443  MachineBasicBlock &CurBB = MIRBuilder.getMBB();
444  for (const BasicBlock *Succ : successors(&BrInst))
445  CurBB.addSuccessor(&getMBB(*Succ));
446 
447  return true;
448 }
449 
bool IRTranslator::translateLoad(const User &U, MachineIRBuilder &MIRBuilder) {
  const LoadInst &LI = cast<LoadInst>(U);

  // NOTE(review): the false arm of this ternary (original line 454,
  // presumably MachineMemOperand::MONone) is missing from this listing —
  // confirm against the original source.
  auto Flags = LI.isVolatile() ? MachineMemOperand::MOVolatile
  Flags |= MachineMemOperand::MOLoad;

  // Zero-sized loads produce no value: nothing to emit.
  if (DL->getTypeStoreSize(LI.getType()) == 0)
    return true;

  // Emit one G_LOAD per scalar leaf of the loaded type, each addressed at
  // its leaf offset from the base pointer.
  ArrayRef<unsigned> Regs = getOrCreateVRegs(LI);
  ArrayRef<uint64_t> Offsets = *VMap.getOffsets(LI);
  unsigned Base = getOrCreateVReg(*LI.getPointerOperand());

  for (unsigned i = 0; i < Regs.size(); ++i) {
    unsigned Addr = 0;
    // Offsets are tracked in bits; addresses/MMO offsets are in bytes.
    MIRBuilder.materializeGEP(Addr, Base, LLT::scalar(64), Offsets[i] / 8);

    MachinePointerInfo Ptr(LI.getPointerOperand(), Offsets[i] / 8);
    unsigned BaseAlign = getMemOpAlignment(LI);
    auto MMO = MF->getMachineMemOperand(
        Ptr, Flags, (MRI->getType(Regs[i]).getSizeInBits() + 7) / 8,
        MinAlign(BaseAlign, Offsets[i] / 8), AAMDNodes(), nullptr,
        LI.getSyncScopeID(), LI.getOrdering());
    MIRBuilder.buildLoad(Regs[i], Addr, *MMO);
  }

  return true;
}
479 
bool IRTranslator::translateStore(const User &U, MachineIRBuilder &MIRBuilder) {
  const StoreInst &SI = cast<StoreInst>(U);
  // NOTE(review): the false arm of this ternary and the line OR-ing in
  // MachineMemOperand::MOStore (original lines 483-484) are missing from
  // this listing — confirm against the original source.
  auto Flags = SI.isVolatile() ? MachineMemOperand::MOVolatile

  // Zero-sized stores are no-ops.
  if (DL->getTypeStoreSize(SI.getValueOperand()->getType()) == 0)
    return true;

  // Emit one G_STORE per scalar leaf of the stored value, each addressed at
  // its leaf offset from the base pointer.
  ArrayRef<unsigned> Vals = getOrCreateVRegs(*SI.getValueOperand());
  ArrayRef<uint64_t> Offsets = *VMap.getOffsets(*SI.getValueOperand());
  unsigned Base = getOrCreateVReg(*SI.getPointerOperand());

  for (unsigned i = 0; i < Vals.size(); ++i) {
    unsigned Addr = 0;
    // Offsets are tracked in bits; addresses/MMO offsets are in bytes.
    MIRBuilder.materializeGEP(Addr, Base, LLT::scalar(64), Offsets[i] / 8);

    MachinePointerInfo Ptr(SI.getPointerOperand(), Offsets[i] / 8);
    unsigned BaseAlign = getMemOpAlignment(SI);
    auto MMO = MF->getMachineMemOperand(
        Ptr, Flags, (MRI->getType(Vals[i]).getSizeInBits() + 7) / 8,
        MinAlign(BaseAlign, Offsets[i] / 8), AAMDNodes(), nullptr,
        SI.getSyncScopeID(), SI.getOrdering());
    MIRBuilder.buildStore(Vals[i], Addr, *MMO);
  }
  return true;
}
507 
// Compute the bit offset within the source aggregate addressed by the
// indices of an extractvalue/insertvalue (or equivalent constant expr).
static uint64_t getOffsetFromIndices(const User &U, const DataLayout &DL) {
  const Value *Src = U.getOperand(0);
  // NOTE(review): the declaration of Int32Ty (original line 510, presumably
  // Type::getInt32Ty(U.getContext())) is missing from this listing.

  // getIndexedOffsetInType is designed for GEPs, so the first index is the
  // usual array element rather than looking into the actual aggregate.
  SmallVector<Value *, 1> Indices;
  Indices.push_back(ConstantInt::get(Int32Ty, 0));

  if (const ExtractValueInst *EVI = dyn_cast<ExtractValueInst>(&U)) {
    for (auto Idx : EVI->indices())
      Indices.push_back(ConstantInt::get(Int32Ty, Idx));
  } else if (const InsertValueInst *IVI = dyn_cast<InsertValueInst>(&U)) {
    for (auto Idx : IVI->indices())
      Indices.push_back(ConstantInt::get(Int32Ty, Idx));
  } else {
    // Fallback (e.g. constant expressions): use the operands directly.
    for (unsigned i = 1; i < U.getNumOperands(); ++i)
      Indices.push_back(U.getOperand(i));
  }

  // getIndexedOffsetInType returns bytes; callers want bits.
  return 8 * static_cast<uint64_t>(
                 DL.getIndexedOffsetInType(Src->getType(), Indices));
}
531 
532 bool IRTranslator::translateExtractValue(const User &U,
533  MachineIRBuilder &MIRBuilder) {
534  const Value *Src = U.getOperand(0);
535  uint64_t Offset = getOffsetFromIndices(U, *DL);
536  ArrayRef<unsigned> SrcRegs = getOrCreateVRegs(*Src);
537  ArrayRef<uint64_t> Offsets = *VMap.getOffsets(*Src);
538  unsigned Idx = llvm::lower_bound(Offsets, Offset) - Offsets.begin();
539  auto &DstRegs = allocateVRegs(U);
540 
541  for (unsigned i = 0; i < DstRegs.size(); ++i)
542  DstRegs[i] = SrcRegs[Idx++];
543 
544  return true;
545 }
546 
547 bool IRTranslator::translateInsertValue(const User &U,
548  MachineIRBuilder &MIRBuilder) {
549  const Value *Src = U.getOperand(0);
550  uint64_t Offset = getOffsetFromIndices(U, *DL);
551  auto &DstRegs = allocateVRegs(U);
552  ArrayRef<uint64_t> DstOffsets = *VMap.getOffsets(U);
553  ArrayRef<unsigned> SrcRegs = getOrCreateVRegs(*Src);
554  ArrayRef<unsigned> InsertedRegs = getOrCreateVRegs(*U.getOperand(1));
555  auto InsertedIt = InsertedRegs.begin();
556 
557  for (unsigned i = 0; i < DstRegs.size(); ++i) {
558  if (DstOffsets[i] >= Offset && InsertedIt != InsertedRegs.end())
559  DstRegs[i] = *InsertedIt++;
560  else
561  DstRegs[i] = SrcRegs[i];
562  }
563 
564  return true;
565 }
566 
bool IRTranslator::translateSelect(const User &U,
                                   MachineIRBuilder &MIRBuilder) {
  unsigned Tst = getOrCreateVReg(*U.getOperand(0));
  // Aggregates are split into leaves; emit one G_SELECT per leaf, all
  // sharing the same condition register.
  ArrayRef<unsigned> ResRegs = getOrCreateVRegs(U);
  ArrayRef<unsigned> Op0Regs = getOrCreateVRegs(*U.getOperand(1));
  ArrayRef<unsigned> Op1Regs = getOrCreateVRegs(*U.getOperand(2));

  const SelectInst &SI = cast<SelectInst>(U);
  uint16_t Flags = 0;
  // NOTE(review): the body of this if (original line 577, presumably copying
  // Cmp's flags into Flags) is missing from this listing — confirm.
  if (const CmpInst *Cmp = dyn_cast<CmpInst>(SI.getCondition()))

  for (unsigned i = 0; i < ResRegs.size(); ++i) {
    MIRBuilder.buildInstr(TargetOpcode::G_SELECT, {ResRegs[i]},
                          {Tst, Op0Regs[i], Op1Regs[i]}, Flags);
  }

  return true;
}
586 
587 bool IRTranslator::translateBitCast(const User &U,
588  MachineIRBuilder &MIRBuilder) {
589  // If we're bitcasting to the source type, we can reuse the source vreg.
590  if (getLLTForType(*U.getOperand(0)->getType(), *DL) ==
591  getLLTForType(*U.getType(), *DL)) {
592  unsigned SrcReg = getOrCreateVReg(*U.getOperand(0));
593  auto &Regs = *VMap.getVRegs(U);
594  // If we already assigned a vreg for this bitcast, we can't change that.
595  // Emit a copy to satisfy the users we already emitted.
596  if (!Regs.empty())
597  MIRBuilder.buildCopy(Regs[0], SrcReg);
598  else {
599  Regs.push_back(SrcReg);
600  VMap.getOffsets(U)->push_back(0);
601  }
602  return true;
603  }
604  return translateCast(TargetOpcode::G_BITCAST, U, MIRBuilder);
605 }
606 
607 bool IRTranslator::translateCast(unsigned Opcode, const User &U,
608  MachineIRBuilder &MIRBuilder) {
609  unsigned Op = getOrCreateVReg(*U.getOperand(0));
610  unsigned Res = getOrCreateVReg(U);
611  MIRBuilder.buildInstr(Opcode, {Res}, {Op});
612  return true;
613 }
614 
615 bool IRTranslator::translateGetElementPtr(const User &U,
616  MachineIRBuilder &MIRBuilder) {
617  // FIXME: support vector GEPs.
618  if (U.getType()->isVectorTy())
619  return false;
620 
621  Value &Op0 = *U.getOperand(0);
622  unsigned BaseReg = getOrCreateVReg(Op0);
623  Type *PtrIRTy = Op0.getType();
624  LLT PtrTy = getLLTForType(*PtrIRTy, *DL);
625  Type *OffsetIRTy = DL->getIntPtrType(PtrIRTy);
626  LLT OffsetTy = getLLTForType(*OffsetIRTy, *DL);
627 
628  int64_t Offset = 0;
629  for (gep_type_iterator GTI = gep_type_begin(&U), E = gep_type_end(&U);
630  GTI != E; ++GTI) {
631  const Value *Idx = GTI.getOperand();
632  if (StructType *StTy = GTI.getStructTypeOrNull()) {
633  unsigned Field = cast<Constant>(Idx)->getUniqueInteger().getZExtValue();
634  Offset += DL->getStructLayout(StTy)->getElementOffset(Field);
635  continue;
636  } else {
637  uint64_t ElementSize = DL->getTypeAllocSize(GTI.getIndexedType());
638 
639  // If this is a scalar constant or a splat vector of constants,
640  // handle it quickly.
641  if (const auto *CI = dyn_cast<ConstantInt>(Idx)) {
642  Offset += ElementSize * CI->getSExtValue();
643  continue;
644  }
645 
646  if (Offset != 0) {
647  unsigned NewBaseReg = MRI->createGenericVirtualRegister(PtrTy);
648  LLT OffsetTy = getLLTForType(*OffsetIRTy, *DL);
649  auto OffsetMIB = MIRBuilder.buildConstant({OffsetTy}, Offset);
650  MIRBuilder.buildGEP(NewBaseReg, BaseReg, OffsetMIB.getReg(0));
651 
652  BaseReg = NewBaseReg;
653  Offset = 0;
654  }
655 
656  unsigned IdxReg = getOrCreateVReg(*Idx);
657  if (MRI->getType(IdxReg) != OffsetTy) {
658  unsigned NewIdxReg = MRI->createGenericVirtualRegister(OffsetTy);
659  MIRBuilder.buildSExtOrTrunc(NewIdxReg, IdxReg);
660  IdxReg = NewIdxReg;
661  }
662 
663  // N = N + Idx * ElementSize;
664  // Avoid doing it for ElementSize of 1.
665  unsigned GepOffsetReg;
666  if (ElementSize != 1) {
667  GepOffsetReg = MRI->createGenericVirtualRegister(OffsetTy);
668  auto ElementSizeMIB = MIRBuilder.buildConstant(
669  getLLTForType(*OffsetIRTy, *DL), ElementSize);
670  MIRBuilder.buildMul(GepOffsetReg, ElementSizeMIB.getReg(0), IdxReg);
671  } else
672  GepOffsetReg = IdxReg;
673 
674  unsigned NewBaseReg = MRI->createGenericVirtualRegister(PtrTy);
675  MIRBuilder.buildGEP(NewBaseReg, BaseReg, GepOffsetReg);
676  BaseReg = NewBaseReg;
677  }
678  }
679 
680  if (Offset != 0) {
681  auto OffsetMIB =
682  MIRBuilder.buildConstant(getLLTForType(*OffsetIRTy, *DL), Offset);
683  MIRBuilder.buildGEP(getOrCreateVReg(U), BaseReg, OffsetMIB.getReg(0));
684  return true;
685  }
686 
687  MIRBuilder.buildCopy(getOrCreateVReg(U), BaseReg);
688  return true;
689 }
690 
bool IRTranslator::translateMemfunc(const CallInst &CI,
                                    MachineIRBuilder &MIRBuilder,
                                    unsigned ID) {
  // Only address-space-0 pointers with a pointer-sized length operand are
  // handled; anything else falls back to the generic path (return false).
  LLT SizeTy = getLLTForType(*CI.getArgOperand(2)->getType(), *DL);
  Type *DstTy = CI.getArgOperand(0)->getType();
  if (cast<PointerType>(DstTy)->getAddressSpace() != 0 ||
      SizeTy.getSizeInBits() != DL->getPointerSizeInBits(0))
    return false;

  // NOTE(review): the declaration of Args (original line 700, presumably a
  // SmallVector of CallLowering::ArgInfo) is missing from this listing.
  for (int i = 0; i < 3; ++i) {
    const auto &Arg = CI.getArgOperand(i);
    Args.emplace_back(getOrCreateVReg(*Arg), Arg->getType());
  }

  // Pick the libcall name matching the intrinsic.
  const char *Callee;
  switch (ID) {
  case Intrinsic::memmove:
  case Intrinsic::memcpy: {
    Type *SrcTy = CI.getArgOperand(1)->getType();
    if(cast<PointerType>(SrcTy)->getAddressSpace() != 0)
      return false;
    Callee = ID == Intrinsic::memcpy ? "memcpy" : "memmove";
    break;
  }
  case Intrinsic::memset:
    Callee = "memset";
    break;
  default:
    return false;
  }

  // Lower as an ordinary external-symbol call; result register 0 == no
  // result value.
  return CLI->lowerCall(MIRBuilder, CI.getCallingConv(),
                        MachineOperand::CreateES(Callee),
                        CallLowering::ArgInfo(0, CI.getType()), Args);
}
727 
void IRTranslator::getStackGuard(unsigned DstReg,
                                 MachineIRBuilder &MIRBuilder) {
  // Emit the target-specific LOAD_STACK_GUARD pseudo defining DstReg, which
  // must therefore be constrained to the target's pointer register class.
  const TargetRegisterInfo *TRI = MF->getSubtarget().getRegisterInfo();
  MRI->setRegClass(DstReg, TRI->getPointerRegClass(*MF));
  auto MIB = MIRBuilder.buildInstr(TargetOpcode::LOAD_STACK_GUARD);
  MIB.addDef(DstReg);

  // If the target exposes the guard as an IR global, attach a memory
  // operand describing the guard slot; otherwise we're done.
  auto &TLI = *MF->getSubtarget().getTargetLowering();
  Value *Global = TLI.getSDagStackGuard(*MF->getFunction().getParent());
  if (!Global)
    return;

  MachinePointerInfo MPInfo(Global);
  // NOTE(review): the declarations of Flags and of MemRef (the result of the
  // getMachineMemOperand call below, original lines 741-743) are missing
  // from this listing — confirm against the original source.
  MF->getMachineMemOperand(MPInfo, Flags, DL->getPointerSizeInBits() / 8,
                           DL->getPointerABIAlignment(0));
  MIB.setMemRefs({MemRef});
}
748 
749 bool IRTranslator::translateOverflowIntrinsic(const CallInst &CI, unsigned Op,
750  MachineIRBuilder &MIRBuilder) {
751  ArrayRef<unsigned> ResRegs = getOrCreateVRegs(CI);
752  MIRBuilder.buildInstr(Op)
753  .addDef(ResRegs[0])
754  .addDef(ResRegs[1])
755  .addUse(getOrCreateVReg(*CI.getOperand(0)))
756  .addUse(getOrCreateVReg(*CI.getOperand(1)));
757 
758  return true;
759 }
760 
761 unsigned IRTranslator::getSimpleIntrinsicOpcode(Intrinsic::ID ID) {
762  switch (ID) {
763  default:
764  break;
765  case Intrinsic::bswap:
766  return TargetOpcode::G_BSWAP;
767  case Intrinsic::ceil:
768  return TargetOpcode::G_FCEIL;
769  case Intrinsic::cos:
770  return TargetOpcode::G_FCOS;
771  case Intrinsic::ctpop:
772  return TargetOpcode::G_CTPOP;
773  case Intrinsic::exp:
774  return TargetOpcode::G_FEXP;
775  case Intrinsic::exp2:
776  return TargetOpcode::G_FEXP2;
777  case Intrinsic::fabs:
778  return TargetOpcode::G_FABS;
779  case Intrinsic::canonicalize:
780  return TargetOpcode::G_FCANONICALIZE;
781  case Intrinsic::floor:
782  return TargetOpcode::G_FFLOOR;
783  case Intrinsic::fma:
784  return TargetOpcode::G_FMA;
785  case Intrinsic::log:
786  return TargetOpcode::G_FLOG;
787  case Intrinsic::log2:
788  return TargetOpcode::G_FLOG2;
789  case Intrinsic::log10:
790  return TargetOpcode::G_FLOG10;
791  case Intrinsic::pow:
792  return TargetOpcode::G_FPOW;
793  case Intrinsic::rint:
794  return TargetOpcode::G_FRINT;
795  case Intrinsic::round:
796  return TargetOpcode::G_INTRINSIC_ROUND;
797  case Intrinsic::sin:
798  return TargetOpcode::G_FSIN;
799  case Intrinsic::sqrt:
800  return TargetOpcode::G_FSQRT;
801  case Intrinsic::trunc:
802  return TargetOpcode::G_INTRINSIC_TRUNC;
803  }
805 }
806 
bool IRTranslator::translateSimpleIntrinsic(const CallInst &CI,
                                            Intrinsic::ID ID,
                                            MachineIRBuilder &MIRBuilder) {

  unsigned Op = getSimpleIntrinsicOpcode(ID);

  // Is this a simple intrinsic?
  if (Op == Intrinsic::not_intrinsic)
    return false;

  // Yes. Let's translate it.
  // NOTE(review): the declaration of VRegs (original line 818, presumably a
  // SmallVector<unsigned, N>) is missing from this listing.
  for (auto &Arg : CI.arg_operands())
    VRegs.push_back(getOrCreateVReg(*Arg));

  // NOTE(review): the final argument (instruction flags) and closing paren
  // of this buildInstr call (original line 823) are missing from this
  // listing — confirm against the original source.
  MIRBuilder.buildInstr(Op, {getOrCreateVReg(CI)}, VRegs,
  return true;
}
826 
827 bool IRTranslator::translateKnownIntrinsic(const CallInst &CI, Intrinsic::ID ID,
828  MachineIRBuilder &MIRBuilder) {
829 
830  // If this is a simple intrinsic (that is, we just need to add a def of
831  // a vreg, and uses for each arg operand, then translate it.
832  if (translateSimpleIntrinsic(CI, ID, MIRBuilder))
833  return true;
834 
835  switch (ID) {
836  default:
837  break;
838  case Intrinsic::lifetime_start:
839  case Intrinsic::lifetime_end: {
840  // No stack colouring in O0, discard region information.
841  if (MF->getTarget().getOptLevel() == CodeGenOpt::None)
842  return true;
843 
844  unsigned Op = ID == Intrinsic::lifetime_start ? TargetOpcode::LIFETIME_START
846 
847  // Get the underlying objects for the location passed on the lifetime
848  // marker.
849  SmallVector<Value *, 4> Allocas;
850  GetUnderlyingObjects(CI.getArgOperand(1), Allocas, *DL);
851 
852  // Iterate over each underlying object, creating lifetime markers for each
853  // static alloca. Quit if we find a non-static alloca.
854  for (Value *V : Allocas) {
855  AllocaInst *AI = dyn_cast<AllocaInst>(V);
856  if (!AI)
857  continue;
858 
859  if (!AI->isStaticAlloca())
860  return true;
861 
862  MIRBuilder.buildInstr(Op).addFrameIndex(getOrCreateFrameIndex(*AI));
863  }
864  return true;
865  }
866  case Intrinsic::dbg_declare: {
867  const DbgDeclareInst &DI = cast<DbgDeclareInst>(CI);
868  assert(DI.getVariable() && "Missing variable");
869 
870  const Value *Address = DI.getAddress();
871  if (!Address || isa<UndefValue>(Address)) {
872  LLVM_DEBUG(dbgs() << "Dropping debug info for " << DI << "\n");
873  return true;
874  }
875 
877  MIRBuilder.getDebugLoc()) &&
878  "Expected inlined-at fields to agree");
879  auto AI = dyn_cast<AllocaInst>(Address);
880  if (AI && AI->isStaticAlloca()) {
881  // Static allocas are tracked at the MF level, no need for DBG_VALUE
882  // instructions (in fact, they get ignored if they *do* exist).
883  MF->setVariableDbgInfo(DI.getVariable(), DI.getExpression(),
884  getOrCreateFrameIndex(*AI), DI.getDebugLoc());
885  } else {
886  // A dbg.declare describes the address of a source variable, so lower it
887  // into an indirect DBG_VALUE.
888  MIRBuilder.buildIndirectDbgValue(getOrCreateVReg(*Address),
889  DI.getVariable(), DI.getExpression());
890  }
891  return true;
892  }
893  case Intrinsic::dbg_label: {
894  const DbgLabelInst &DI = cast<DbgLabelInst>(CI);
895  assert(DI.getLabel() && "Missing label");
896 
898  MIRBuilder.getDebugLoc()) &&
899  "Expected inlined-at fields to agree");
900 
901  MIRBuilder.buildDbgLabel(DI.getLabel());
902  return true;
903  }
904  case Intrinsic::vaend:
905  // No target I know of cares about va_end. Certainly no in-tree target
906  // does. Simplest intrinsic ever!
907  return true;
908  case Intrinsic::vastart: {
909  auto &TLI = *MF->getSubtarget().getTargetLowering();
910  Value *Ptr = CI.getArgOperand(0);
911  unsigned ListSize = TLI.getVaListSizeInBits(*DL) / 8;
912 
913  // FIXME: Get alignment
914  MIRBuilder.buildInstr(TargetOpcode::G_VASTART)
915  .addUse(getOrCreateVReg(*Ptr))
916  .addMemOperand(MF->getMachineMemOperand(
917  MachinePointerInfo(Ptr), MachineMemOperand::MOStore, ListSize, 1));
918  return true;
919  }
920  case Intrinsic::dbg_value: {
921  // This form of DBG_VALUE is target-independent.
922  const DbgValueInst &DI = cast<DbgValueInst>(CI);
923  const Value *V = DI.getValue();
925  MIRBuilder.getDebugLoc()) &&
926  "Expected inlined-at fields to agree");
927  if (!V) {
928  // Currently the optimizer can produce this; insert an undef to
929  // help debugging. Probably the optimizer should not do this.
930  MIRBuilder.buildIndirectDbgValue(0, DI.getVariable(), DI.getExpression());
931  } else if (const auto *CI = dyn_cast<Constant>(V)) {
932  MIRBuilder.buildConstDbgValue(*CI, DI.getVariable(), DI.getExpression());
933  } else {
934  unsigned Reg = getOrCreateVReg(*V);
935  // FIXME: This does not handle register-indirect values at offset 0. The
936  // direct/indirect thing shouldn't really be handled by something as
937  // implicit as reg+noreg vs reg+imm in the first palce, but it seems
938  // pretty baked in right now.
939  MIRBuilder.buildDirectDbgValue(Reg, DI.getVariable(), DI.getExpression());
940  }
941  return true;
942  }
943  case Intrinsic::uadd_with_overflow:
944  return translateOverflowIntrinsic(CI, TargetOpcode::G_UADDO, MIRBuilder);
945  case Intrinsic::sadd_with_overflow:
946  return translateOverflowIntrinsic(CI, TargetOpcode::G_SADDO, MIRBuilder);
947  case Intrinsic::usub_with_overflow:
948  return translateOverflowIntrinsic(CI, TargetOpcode::G_USUBO, MIRBuilder);
949  case Intrinsic::ssub_with_overflow:
950  return translateOverflowIntrinsic(CI, TargetOpcode::G_SSUBO, MIRBuilder);
951  case Intrinsic::umul_with_overflow:
952  return translateOverflowIntrinsic(CI, TargetOpcode::G_UMULO, MIRBuilder);
953  case Intrinsic::smul_with_overflow:
954  return translateOverflowIntrinsic(CI, TargetOpcode::G_SMULO, MIRBuilder);
955  case Intrinsic::fmuladd: {
956  const TargetMachine &TM = MF->getTarget();
957  const TargetLowering &TLI = *MF->getSubtarget().getTargetLowering();
958  unsigned Dst = getOrCreateVReg(CI);
959  unsigned Op0 = getOrCreateVReg(*CI.getArgOperand(0));
960  unsigned Op1 = getOrCreateVReg(*CI.getArgOperand(1));
961  unsigned Op2 = getOrCreateVReg(*CI.getArgOperand(2));
963  TLI.isFMAFasterThanFMulAndFAdd(TLI.getValueType(*DL, CI.getType()))) {
964  // TODO: Revisit this to see if we should move this part of the
965  // lowering to the combiner.
966  MIRBuilder.buildInstr(TargetOpcode::G_FMA, {Dst}, {Op0, Op1, Op2},
968  } else {
969  LLT Ty = getLLTForType(*CI.getType(), *DL);
970  auto FMul = MIRBuilder.buildInstr(TargetOpcode::G_FMUL, {Ty}, {Op0, Op1},
972  MIRBuilder.buildInstr(TargetOpcode::G_FADD, {Dst}, {FMul, Op2},
974  }
975  return true;
976  }
977  case Intrinsic::memcpy:
978  case Intrinsic::memmove:
979  case Intrinsic::memset:
980  return translateMemfunc(CI, MIRBuilder, ID);
981  case Intrinsic::eh_typeid_for: {
983  unsigned Reg = getOrCreateVReg(CI);
984  unsigned TypeID = MF->getTypeIDFor(GV);
985  MIRBuilder.buildConstant(Reg, TypeID);
986  return true;
987  }
988  case Intrinsic::objectsize: {
989  // If we don't know by now, we're never going to know.
990  const ConstantInt *Min = cast<ConstantInt>(CI.getArgOperand(1));
991 
992  MIRBuilder.buildConstant(getOrCreateVReg(CI), Min->isZero() ? -1ULL : 0);
993  return true;
994  }
995  case Intrinsic::is_constant:
996  // If this wasn't constant-folded away by now, then it's not a
997  // constant.
998  MIRBuilder.buildConstant(getOrCreateVReg(CI), 0);
999  return true;
1000  case Intrinsic::stackguard:
1001  getStackGuard(getOrCreateVReg(CI), MIRBuilder);
1002  return true;
1003  case Intrinsic::stackprotector: {
1004  LLT PtrTy = getLLTForType(*CI.getArgOperand(0)->getType(), *DL);
1005  unsigned GuardVal = MRI->createGenericVirtualRegister(PtrTy);
1006  getStackGuard(GuardVal, MIRBuilder);
1007 
1008  AllocaInst *Slot = cast<AllocaInst>(CI.getArgOperand(1));
1009  int FI = getOrCreateFrameIndex(*Slot);
1010  MF->getFrameInfo().setStackProtectorIndex(FI);
1011 
1012  MIRBuilder.buildStore(
1013  GuardVal, getOrCreateVReg(*Slot),
1014  *MF->getMachineMemOperand(MachinePointerInfo::getFixedStack(*MF, FI),
1017  PtrTy.getSizeInBits() / 8, 8));
1018  return true;
1019  }
1020  case Intrinsic::stacksave: {
1021  // Save the stack pointer to the location provided by the intrinsic.
1022  unsigned Reg = getOrCreateVReg(CI);
1023  unsigned StackPtr = MF->getSubtarget()
1024  .getTargetLowering()
1025  ->getStackPointerRegisterToSaveRestore();
1026 
1027  // If the target doesn't specify a stack pointer, then fall back.
1028  if (!StackPtr)
1029  return false;
1030 
1031  MIRBuilder.buildCopy(Reg, StackPtr);
1032  return true;
1033  }
1034  case Intrinsic::stackrestore: {
1035  // Restore the stack pointer from the location provided by the intrinsic.
1036  unsigned Reg = getOrCreateVReg(*CI.getArgOperand(0));
1037  unsigned StackPtr = MF->getSubtarget()
1038  .getTargetLowering()
1039  ->getStackPointerRegisterToSaveRestore();
1040 
1041  // If the target doesn't specify a stack pointer, then fall back.
1042  if (!StackPtr)
1043  return false;
1044 
1045  MIRBuilder.buildCopy(StackPtr, Reg);
1046  return true;
1047  }
1048  case Intrinsic::cttz:
1049  case Intrinsic::ctlz: {
1050  ConstantInt *Cst = cast<ConstantInt>(CI.getArgOperand(1));
1051  bool isTrailing = ID == Intrinsic::cttz;
1052  unsigned Opcode = isTrailing
1053  ? Cst->isZero() ? TargetOpcode::G_CTTZ
1054  : TargetOpcode::G_CTTZ_ZERO_UNDEF
1055  : Cst->isZero() ? TargetOpcode::G_CTLZ
1056  : TargetOpcode::G_CTLZ_ZERO_UNDEF;
1057  MIRBuilder.buildInstr(Opcode)
1058  .addDef(getOrCreateVReg(CI))
1059  .addUse(getOrCreateVReg(*CI.getArgOperand(0)));
1060  return true;
1061  }
1062  case Intrinsic::invariant_start: {
1063  LLT PtrTy = getLLTForType(*CI.getArgOperand(0)->getType(), *DL);
1064  unsigned Undef = MRI->createGenericVirtualRegister(PtrTy);
1065  MIRBuilder.buildUndef(Undef);
1066  return true;
1067  }
1068  case Intrinsic::invariant_end:
1069  return true;
1070  }
1071  return false;
1072 }
1073 
1074 bool IRTranslator::translateInlineAsm(const CallInst &CI,
1075  MachineIRBuilder &MIRBuilder) {
1076  const InlineAsm &IA = cast<InlineAsm>(*CI.getCalledValue());
1077  if (!IA.getConstraintString().empty())
1078  return false;
1079 
1080  unsigned ExtraInfo = 0;
1081  if (IA.hasSideEffects())
1082  ExtraInfo |= InlineAsm::Extra_HasSideEffects;
1083  if (IA.getDialect() == InlineAsm::AD_Intel)
1084  ExtraInfo |= InlineAsm::Extra_AsmDialect;
1085 
1087  .addExternalSymbol(IA.getAsmString().c_str())
1088  .addImm(ExtraInfo);
1089 
1090  return true;
1091 }
1092 
1093 unsigned IRTranslator::packRegs(const Value &V,
1094  MachineIRBuilder &MIRBuilder) {
1095  ArrayRef<unsigned> Regs = getOrCreateVRegs(V);
1096  ArrayRef<uint64_t> Offsets = *VMap.getOffsets(V);
1097  LLT BigTy = getLLTForType(*V.getType(), *DL);
1098 
1099  if (Regs.size() == 1)
1100  return Regs[0];
1101 
1102  unsigned Dst = MRI->createGenericVirtualRegister(BigTy);
1103  MIRBuilder.buildUndef(Dst);
1104  for (unsigned i = 0; i < Regs.size(); ++i) {
1105  unsigned NewDst = MRI->createGenericVirtualRegister(BigTy);
1106  MIRBuilder.buildInsert(NewDst, Dst, Regs[i], Offsets[i]);
1107  Dst = NewDst;
1108  }
1109  return Dst;
1110 }
1111 
1112 void IRTranslator::unpackRegs(const Value &V, unsigned Src,
1113  MachineIRBuilder &MIRBuilder) {
1114  ArrayRef<unsigned> Regs = getOrCreateVRegs(V);
1115  ArrayRef<uint64_t> Offsets = *VMap.getOffsets(V);
1116 
1117  for (unsigned i = 0; i < Regs.size(); ++i)
1118  MIRBuilder.buildExtract(Regs[i], Src, Offsets[i]);
1119 }
1120 
1121 bool IRTranslator::translateCall(const User &U, MachineIRBuilder &MIRBuilder) {
1122  const CallInst &CI = cast<CallInst>(U);
1123  auto TII = MF->getTarget().getIntrinsicInfo();
1124  const Function *F = CI.getCalledFunction();
1125 
1126  // FIXME: support Windows dllimport function calls.
1127  if (F && F->hasDLLImportStorageClass())
1128  return false;
1129 
1130  if (CI.isInlineAsm())
1131  return translateInlineAsm(CI, MIRBuilder);
1132 
1134  if (F && F->isIntrinsic()) {
1135  ID = F->getIntrinsicID();
1136  if (TII && ID == Intrinsic::not_intrinsic)
1137  ID = static_cast<Intrinsic::ID>(TII->getIntrinsicID(F));
1138  }
1139 
1140  if (!F || !F->isIntrinsic() || ID == Intrinsic::not_intrinsic) {
1141  bool IsSplitType = valueIsSplit(CI);
1142  unsigned Res = IsSplitType ? MRI->createGenericVirtualRegister(
1143  getLLTForType(*CI.getType(), *DL))
1144  : getOrCreateVReg(CI);
1145 
1147  for (auto &Arg: CI.arg_operands())
1148  Args.push_back(packRegs(*Arg, MIRBuilder));
1149 
1150  MF->getFrameInfo().setHasCalls(true);
1151  bool Success = CLI->lowerCall(MIRBuilder, &CI, Res, Args, [&]() {
1152  return getOrCreateVReg(*CI.getCalledValue());
1153  });
1154 
1155  if (IsSplitType)
1156  unpackRegs(CI, Res, MIRBuilder);
1157  return Success;
1158  }
1159 
1160  assert(ID != Intrinsic::not_intrinsic && "unknown intrinsic");
1161 
1162  if (translateKnownIntrinsic(CI, ID, MIRBuilder))
1163  return true;
1164 
1165  ArrayRef<unsigned> ResultRegs;
1166  if (!CI.getType()->isVoidTy())
1167  ResultRegs = getOrCreateVRegs(CI);
1168 
1169  MachineInstrBuilder MIB =
1170  MIRBuilder.buildIntrinsic(ID, ResultRegs, !CI.doesNotAccessMemory());
1171  if (isa<FPMathOperator>(CI))
1172  MIB->copyIRFlags(CI);
1173 
1174  for (auto &Arg : CI.arg_operands()) {
1175  // Some intrinsics take metadata parameters. Reject them.
1176  if (isa<MetadataAsValue>(Arg))
1177  return false;
1178  MIB.addUse(packRegs(*Arg, MIRBuilder));
1179  }
1180 
1181  // Add a MachineMemOperand if it is a target mem intrinsic.
1182  const TargetLowering &TLI = *MF->getSubtarget().getTargetLowering();
1183  TargetLowering::IntrinsicInfo Info;
1184  // TODO: Add a GlobalISel version of getTgtMemIntrinsic.
1185  if (TLI.getTgtMemIntrinsic(Info, CI, *MF, ID)) {
1186  unsigned Align = Info.align;
1187  if (Align == 0)
1188  Align = DL->getABITypeAlignment(Info.memVT.getTypeForEVT(F->getContext()));
1189 
1190  uint64_t Size = Info.memVT.getStoreSize();
1191  MIB.addMemOperand(MF->getMachineMemOperand(MachinePointerInfo(Info.ptrVal),
1192  Info.flags, Size, Align));
1193  }
1194 
1195  return true;
1196 }
1197 
1198 bool IRTranslator::translateInvoke(const User &U,
1199  MachineIRBuilder &MIRBuilder) {
1200  const InvokeInst &I = cast<InvokeInst>(U);
1201  MCContext &Context = MF->getContext();
1202 
1203  const BasicBlock *ReturnBB = I.getSuccessor(0);
1204  const BasicBlock *EHPadBB = I.getSuccessor(1);
1205 
1206  const Value *Callee = I.getCalledValue();
1207  const Function *Fn = dyn_cast<Function>(Callee);
1208  if (isa<InlineAsm>(Callee))
1209  return false;
1210 
1211  // FIXME: support invoking patchpoint and statepoint intrinsics.
1212  if (Fn && Fn->isIntrinsic())
1213  return false;
1214 
1215  // FIXME: support whatever these are.
1217  return false;
1218 
1219  // FIXME: support Windows exception handling.
1220  if (!isa<LandingPadInst>(EHPadBB->front()))
1221  return false;
1222 
1223  // Emit the actual call, bracketed by EH_LABELs so that the MF knows about
1224  // the region covered by the try.
1225  MCSymbol *BeginSymbol = Context.createTempSymbol();
1226  MIRBuilder.buildInstr(TargetOpcode::EH_LABEL).addSym(BeginSymbol);
1227 
1228  unsigned Res = 0;
1229  if (!I.getType()->isVoidTy())
1230  Res = MRI->createGenericVirtualRegister(getLLTForType(*I.getType(), *DL));
1232  for (auto &Arg: I.arg_operands())
1233  Args.push_back(packRegs(*Arg, MIRBuilder));
1234 
1235  if (!CLI->lowerCall(MIRBuilder, &I, Res, Args,
1236  [&]() { return getOrCreateVReg(*I.getCalledValue()); }))
1237  return false;
1238 
1239  unpackRegs(I, Res, MIRBuilder);
1240 
1241  MCSymbol *EndSymbol = Context.createTempSymbol();
1242  MIRBuilder.buildInstr(TargetOpcode::EH_LABEL).addSym(EndSymbol);
1243 
1244  // FIXME: track probabilities.
1245  MachineBasicBlock &EHPadMBB = getMBB(*EHPadBB),
1246  &ReturnMBB = getMBB(*ReturnBB);
1247  MF->addInvoke(&EHPadMBB, BeginSymbol, EndSymbol);
1248  MIRBuilder.getMBB().addSuccessor(&ReturnMBB);
1249  MIRBuilder.getMBB().addSuccessor(&EHPadMBB);
1250  MIRBuilder.buildBr(ReturnMBB);
1251 
1252  return true;
1253 }
1254 
bool IRTranslator::translateCallBr(const User &U,
                                   MachineIRBuilder &MIRBuilder) {
  // FIXME: Implement this.
  // callbr is not handled yet; returning false makes translate() report the
  // instruction as untranslatable (see the failure path in
  // runOnMachineFunction).
  return false;
}
1260 
1261 bool IRTranslator::translateLandingPad(const User &U,
1262  MachineIRBuilder &MIRBuilder) {
1263  const LandingPadInst &LP = cast<LandingPadInst>(U);
1264 
1265  MachineBasicBlock &MBB = MIRBuilder.getMBB();
1266 
1267  MBB.setIsEHPad();
1268 
1269  // If there aren't registers to copy the values into (e.g., during SjLj
1270  // exceptions), then don't bother.
1271  auto &TLI = *MF->getSubtarget().getTargetLowering();
1272  const Constant *PersonalityFn = MF->getFunction().getPersonalityFn();
1273  if (TLI.getExceptionPointerRegister(PersonalityFn) == 0 &&
1274  TLI.getExceptionSelectorRegister(PersonalityFn) == 0)
1275  return true;
1276 
1277  // If landingpad's return type is token type, we don't create DAG nodes
1278  // for its exception pointer and selector value. The extraction of exception
1279  // pointer or selector value from token type landingpads is not currently
1280  // supported.
1281  if (LP.getType()->isTokenTy())
1282  return true;
1283 
1284  // Add a label to mark the beginning of the landing pad. Deletion of the
1285  // landing pad can thus be detected via the MachineModuleInfo.
1287  .addSym(MF->addLandingPad(&MBB));
1288 
1289  LLT Ty = getLLTForType(*LP.getType(), *DL);
1290  unsigned Undef = MRI->createGenericVirtualRegister(Ty);
1291  MIRBuilder.buildUndef(Undef);
1292 
1293  SmallVector<LLT, 2> Tys;
1294  for (Type *Ty : cast<StructType>(LP.getType())->elements())
1295  Tys.push_back(getLLTForType(*Ty, *DL));
1296  assert(Tys.size() == 2 && "Only two-valued landingpads are supported");
1297 
1298  // Mark exception register as live in.
1299  unsigned ExceptionReg = TLI.getExceptionPointerRegister(PersonalityFn);
1300  if (!ExceptionReg)
1301  return false;
1302 
1303  MBB.addLiveIn(ExceptionReg);
1304  ArrayRef<unsigned> ResRegs = getOrCreateVRegs(LP);
1305  MIRBuilder.buildCopy(ResRegs[0], ExceptionReg);
1306 
1307  unsigned SelectorReg = TLI.getExceptionSelectorRegister(PersonalityFn);
1308  if (!SelectorReg)
1309  return false;
1310 
1311  MBB.addLiveIn(SelectorReg);
1312  unsigned PtrVReg = MRI->createGenericVirtualRegister(Tys[0]);
1313  MIRBuilder.buildCopy(PtrVReg, SelectorReg);
1314  MIRBuilder.buildCast(ResRegs[1], PtrVReg);
1315 
1316  return true;
1317 }
1318 
1319 bool IRTranslator::translateAlloca(const User &U,
1320  MachineIRBuilder &MIRBuilder) {
1321  auto &AI = cast<AllocaInst>(U);
1322 
1323  if (AI.isSwiftError())
1324  return false;
1325 
1326  if (AI.isStaticAlloca()) {
1327  unsigned Res = getOrCreateVReg(AI);
1328  int FI = getOrCreateFrameIndex(AI);
1329  MIRBuilder.buildFrameIndex(Res, FI);
1330  return true;
1331  }
1332 
1333  // FIXME: support stack probing for Windows.
1334  if (MF->getTarget().getTargetTriple().isOSWindows())
1335  return false;
1336 
1337  // Now we're in the harder dynamic case.
1338  Type *Ty = AI.getAllocatedType();
1339  unsigned Align =
1340  std::max((unsigned)DL->getPrefTypeAlignment(Ty), AI.getAlignment());
1341 
1342  unsigned NumElts = getOrCreateVReg(*AI.getArraySize());
1343 
1344  Type *IntPtrIRTy = DL->getIntPtrType(AI.getType());
1345  LLT IntPtrTy = getLLTForType(*IntPtrIRTy, *DL);
1346  if (MRI->getType(NumElts) != IntPtrTy) {
1347  unsigned ExtElts = MRI->createGenericVirtualRegister(IntPtrTy);
1348  MIRBuilder.buildZExtOrTrunc(ExtElts, NumElts);
1349  NumElts = ExtElts;
1350  }
1351 
1352  unsigned AllocSize = MRI->createGenericVirtualRegister(IntPtrTy);
1353  unsigned TySize =
1354  getOrCreateVReg(*ConstantInt::get(IntPtrIRTy, -DL->getTypeAllocSize(Ty)));
1355  MIRBuilder.buildMul(AllocSize, NumElts, TySize);
1356 
1357  LLT PtrTy = getLLTForType(*AI.getType(), *DL);
1358  auto &TLI = *MF->getSubtarget().getTargetLowering();
1359  unsigned SPReg = TLI.getStackPointerRegisterToSaveRestore();
1360 
1361  unsigned SPTmp = MRI->createGenericVirtualRegister(PtrTy);
1362  MIRBuilder.buildCopy(SPTmp, SPReg);
1363 
1364  unsigned AllocTmp = MRI->createGenericVirtualRegister(PtrTy);
1365  MIRBuilder.buildGEP(AllocTmp, SPTmp, AllocSize);
1366 
1367  // Handle alignment. We have to realign if the allocation granule was smaller
1368  // than stack alignment, or the specific alloca requires more than stack
1369  // alignment.
1370  unsigned StackAlign =
1371  MF->getSubtarget().getFrameLowering()->getStackAlignment();
1372  Align = std::max(Align, StackAlign);
1373  if (Align > StackAlign || DL->getTypeAllocSize(Ty) % StackAlign != 0) {
1374  // Round the size of the allocation up to the stack alignment size
1375  // by add SA-1 to the size. This doesn't overflow because we're computing
1376  // an address inside an alloca.
1377  unsigned AlignedAlloc = MRI->createGenericVirtualRegister(PtrTy);
1378  MIRBuilder.buildPtrMask(AlignedAlloc, AllocTmp, Log2_32(Align));
1379  AllocTmp = AlignedAlloc;
1380  }
1381 
1382  MIRBuilder.buildCopy(SPReg, AllocTmp);
1383  MIRBuilder.buildCopy(getOrCreateVReg(AI), AllocTmp);
1384 
1385  MF->getFrameInfo().CreateVariableSizedObject(Align ? Align : 1, &AI);
1386  assert(MF->getFrameInfo().hasVarSizedObjects());
1387  return true;
1388 }
1389 
1390 bool IRTranslator::translateVAArg(const User &U, MachineIRBuilder &MIRBuilder) {
1391  // FIXME: We may need more info about the type. Because of how LLT works,
1392  // we're completely discarding the i64/double distinction here (amongst
1393  // others). Fortunately the ABIs I know of where that matters don't use va_arg
1394  // anyway but that's not guaranteed.
1395  MIRBuilder.buildInstr(TargetOpcode::G_VAARG)
1396  .addDef(getOrCreateVReg(U))
1397  .addUse(getOrCreateVReg(*U.getOperand(0)))
1398  .addImm(DL->getABITypeAlignment(U.getType()));
1399  return true;
1400 }
1401 
1402 bool IRTranslator::translateInsertElement(const User &U,
1403  MachineIRBuilder &MIRBuilder) {
1404  // If it is a <1 x Ty> vector, use the scalar as it is
1405  // not a legal vector type in LLT.
1406  if (U.getType()->getVectorNumElements() == 1) {
1407  unsigned Elt = getOrCreateVReg(*U.getOperand(1));
1408  auto &Regs = *VMap.getVRegs(U);
1409  if (Regs.empty()) {
1410  Regs.push_back(Elt);
1411  VMap.getOffsets(U)->push_back(0);
1412  } else {
1413  MIRBuilder.buildCopy(Regs[0], Elt);
1414  }
1415  return true;
1416  }
1417 
1418  unsigned Res = getOrCreateVReg(U);
1419  unsigned Val = getOrCreateVReg(*U.getOperand(0));
1420  unsigned Elt = getOrCreateVReg(*U.getOperand(1));
1421  unsigned Idx = getOrCreateVReg(*U.getOperand(2));
1422  MIRBuilder.buildInsertVectorElement(Res, Val, Elt, Idx);
1423  return true;
1424 }
1425 
1426 bool IRTranslator::translateExtractElement(const User &U,
1427  MachineIRBuilder &MIRBuilder) {
1428  // If it is a <1 x Ty> vector, use the scalar as it is
1429  // not a legal vector type in LLT.
1430  if (U.getOperand(0)->getType()->getVectorNumElements() == 1) {
1431  unsigned Elt = getOrCreateVReg(*U.getOperand(0));
1432  auto &Regs = *VMap.getVRegs(U);
1433  if (Regs.empty()) {
1434  Regs.push_back(Elt);
1435  VMap.getOffsets(U)->push_back(0);
1436  } else {
1437  MIRBuilder.buildCopy(Regs[0], Elt);
1438  }
1439  return true;
1440  }
1441  unsigned Res = getOrCreateVReg(U);
1442  unsigned Val = getOrCreateVReg(*U.getOperand(0));
1443  const auto &TLI = *MF->getSubtarget().getTargetLowering();
1444  unsigned PreferredVecIdxWidth = TLI.getVectorIdxTy(*DL).getSizeInBits();
1445  unsigned Idx = 0;
1446  if (auto *CI = dyn_cast<ConstantInt>(U.getOperand(1))) {
1447  if (CI->getBitWidth() != PreferredVecIdxWidth) {
1448  APInt NewIdx = CI->getValue().sextOrTrunc(PreferredVecIdxWidth);
1449  auto *NewIdxCI = ConstantInt::get(CI->getContext(), NewIdx);
1450  Idx = getOrCreateVReg(*NewIdxCI);
1451  }
1452  }
1453  if (!Idx)
1454  Idx = getOrCreateVReg(*U.getOperand(1));
1455  if (MRI->getType(Idx).getSizeInBits() != PreferredVecIdxWidth) {
1456  const LLT &VecIdxTy = LLT::scalar(PreferredVecIdxWidth);
1457  Idx = MIRBuilder.buildSExtOrTrunc(VecIdxTy, Idx)->getOperand(0).getReg();
1458  }
1459  MIRBuilder.buildExtractVectorElement(Res, Val, Idx);
1460  return true;
1461 }
1462 
1463 bool IRTranslator::translateShuffleVector(const User &U,
1464  MachineIRBuilder &MIRBuilder) {
1465  MIRBuilder.buildInstr(TargetOpcode::G_SHUFFLE_VECTOR)
1466  .addDef(getOrCreateVReg(U))
1467  .addUse(getOrCreateVReg(*U.getOperand(0)))
1468  .addUse(getOrCreateVReg(*U.getOperand(1)))
1469  .addUse(getOrCreateVReg(*U.getOperand(2)));
1470  return true;
1471 }
1472 
1473 bool IRTranslator::translatePHI(const User &U, MachineIRBuilder &MIRBuilder) {
1474  const PHINode &PI = cast<PHINode>(U);
1475 
1477  for (auto Reg : getOrCreateVRegs(PI)) {
1478  auto MIB = MIRBuilder.buildInstr(TargetOpcode::G_PHI, {Reg}, {});
1479  Insts.push_back(MIB.getInstr());
1480  }
1481 
1482  PendingPHIs.emplace_back(&PI, std::move(Insts));
1483  return true;
1484 }
1485 
1486 bool IRTranslator::translateAtomicCmpXchg(const User &U,
1487  MachineIRBuilder &MIRBuilder) {
1488  const AtomicCmpXchgInst &I = cast<AtomicCmpXchgInst>(U);
1489 
1490  if (I.isWeak())
1491  return false;
1492 
1493  auto Flags = I.isVolatile() ? MachineMemOperand::MOVolatile
1496 
1497  Type *ResType = I.getType();
1498  Type *ValType = ResType->Type::getStructElementType(0);
1499 
1500  auto Res = getOrCreateVRegs(I);
1501  unsigned OldValRes = Res[0];
1502  unsigned SuccessRes = Res[1];
1503  unsigned Addr = getOrCreateVReg(*I.getPointerOperand());
1504  unsigned Cmp = getOrCreateVReg(*I.getCompareOperand());
1505  unsigned NewVal = getOrCreateVReg(*I.getNewValOperand());
1506 
1507  MIRBuilder.buildAtomicCmpXchgWithSuccess(
1508  OldValRes, SuccessRes, Addr, Cmp, NewVal,
1509  *MF->getMachineMemOperand(MachinePointerInfo(I.getPointerOperand()),
1510  Flags, DL->getTypeStoreSize(ValType),
1511  getMemOpAlignment(I), AAMDNodes(), nullptr,
1513  I.getFailureOrdering()));
1514  return true;
1515 }
1516 
1517 bool IRTranslator::translateAtomicRMW(const User &U,
1518  MachineIRBuilder &MIRBuilder) {
1519  const AtomicRMWInst &I = cast<AtomicRMWInst>(U);
1520 
1521  auto Flags = I.isVolatile() ? MachineMemOperand::MOVolatile
1524 
1525  Type *ResType = I.getType();
1526 
1527  unsigned Res = getOrCreateVReg(I);
1528  unsigned Addr = getOrCreateVReg(*I.getPointerOperand());
1529  unsigned Val = getOrCreateVReg(*I.getValOperand());
1530 
1531  unsigned Opcode = 0;
1532  switch (I.getOperation()) {
1533  default:
1534  llvm_unreachable("Unknown atomicrmw op");
1535  return false;
1536  case AtomicRMWInst::Xchg:
1537  Opcode = TargetOpcode::G_ATOMICRMW_XCHG;
1538  break;
1539  case AtomicRMWInst::Add:
1540  Opcode = TargetOpcode::G_ATOMICRMW_ADD;
1541  break;
1542  case AtomicRMWInst::Sub:
1543  Opcode = TargetOpcode::G_ATOMICRMW_SUB;
1544  break;
1545  case AtomicRMWInst::And:
1546  Opcode = TargetOpcode::G_ATOMICRMW_AND;
1547  break;
1548  case AtomicRMWInst::Nand:
1549  Opcode = TargetOpcode::G_ATOMICRMW_NAND;
1550  break;
1551  case AtomicRMWInst::Or:
1552  Opcode = TargetOpcode::G_ATOMICRMW_OR;
1553  break;
1554  case AtomicRMWInst::Xor:
1555  Opcode = TargetOpcode::G_ATOMICRMW_XOR;
1556  break;
1557  case AtomicRMWInst::Max:
1558  Opcode = TargetOpcode::G_ATOMICRMW_MAX;
1559  break;
1560  case AtomicRMWInst::Min:
1561  Opcode = TargetOpcode::G_ATOMICRMW_MIN;
1562  break;
1563  case AtomicRMWInst::UMax:
1564  Opcode = TargetOpcode::G_ATOMICRMW_UMAX;
1565  break;
1566  case AtomicRMWInst::UMin:
1567  Opcode = TargetOpcode::G_ATOMICRMW_UMIN;
1568  break;
1569  }
1570 
1571  MIRBuilder.buildAtomicRMW(
1572  Opcode, Res, Addr, Val,
1573  *MF->getMachineMemOperand(MachinePointerInfo(I.getPointerOperand()),
1574  Flags, DL->getTypeStoreSize(ResType),
1575  getMemOpAlignment(I), AAMDNodes(), nullptr,
1576  I.getSyncScopeID(), I.getOrdering()));
1577  return true;
1578 }
1579 
1580 void IRTranslator::finishPendingPhis() {
1581 #ifndef NDEBUG
1582  DILocationVerifier Verifier;
1583  GISelObserverWrapper WrapperObserver(&Verifier);
1584  RAIIDelegateInstaller DelInstall(*MF, &WrapperObserver);
1585 #endif // ifndef NDEBUG
1586  for (auto &Phi : PendingPHIs) {
1587  const PHINode *PI = Phi.first;
1588  ArrayRef<MachineInstr *> ComponentPHIs = Phi.second;
1589  EntryBuilder->setDebugLoc(PI->getDebugLoc());
1590 #ifndef NDEBUG
1591  Verifier.setCurrentInst(PI);
1592 #endif // ifndef NDEBUG
1593 
1594  // All MachineBasicBlocks exist, add them to the PHI. We assume IRTranslator
1595  // won't create extra control flow here, otherwise we need to find the
1596  // dominating predecessor here (or perhaps force the weirder IRTranslators
1597  // to provide a simple boundary).
1598  SmallSet<const BasicBlock *, 4> HandledPreds;
1599 
1600  for (unsigned i = 0; i < PI->getNumIncomingValues(); ++i) {
1601  auto IRPred = PI->getIncomingBlock(i);
1602  if (HandledPreds.count(IRPred))
1603  continue;
1604 
1605  HandledPreds.insert(IRPred);
1606  ArrayRef<unsigned> ValRegs = getOrCreateVRegs(*PI->getIncomingValue(i));
1607  for (auto Pred : getMachinePredBBs({IRPred, PI->getParent()})) {
1608  assert(Pred->isSuccessor(ComponentPHIs[0]->getParent()) &&
1609  "incorrect CFG at MachineBasicBlock level");
1610  for (unsigned j = 0; j < ValRegs.size(); ++j) {
1611  MachineInstrBuilder MIB(*MF, ComponentPHIs[j]);
1612  MIB.addUse(ValRegs[j]);
1613  MIB.addMBB(Pred);
1614  }
1615  }
1616  }
1617  }
1618 }
1619 
1620 bool IRTranslator::valueIsSplit(const Value &V,
1622  SmallVector<LLT, 4> SplitTys;
1623  if (Offsets && !Offsets->empty())
1624  Offsets->clear();
1625  computeValueLLTs(*DL, *V.getType(), SplitTys, Offsets);
1626  return SplitTys.size() > 1;
1627 }
1628 
bool IRTranslator::translate(const Instruction &Inst) {
  // Keep both insertion builders in sync with the instruction's source
  // location so every MI emitted for it carries the right DebugLoc.
  CurBuilder->setDebugLoc(Inst.getDebugLoc());
  EntryBuilder->setDebugLoc(Inst.getDebugLoc());
  // Dispatch on the IR opcode to the matching translate##OPCODE method,
  // generated from the master instruction list.
  switch(Inst.getOpcode()) {
#define HANDLE_INST(NUM, OPCODE, CLASS) \
  case Instruction::OPCODE: \
    return translate##OPCODE(Inst, *CurBuilder.get());
#include "llvm/IR/Instruction.def"
  default:
    return false;
  }
}
1641 
1642 bool IRTranslator::translate(const Constant &C, unsigned Reg) {
1643  if (auto CI = dyn_cast<ConstantInt>(&C))
1644  EntryBuilder->buildConstant(Reg, *CI);
1645  else if (auto CF = dyn_cast<ConstantFP>(&C))
1646  EntryBuilder->buildFConstant(Reg, *CF);
1647  else if (isa<UndefValue>(C))
1648  EntryBuilder->buildUndef(Reg);
1649  else if (isa<ConstantPointerNull>(C)) {
1650  // As we are trying to build a constant val of 0 into a pointer,
1651  // insert a cast to make them correct with respect to types.
1652  unsigned NullSize = DL->getTypeSizeInBits(C.getType());
1653  auto *ZeroTy = Type::getIntNTy(C.getContext(), NullSize);
1654  auto *ZeroVal = ConstantInt::get(ZeroTy, 0);
1655  unsigned ZeroReg = getOrCreateVReg(*ZeroVal);
1656  EntryBuilder->buildCast(Reg, ZeroReg);
1657  } else if (auto GV = dyn_cast<GlobalValue>(&C))
1658  EntryBuilder->buildGlobalValue(Reg, GV);
1659  else if (auto CAZ = dyn_cast<ConstantAggregateZero>(&C)) {
1660  if (!CAZ->getType()->isVectorTy())
1661  return false;
1662  // Return the scalar if it is a <1 x Ty> vector.
1663  if (CAZ->getNumElements() == 1)
1664  return translate(*CAZ->getElementValue(0u), Reg);
1666  for (unsigned i = 0; i < CAZ->getNumElements(); ++i) {
1667  Constant &Elt = *CAZ->getElementValue(i);
1668  Ops.push_back(getOrCreateVReg(Elt));
1669  }
1670  EntryBuilder->buildBuildVector(Reg, Ops);
1671  } else if (auto CV = dyn_cast<ConstantDataVector>(&C)) {
1672  // Return the scalar if it is a <1 x Ty> vector.
1673  if (CV->getNumElements() == 1)
1674  return translate(*CV->getElementAsConstant(0), Reg);
1676  for (unsigned i = 0; i < CV->getNumElements(); ++i) {
1677  Constant &Elt = *CV->getElementAsConstant(i);
1678  Ops.push_back(getOrCreateVReg(Elt));
1679  }
1680  EntryBuilder->buildBuildVector(Reg, Ops);
1681  } else if (auto CE = dyn_cast<ConstantExpr>(&C)) {
1682  switch(CE->getOpcode()) {
1683 #define HANDLE_INST(NUM, OPCODE, CLASS) \
1684  case Instruction::OPCODE: \
1685  return translate##OPCODE(*CE, *EntryBuilder.get());
1686 #include "llvm/IR/Instruction.def"
1687  default:
1688  return false;
1689  }
1690  } else if (auto CV = dyn_cast<ConstantVector>(&C)) {
1691  if (CV->getNumOperands() == 1)
1692  return translate(*CV->getOperand(0), Reg);
1694  for (unsigned i = 0; i < CV->getNumOperands(); ++i) {
1695  Ops.push_back(getOrCreateVReg(*CV->getOperand(i)));
1696  }
1697  EntryBuilder->buildBuildVector(Reg, Ops);
1698  } else if (auto *BA = dyn_cast<BlockAddress>(&C)) {
1699  EntryBuilder->buildBlockAddress(Reg, BA);
1700  } else
1701  return false;
1702 
1703  return true;
1704 }
1705 
1706 void IRTranslator::finalizeFunction() {
1707  // Release the memory used by the different maps we
1708  // needed during the translation.
1709  PendingPHIs.clear();
1710  VMap.reset();
1711  FrameIndices.clear();
1712  MachinePreds.clear();
1713  // MachineIRBuilder::DebugLoc can outlive the DILocation it holds. Clear it
1714  // to avoid accessing free’d memory (in runOnMachineFunction) and to avoid
1715  // destroying it twice (in ~IRTranslator() and ~LLVMContext())
1716  EntryBuilder.reset();
1717  CurBuilder.reset();
1718 }
1719 
1721  MF = &CurMF;
1722  const Function &F = MF->getFunction();
1723  if (F.empty())
1724  return false;
1726  getAnalysis<GISelCSEAnalysisWrapperPass>().getCSEWrapper();
1727  // Set the CSEConfig and run the analysis.
1728  GISelCSEInfo *CSEInfo = nullptr;
1729  TPC = &getAnalysis<TargetPassConfig>();
1730  bool EnableCSE = EnableCSEInIRTranslator.getNumOccurrences()
1732  : TPC->isGISelCSEEnabled();
1733 
1734  if (EnableCSE) {
1735  EntryBuilder = make_unique<CSEMIRBuilder>(CurMF);
1736  CSEInfo = &Wrapper.get(TPC->getCSEConfig());
1737  EntryBuilder->setCSEInfo(CSEInfo);
1738  CurBuilder = make_unique<CSEMIRBuilder>(CurMF);
1739  CurBuilder->setCSEInfo(CSEInfo);
1740  } else {
1741  EntryBuilder = make_unique<MachineIRBuilder>();
1742  CurBuilder = make_unique<MachineIRBuilder>();
1743  }
1744  CLI = MF->getSubtarget().getCallLowering();
1745  CurBuilder->setMF(*MF);
1746  EntryBuilder->setMF(*MF);
1747  MRI = &MF->getRegInfo();
1748  DL = &F.getParent()->getDataLayout();
1749  ORE = llvm::make_unique<OptimizationRemarkEmitter>(&F);
1750 
1751  assert(PendingPHIs.empty() && "stale PHIs");
1752 
1753  if (!DL->isLittleEndian()) {
1754  // Currently we don't properly handle big endian code.
1755  OptimizationRemarkMissed R("gisel-irtranslator", "GISelFailure",
1756  F.getSubprogram(), &F.getEntryBlock());
1757  R << "unable to translate in big endian mode";
1758  reportTranslationError(*MF, *TPC, *ORE, R);
1759  }
1760 
1761  // Release the per-function state when we return, whether we succeeded or not.
1762  auto FinalizeOnReturn = make_scope_exit([this]() { finalizeFunction(); });
1763 
1764  // Setup a separate basic-block for the arguments and constants
1765  MachineBasicBlock *EntryBB = MF->CreateMachineBasicBlock();
1766  MF->push_back(EntryBB);
1767  EntryBuilder->setMBB(*EntryBB);
1768 
1769  // Create all blocks, in IR order, to preserve the layout.
1770  for (const BasicBlock &BB: F) {
1771  auto *&MBB = BBToMBB[&BB];
1772 
1773  MBB = MF->CreateMachineBasicBlock(&BB);
1774  MF->push_back(MBB);
1775 
1776  if (BB.hasAddressTaken())
1777  MBB->setHasAddressTaken();
1778  }
1779 
1780  // Make our arguments/constants entry block fallthrough to the IR entry block.
1781  EntryBB->addSuccessor(&getMBB(F.front()));
1782 
1783  // Lower the actual args into this basic block.
1784  SmallVector<unsigned, 8> VRegArgs;
1785  for (const Argument &Arg: F.args()) {
1786  if (DL->getTypeStoreSize(Arg.getType()) == 0)
1787  continue; // Don't handle zero sized types.
1788  VRegArgs.push_back(
1789  MRI->createGenericVirtualRegister(getLLTForType(*Arg.getType(), *DL)));
1790  }
1791 
1792  // We don't currently support translating swifterror or swiftself functions.
1793  for (auto &Arg : F.args()) {
1794  if (Arg.hasSwiftErrorAttr() || Arg.hasSwiftSelfAttr()) {
1795  OptimizationRemarkMissed R("gisel-irtranslator", "GISelFailure",
1796  F.getSubprogram(), &F.getEntryBlock());
1797  R << "unable to lower arguments due to swifterror/swiftself: "
1798  << ore::NV("Prototype", F.getType());
1799  reportTranslationError(*MF, *TPC, *ORE, R);
1800  return false;
1801  }
1802  }
1803 
1804  if (!CLI->lowerFormalArguments(*EntryBuilder.get(), F, VRegArgs)) {
1805  OptimizationRemarkMissed R("gisel-irtranslator", "GISelFailure",
1806  F.getSubprogram(), &F.getEntryBlock());
1807  R << "unable to lower arguments: " << ore::NV("Prototype", F.getType());
1808  reportTranslationError(*MF, *TPC, *ORE, R);
1809  return false;
1810  }
1811 
1812  auto ArgIt = F.arg_begin();
1813  for (auto &VArg : VRegArgs) {
1814  // If the argument is an unsplit scalar then don't use unpackRegs to avoid
1815  // creating redundant copies.
1816  if (!valueIsSplit(*ArgIt, VMap.getOffsets(*ArgIt))) {
1817  auto &VRegs = *VMap.getVRegs(cast<Value>(*ArgIt));
1818  assert(VRegs.empty() && "VRegs already populated?");
1819  VRegs.push_back(VArg);
1820  } else {
1821  unpackRegs(*ArgIt, VArg, *EntryBuilder.get());
1822  }
1823  ArgIt++;
1824  }
1825 
1826  // Need to visit defs before uses when translating instructions.
1827  GISelObserverWrapper WrapperObserver;
1828  if (EnableCSE && CSEInfo)
1829  WrapperObserver.addObserver(CSEInfo);
1830  {
1832 #ifndef NDEBUG
1833  DILocationVerifier Verifier;
1834  WrapperObserver.addObserver(&Verifier);
1835 #endif // ifndef NDEBUG
1836  RAIIDelegateInstaller DelInstall(*MF, &WrapperObserver);
1837  for (const BasicBlock *BB : RPOT) {
1838  MachineBasicBlock &MBB = getMBB(*BB);
1839  // Set the insertion point of all the following translations to
1840  // the end of this basic block.
1841  CurBuilder->setMBB(MBB);
1842 
1843  for (const Instruction &Inst : *BB) {
1844 #ifndef NDEBUG
1845  Verifier.setCurrentInst(&Inst);
1846 #endif // ifndef NDEBUG
1847  if (translate(Inst))
1848  continue;
1849 
1850  OptimizationRemarkMissed R("gisel-irtranslator", "GISelFailure",
1851  Inst.getDebugLoc(), BB);
1852  R << "unable to translate instruction: " << ore::NV("Opcode", &Inst);
1853 
1854  if (ORE->allowExtraAnalysis("gisel-irtranslator")) {
1855  std::string InstStrStorage;
1856  raw_string_ostream InstStr(InstStrStorage);
1857  InstStr << Inst;
1858 
1859  R << ": '" << InstStr.str() << "'";
1860  }
1861 
1862  reportTranslationError(*MF, *TPC, *ORE, R);
1863  return false;
1864  }
1865  }
1866 #ifndef NDEBUG
1867  WrapperObserver.removeObserver(&Verifier);
1868 #endif
1869  }
1870 
1871  finishPendingPhis();
1872 
1873  // Merge the argument lowering and constants block with its single
1874  // successor, the LLVM-IR entry block. We want the basic block to
1875  // be maximal.
1876  assert(EntryBB->succ_size() == 1 &&
1877  "Custom BB used for lowering should have only one successor");
1878  // Get the successor of the current entry block.
1879  MachineBasicBlock &NewEntryBB = **EntryBB->succ_begin();
1880  assert(NewEntryBB.pred_size() == 1 &&
1881  "LLVM-IR entry block has a predecessor!?");
1882  // Move all the instruction from the current entry block to the
1883  // new entry block.
1884  NewEntryBB.splice(NewEntryBB.begin(), EntryBB, EntryBB->begin(),
1885  EntryBB->end());
1886 
1887  // Update the live-in information for the new entry block.
1888  for (const MachineBasicBlock::RegisterMaskPair &LiveIn : EntryBB->liveins())
1889  NewEntryBB.addLiveIn(LiveIn);
1890  NewEntryBB.sortUniqueLiveIns();
1891 
1892  // Get rid of the now empty basic block.
1893  EntryBB->removeSuccessor(&NewEntryBB);
1894  MF->remove(EntryBB);
1895  MF->DeleteMachineBasicBlock(EntryBB);
1896 
1897  assert(&MF->front() == &NewEntryBB &&
1898  "New entry wasn't next in the list of basic block!");
1899 
1900  // Initialize stack protector information.
1901  StackProtector &SP = getAnalysis<StackProtector>();
1902  SP.copyToMachineFrameInfo(MF->getFrameInfo());
1903 
1904  return false;
1905 }
auto lower_bound(R &&Range, T &&Value) -> decltype(adl_begin(Range))
Provide wrappers to std::lower_bound which take ranges instead of having to pass begin/end explicitly...
Definition: STLExtras.h:1281
uint64_t CallInst * C
void initializeIRTranslatorPass(PassRegistry &)
Return a value (possibly void), from a function.
Value * getValueOperand()
Definition: Instructions.h:409
bool isIntrinsic() const
isIntrinsic - Returns true if the function's name starts with "llvm.".
Definition: Function.h:198
A simple RAII based CSEInfo installer.
virtual MachineInstrBuilder buildConstant(const DstOp &Res, const ConstantInt &Val)
Build and insert Res = G_CONSTANT Val.
A parsed version of the target data layout string in and methods for querying it. ...
Definition: DataLayout.h:110
constexpr char Align[]
Key for Kernel::Arg::Metadata::mAlign.
reference emplace_back(ArgTypes &&... Args)
Definition: SmallVector.h:645
This class is the base class for the comparison instructions.
Definition: InstrTypes.h:636
bool empty() const
Definition: Function.h:664
static IntegerType * getInt1Ty(LLVMContext &C)
Definition: Type.cpp:172
The CSE Analysis object.
Definition: CSEInfo.h:71
Diagnostic information for missed-optimization remarks.
This instruction extracts a struct member or array element value from an aggregate value...
static PassRegistry * getPassRegistry()
getPassRegistry - Access the global registry object, which is automatically initialized at applicatio...
MachineInstrBuilder buildZExtOrTrunc(const DstOp &Res, const SrcOp &Op)
Build and insert Res = G_ZEXT Op, Res = G_TRUNC Op, or Res = COPY Op depending on the differing sizes...
*p = old <signed v ? old : v
Definition: Instructions.h:721
iterator_range< CaseIt > cases()
Iteration adapter for range-for loops.
GCNRegPressure max(const GCNRegPressure &P1, const GCNRegPressure &P2)
This class represents an incoming formal argument to a Function.
Definition: Argument.h:29
LLVMContext & Context
bool doesNotAccessMemory(unsigned OpNo) const
Definition: InstrTypes.h:1465
MachineInstrBuilder buildGEP(unsigned Res, unsigned Op0, unsigned Op1)
Build and insert Res = G_GEP Op0, Op1.
DiagnosticInfoOptimizationBase::Argument NV
This represents the llvm.dbg.label instruction.
LLVM_ATTRIBUTE_NORETURN void report_fatal_error(Error Err, bool gen_crash_diag=true)
Report a serious error, calling any installed error handler.
Definition: Error.cpp:139
This class represents lattice values for constants.
Definition: AllocatorList.h:23
MachineInstrBuilder buildIndirectDbgValue(unsigned Reg, const MDNode *Variable, const MDNode *Expr)
Build and insert a DBG_VALUE instruction expressing the fact that the associated Variable lives in me...
void getSelectionDAGFallbackAnalysisUsage(AnalysisUsage &AU)
Modify analysis usage so it preserves passes required for the SelectionDAG fallback.
Definition: Utils.cpp:338
MachineInstrBuilder buildSExtOrTrunc(const DstOp &Res, const SrcOp &Op)
Build and insert Res = G_SEXT Op, Res = G_TRUNC Op, or Res = COPY Op depending on the differing sizes...
MCSymbol - Instances of this class represent a symbol name in the MC file, and MCSymbols are created ...
Definition: MCSymbol.h:41
bool isSized(SmallPtrSetImpl< Type *> *Visited=nullptr) const
Return true if it makes sense to take the size of this type.
Definition: Type.h:264
iterator begin() const
Definition: ArrayRef.h:136
void setIsEHPad(bool V=true)
Indicates the block is a landing pad.
an instruction that atomically checks whether a specified value is in a memory location, and, if it is, stores a new value there.
Definition: Instructions.h:528
void push_back(const T &Elt)
Definition: SmallVector.h:211
MachineInstrBuilder buildIntrinsic(Intrinsic::ID ID, ArrayRef< unsigned > Res, bool HasSideEffects)
Build and insert either a G_INTRINSIC (if HasSideEffects is false) or G_INTRINSIC_W_SIDE_EFFECTS inst...
const DebugLoc & getDebugLoc() const
Returns the debug location id of this MachineInstr.
Definition: MachineInstr.h:382
unsigned getReg() const
getReg - Returns the register number.
MachineInstrBuilder buildCast(const DstOp &Dst, const SrcOp &Src)
Build and insert an appropriate cast between two registers of equal size.
IRTranslator LLVM IR static false void reportTranslationError(MachineFunction &MF, const TargetPassConfig &TPC, OptimizationRemarkEmitter &ORE, OptimizationRemarkMissed &R)
This class represents a function call, abstracting a target machine's calling convention.
unsigned Reg
This file contains the declarations for metadata subclasses.
Value * getCondition() const
static uint64_t round(uint64_t Acc, uint64_t Input)
Definition: xxhash.cpp:57
SyncScope::ID getSyncScopeID() const
Returns the synchronization scope ID of this store instruction.
Definition: Instructions.h:384
gep_type_iterator gep_type_end(const User *GEP)
const std::string & getAsmString() const
Definition: InlineAsm.h:80
*p = old <unsigned v ? old : v
Definition: Instructions.h:725
bool isSwiftError() const
Return true if this alloca is used as a swifterror argument to a call.
Definition: Instructions.h:135
AtomicOrdering getOrdering() const
Returns the ordering constraint of this load instruction.
Definition: Instructions.h:247
Offsets
Offsets in bytes from the start of the input buffer.
Definition: SIInstrInfo.h:1034
*p = old >unsigned v ? old : v
Definition: Instructions.h:723
LLVM_NODISCARD detail::scope_exit< typename std::decay< Callable >::type > make_scope_exit(Callable &&F)
Definition: ScopeExit.h:58
LLVMContext & getContext() const
All values hold a context through their type.
Definition: Value.cpp:709
bool hasDLLImportStorageClass() const
Definition: GlobalValue.h:261
BasicBlock * getSuccessor(unsigned i) const
unsigned const TargetRegisterInfo * TRI
F(f)
The actual analysis pass wrapper.
Definition: CSEInfo.h:218
An instruction for reading from memory.
Definition: Instructions.h:167
void setMF(MachineFunction &MF)
Definition: CSEInfo.cpp:77
an instruction that atomically reads a memory location, combines it with another value, and then stores the result back.
Definition: Instructions.h:691
Value * getCondition() const
bool isVectorTy() const
True if this is an instance of VectorType.
Definition: Type.h:229
GlobalValue * ExtractTypeInfo(Value *V)
ExtractTypeInfo - Returns the type info, possibly bitcast, encoded in V.
Definition: Analysis.cpp:147
*p = old >signed v ? old : v
Definition: Instructions.h:719
virtual bool getTgtMemIntrinsic(IntrinsicInfo &, const CallInst &, MachineFunction &, unsigned) const
Given an intrinsic, checks if on the target the intrinsic will need to map to a MemIntrinsicNode (tou...
bool runOnMachineFunction(MachineFunction &MF) override
runOnMachineFunction - This method must be overloaded to perform the desired machine code transformat...
static Constant * getNullValue(Type *Ty)
Constructor to create a '0' constant of arbitrary type.
Definition: Constants.cpp:274
AtomicOrdering getFailureOrdering() const
Returns the failure ordering constraint of this cmpxchg instruction.
Definition: Instructions.h:595
MachineInstrBuilder buildExtract(const DstOp &Res, const SrcOp &Src, uint64_t Index)
Build and insert `Res0, ...
bool hasSideEffects() const
Definition: InlineAsm.h:66
Value * getArgOperand(unsigned i) const
Definition: InstrTypes.h:1155
MachineInstrBuilder buildStore(unsigned Val, unsigned Addr, MachineMemOperand &MMO)
Build and insert G_STORE Val, Addr, MMO.
AnalysisUsage & addRequired()
#define INITIALIZE_PASS_DEPENDENCY(depName)
Definition: PassSupport.h:50
bool isVolatile() const
Return true if this is a load from a volatile memory location.
Definition: Instructions.h:231
A description of a memory reference used in the backend.
amdgpu aa AMDGPU Address space based Alias Analysis Wrapper
unsigned countOperandBundlesOfType(StringRef Name) const
Return the number of operand bundles with the tag Name attached to this instruction.
Definition: InstrTypes.h:1692
This class represents the LLVM 'select' instruction.
const DataLayout & getDataLayout() const
Get the data layout for the module's target platform.
Definition: Module.cpp:369
#define DEBUG_TYPE
MachineFunctionPass - This class adapts the FunctionPass interface to allow convenient creation of pa...
const HexagonInstrInfo * TII
unsigned getAlignment() const
Return the alignment of the memory that is being allocated by the instruction.
Definition: Instructions.h:112
PointerType * getType() const
Overload to return most specific pointer type.
Definition: Instructions.h:96
Class to represent struct types.
Definition: DerivedTypes.h:232
DILabel * getLabel() const
BinOp getOperation() const
Definition: Instructions.h:750
const MachineInstrBuilder & addUse(unsigned RegNo, unsigned Flags=0, unsigned SubReg=0) const
Add a virtual register use operand.
bool isWeak() const
Return true if this cmpxchg may spuriously fail.
Definition: Instructions.h:569
TypeID
Definitions of all of the base types for the Type system.
Definition: Type.h:54
The memory access is dereferenceable (i.e., doesn't trap).
bool isVolatile() const
Return true if this is a cmpxchg from a volatile memory location.
Definition: Instructions.h:557
This class defines information used to lower LLVM code to legal SelectionDAG operators that the targe...
Target-Independent Code Generator Pass Configuration Options.
INLINEASM - Represents an inline asm block.
Definition: ISDOpcodes.h:668
Context object for machine code objects.
Definition: MCContext.h:62
Type * getType() const
All values are typed, get the type of this value.
Definition: Value.h:244
Definition: Lint.cpp:83
AtomicOrdering getSuccessOrdering() const
Returns the success ordering constraint of this cmpxchg instruction.
Definition: Instructions.h:582
MachineInstrBuilder buildAtomicRMW(unsigned Opcode, unsigned OldValRes, unsigned Addr, unsigned Val, MachineMemOperand &MMO)
Build and insert OldValRes<def> = G_ATOMICRMW_<Opcode> Addr, Val, MMO.
unsigned getOpcode() const
Returns a member of one of the enums like Instruction::Add.
Definition: Instruction.h:125
iterator_range< User::op_iterator > arg_operands()
Definition: InstrTypes.h:1147
An instruction for storing to memory.
Definition: Instructions.h:320
static LLT scalar(unsigned SizeInBits)
Get a low-level scalar or aggregate "bag of bits".
instr_iterator insert(instr_iterator I, MachineInstr *M)
Insert MI into the instruction list before I, possibly inside a bundle.
MachineInstrBuilder buildExtractVectorElement(const DstOp &Res, const SrcOp &Val, const SrcOp &Idx)
Build and insert Res = G_EXTRACT_VECTOR_ELT Val, Idx.
Value * getOperand(unsigned i) const
Definition: User.h:169
Analysis containing CSE Info
Definition: CSEInfo.cpp:20
This corresponds to the llvm.lifetime.
Definition: ISDOpcodes.h:850
MachineInstrBuilder buildDbgLabel(const MDNode *Label)
Build and insert a DBG_LABEL instructions specifying that Label is given.
bool isVoidTy() const
Return true if this is 'void'.
Definition: Type.h:140
The memory access is volatile.
bool isValidLocationForIntrinsic(const DILocation *DL) const
Check that a location is valid for this label.
const BasicBlock & getEntryBlock() const
Definition: Function.h:642
constexpr uint64_t MinAlign(uint64_t A, uint64_t B)
A and B are either alignments or offsets.
Definition: MathExtras.h:609
initializer< Ty > init(const Ty &Val)
Definition: CommandLine.h:427
Abstract class that contains various methods for clients to notify about changes. ...
FPOpFusion::FPOpFusionMode AllowFPOpFusion
AllowFPOpFusion - This flag is set by the -fuse-fp-ops=xxx option.
The landingpad instruction holds all of the information necessary to generate correct exception handl...
* if(!EatIfPresent(lltok::kw_thread_local)) return false
ParseOptionalThreadLocal := /*empty.
SyncScope::ID getSyncScopeID() const
Returns the synchronization scope ID of this rmw instruction.
Definition: Instructions.h:802
unsigned const MachineRegisterInfo * MRI
Value * getCalledValue() const
Definition: InstrTypes.h:1194
LLVM Basic Block Representation.
Definition: BasicBlock.h:57
The instances of the Type class are immutable: once they are created, they are never changed...
Definition: Type.h:45
void getAnalysisUsage(AnalysisUsage &AU) const override
getAnalysisUsage - Subclasses that override getAnalysisUsage must call this.
DISubprogram * getSubprogram() const
Get the attached subprogram.
Definition: Metadata.cpp:1507
Conditional or Unconditional Branch instruction.
MachineInstrBuilder buildInstr(unsigned Opcode)
Build and insert <empty> = Opcode <empty>.
Value * getAddress() const
size_t size() const
size - Get the array size.
Definition: ArrayRef.h:148
static GCRegistry::Add< CoreCLRGC > E("coreclr", "CoreCLR-compatible GC")
This is an important base class in LLVM.
Definition: Constant.h:41
bool isInlineAsm() const
Check if this call is an inline asm statement.
Definition: InstrTypes.h:1270
Value * getValue() const
MachineInstrBuilder buildPtrMask(unsigned Res, unsigned Op0, uint32_t NumBits)
Build and insert Res = G_PTR_MASK Op0, NumBits.
SmallSet - This maintains a set of unique values, optimizing for the case when the set is small (less...
Definition: SmallSet.h:134
This file contains the declarations for the subclasses of Constant, which represent the different fla...
const Instruction & front() const
Definition: BasicBlock.h:280
void computeValueLLTs(const DataLayout &DL, Type &Ty, SmallVectorImpl< LLT > &ValueTys, SmallVectorImpl< uint64_t > *Offsets=nullptr, uint64_t StartingOffset=0)
computeValueLLTs - Given an LLVM IR type, compute a sequence of LLTs that represent all the individua...
Definition: Analysis.cpp:116
Indirect Branch Instruction.
Helper class to build MachineInstr.
BasicBlock * getDefaultDest() const
DIExpression * getExpression() const
bool isValidLocationForIntrinsic(const DILocation *DL) const
Check that a location is valid for this variable.
Represent the analysis usage information of a pass.
GISelCSEInfo & get(std::unique_ptr< CSEConfigBase > CSEOpt, bool ReCompute=false)
Takes a CSEConfigBase object that defines what opcodes get CSEd.
Definition: CSEInfo.cpp:363
Predicate
This enumeration lists the possible predicates for CmpInst subclasses.
Definition: InstrTypes.h:646
MachineInstrBuilder buildInsert(unsigned Res, unsigned Src, unsigned Op, unsigned Index)
amdgpu Simplify well known AMD library false FunctionCallee Value * Arg
Value * getPointerOperand()
Definition: Instructions.h:284
self_iterator getIterator()
Definition: ilist_node.h:81
std::pair< NoneType, bool > insert(const T &V)
insert - Insert an element into the set if it isn&#39;t already there.
Definition: SmallSet.h:180
const MachineInstrBuilder & addSym(MCSymbol *Sym, unsigned char TargetFlags=0) const
const MachineInstrBuilder & addFrameIndex(int Idx) const
static double log2(double V)
static Constant * getAllOnesValue(Type *Ty)
Definition: Constants.cpp:328
1 1 1 1 Always true (always folded)
Definition: InstrTypes.h:663
LLVMContext & getContext() const
getContext - Return a reference to the LLVMContext associated with this function. ...
Definition: Function.cpp:192
MachineInstrBuilder buildBrIndirect(unsigned Tgt)
Build and insert G_BRINDIRECT Tgt.
MachineInstrBuilder buildCopy(const DstOp &Res, const SrcOp &Op)
Build and insert Res = COPY Op.
SyncScope::ID getSyncScopeID() const
Returns the synchronization scope ID of this cmpxchg instruction.
Definition: Instructions.h:608
BasicBlock * getSuccessor(unsigned i) const
TargetRegisterInfo base class - We assume that the target defines a static array of TargetRegisterDes...
const Value * getArraySize() const
Get the number of elements allocated.
Definition: Instructions.h:92
size_t size() const
Definition: SmallVector.h:52
Value * getIncomingValue(unsigned i) const
Return incoming value number x.
static uint16_t copyFlagsFromInstruction(const Instruction &I)
AtomicOrdering getOrdering() const
Returns the ordering constraint of this rmw instruction.
Definition: Instructions.h:789
Simple wrapper that does the following.
Definition: CSEInfo.h:200
This class contains a discriminated union of information about pointers in memory operands...
std::string & str()
Flushes the stream contents to the target string and returns the string's reference.
Definition: raw_ostream.h:498
INITIALIZE_PASS_END(RegBankSelect, DEBUG_TYPE, "Assign register bank of generic virtual registers", false, false) RegBankSelect
#define llvm_unreachable(msg)
Marks that the current location is not supposed to be reachable.
EVT getValueType(const DataLayout &DL, Type *Ty, bool AllowUnknown=false) const
Return the EVT corresponding to this LLVM type.
const std::string & getConstraintString() const
Definition: InlineAsm.h:81
Type * getAllocatedType() const
Return the type that is being allocated by the instruction.
Definition: Instructions.h:105
EH_LABEL - Represents a label in mid basic block used to track locations needed for debug and excepti...
Definition: ISDOpcodes.h:676
MachineInstrBuilder buildFrameIndex(unsigned Res, int Idx)
Build and insert Res = G_FRAME_INDEX Idx.
void copyIRFlags(const Instruction &I)
Copy all flags to MachineInst MIFlags.
LLT getLLTForType(Type &Ty, const DataLayout &DL)
Construct a low-level type based on an LLVM type.
The memory access writes data.
MachineInstrBuilder buildBr(MachineBasicBlock &Dest)
Build and insert G_BR Dest.
bool hasAddressTaken() const
Returns true if there are any uses of this basic block other than direct branches, switches, etc.
Definition: BasicBlock.h:391
MachineInstrBuilder buildConstDbgValue(const Constant &C, const MDNode *Variable, const MDNode *Expr)
Build and insert a DBG_VALUE instructions specifying that Variable is given by C (suitably modified b...
Value * getValOperand()
Definition: Instructions.h:815
Predicate getPredicate(unsigned Condition, unsigned Hint)
Return predicate consisting of specified condition and hint bits.
Definition: PPCPredicates.h:87
unsigned getNumOperands() const
Definition: User.h:191
MachineInstrBuilder buildMul(const DstOp &Dst, const SrcOp &Src0, const SrcOp &Src1, Optional< unsigned > Flags=None)
Build and insert Res = G_MUL Op0, Op1.
void addSuccessor(MachineBasicBlock *Succ, BranchProbability Prob=BranchProbability::getUnknown())
Add Succ as a successor of this MachineBasicBlock.
MachineInstrBuilder buildICmp(CmpInst::Predicate Pred, const DstOp &Res, const SrcOp &Op0, const SrcOp &Op1)
Build and insert a Res = G_ICMP Pred, Op0, Op1.
This is the shared class of boolean and integer constants.
Definition: Constants.h:83
This is a &#39;vector&#39; (really, a variable-sized array), optimized for the case when the array is small...
Definition: SmallVector.h:841
iterator end() const
Definition: ArrayRef.h:137
bool isAggregateType() const
Return true if the type is an aggregate type.
Definition: Type.h:257
unsigned getSizeInBits() const
Returns the total size of the type. Must only be called on sized types.
static uint64_t getOffsetFromIndices(const User &U, const DataLayout &DL)
const MachineInstrBuilder & addMemOperand(MachineMemOperand *MMO) const
A collection of metadata nodes that might be associated with a memory access used by the alias-analys...
Definition: Metadata.h:643
MachineInstrBuilder buildBrCond(unsigned Tst, MachineBasicBlock &Dest)
Build and insert G_BRCOND Tst, Dest.
static IntegerType * getIntNTy(LLVMContext &C, unsigned N)
Definition: Type.cpp:179
static Constant * get(Type *Ty, uint64_t V, bool isSigned=false)
If Ty is a vector type, return a Constant with a splat of the given value.
Definition: Constants.cpp:631
DebugLoc getDebugLoc()
Get the current instruction's debug location.
unsigned getNumIncomingValues() const
Return the number of incoming edges.
bool isLayoutSuccessor(const MachineBasicBlock *MBB) const
Return true if the specified MBB will be emitted immediately after this block, such that if this bloc...
Intrinsic::ID getIntrinsicID() const LLVM_READONLY
getIntrinsicID - This method returns the ID number of the specified function, or Intrinsic::not_intri...
Definition: Function.h:193
raw_ostream & dbgs()
dbgs() - This returns a reference to a raw_ostream for debugging messages.
Definition: Debug.cpp:132
unsigned Log2_32(uint32_t Value)
Return the floor log base 2 of the specified value, -1 if the value is zero.
Definition: MathExtras.h:538
MachineInstrBuilder buildInsertVectorElement(const DstOp &Res, const SrcOp &Val, const SrcOp &Elt, const SrcOp &Idx)
Build and insert Res = G_INSERT_VECTOR_ELT Val, Elt, Idx.
unsigned getVectorNumElements() const
Definition: DerivedTypes.h:493
bool isIntPredicate() const
Definition: InstrTypes.h:739
const Module * getModule() const
Return the module owning the function this instruction belongs to, or nullptr if the function does not...
Definition: Instruction.cpp:55
Class for arbitrary precision integers.
Definition: APInt.h:69
amdgpu Simplify well known AMD library false FunctionCallee Callee
MachineInstrBuilder buildAtomicCmpXchgWithSuccess(unsigned OldValRes, unsigned SuccessRes, unsigned Addr, unsigned CmpVal, unsigned NewVal, MachineMemOperand &MMO)
Build and insert OldValRes<def>, SuccessRes<def> = G_ATOMIC_CMPXCHG_WITH_SUCCESS Addr, CmpVal, NewVal, MMO.
static MachineOperand CreateES(const char *SymName, unsigned char TargetFlags=0)
static char ID
Definition: IRTranslator.h:60
virtual bool isFMAFasterThanFMulAndFAdd(EVT) const
Return true if an FMA operation is faster than a pair of fmul and fadd instructions.
The memory access reads data.
#define Success
static MachinePointerInfo getFixedStack(MachineFunction &MF, int FI, int64_t Offset=0)
Return a MachinePointerInfo record that refers to the specified FrameIndex.
Representation of each machine instruction.
Definition: MachineInstr.h:63
Predicate getPredicate() const
Return the predicate for this instruction.
Definition: InstrTypes.h:721
This file provides various utilities for inspecting and working with the control flow graph in LLVM I...
void addObserver(GISelChangeObserver *O)
bool isVolatile() const
Return true if this is a store to a volatile memory location.
Definition: Instructions.h:353
MachineInstrBuilder buildDirectDbgValue(unsigned Reg, const MDNode *Variable, const MDNode *Expr)
Build and insert a DBG_VALUE instruction expressing the fact that the associated Variable lives in Re...
const DebugLoc & getDebugLoc() const
Return the debug location for this node as a DebugLoc.
Definition: Instruction.h:324
const MachineInstrBuilder & addImm(int64_t Val) const
Add a new immediate operand.
static IntegerType * getInt32Ty(LLVMContext &C)
Definition: Type.cpp:175
void removeObserver(GISelChangeObserver *O)
LLVM_NODISCARD bool empty() const
Definition: SmallVector.h:55
AtomicOrdering getOrdering() const
Returns the ordering constraint of this store instruction.
Definition: Instructions.h:372
This represents the llvm.dbg.value instruction.
bool isTokenTy() const
Return true if this is 'token'.
Definition: Type.h:193
CallingConv::ID getCallingConv() const
Definition: InstrTypes.h:1258
verify safepoint Safepoint IR Verifier
Value * getPointerOperand()
Definition: Instructions.h:811
TargetOptions Options
const MachineBasicBlock & getMBB() const
Getter for the basic block we currently build.
BasicBlock * getIncomingBlock(unsigned i) const
Return incoming basic block number i.
Function * getCalledFunction() const
Returns the function called, or null if this is an indirect function invocation.
Definition: InstrTypes.h:1201
static cl::opt< bool > EnableCSEInIRTranslator("enable-cse-in-irtranslator", cl::desc("Should enable CSE in irtranslator"), cl::Optional, cl::init(false))
SyncScope::ID getSyncScopeID() const
Returns the synchronization scope ID of this load instruction.
Definition: Instructions.h:259
void setMBB(MachineBasicBlock &MBB)
Set the insertion point to the end of MBB.
const MachineInstrBuilder & addExternalSymbol(const char *FnName, unsigned char TargetFlags=0) const
void push_back(MachineInstr *MI)
#define I(x, y, z)
Definition: MD5.cpp:58
static Constant * getZeroValueForNegation(Type *Ty)
Floating point negation must be implemented with f(x) = -0.0 - x.
Definition: Constants.cpp:780
Pair of physical register and lane mask.
The memory access always returns the same value (or traps).
bool isZero() const
This is just a convenience method to make client code smaller for a common code.
Definition: Constants.h:192
LLVM_NODISCARD std::enable_if<!is_simple_type< Y >::value, typename cast_retty< X, const Y >::ret_type >::type dyn_cast(const Y &Val)
Definition: Casting.h:332
uint32_t Size
Definition: Profile.cpp:46
DILocalVariable * getVariable() const
Value * getReturnValue() const
Convenience accessor. Returns null if there is no return value.
void getAnalysisUsage(AnalysisUsage &AU) const override
getAnalysisUsage - This function should be overriden by passes that need analysis information to do t...
bool isUnconditional() const
Optional< MachineInstrBuilder > materializeGEP(unsigned &Res, unsigned Op0, const LLT &ValueTy, uint64_t Value)
Materialize and insert Res = G_GEP Op0, (G_CONSTANT Value)
AsmDialect getDialect() const
Definition: InlineAsm.h:68
void GetUnderlyingObjects(Value *V, SmallVectorImpl< Value *> &Objects, const DataLayout &DL, LoopInfo *LI=nullptr, unsigned MaxLookup=6)
This method is similar to GetUnderlyingObject except that it can look through phi and select instruct...
Multiway switch.
This file declares the IRTranslator pass.
assert(ImpDefSCC.getReg()==AMDGPU::SCC &&ImpDefSCC.isDef())
A raw_ostream that writes to an std::string.
Definition: raw_ostream.h:482
aarch64 promote const
Module * getParent()
Get the module that this global value is contained inside of...
Definition: GlobalValue.h:565
LLVM Value Representation.
Definition: Value.h:72
uint64_t getTypeStoreSize(Type *Ty) const
Returns the maximum number of bytes that may be overwritten by storing the specified type...
Definition: DataLayout.h:444
succ_range successors(Instruction *I)
Definition: CFG.h:259
This file describes how to lower LLVM calls to machine code calls.
MachineInstrBuilder buildLoad(unsigned Res, unsigned Addr, MachineMemOperand &MMO)
Build and insert Res = G_LOAD Addr, MMO.
INITIALIZE_PASS_BEGIN(IRTranslator, DEBUG_TYPE, "IRTranslator LLVM IR -> MI", false, false) INITIALIZE_PASS_END(IRTranslator
Invoke instruction.
Primary interface to the complete machine description for the target machine.
Definition: TargetMachine.h:65
IRTranslator LLVM IR MI
const MachineInstrBuilder & addDef(unsigned RegNo, unsigned Flags=0, unsigned SubReg=0) const
Add a virtual register definition operand.
MachineInstrBuilder buildUndef(const DstOp &Res)
Build and insert Res = IMPLICIT_DEF.
Simple wrapper observer that takes several observers, and calls each one for each event...
bool isStaticAlloca() const
Return true if this alloca is in the entry block of the function and is a constant size...
const MachineInstrBuilder & addMBB(MachineBasicBlock *MBB, unsigned char TargetFlags=0) const
#define LLVM_DEBUG(X)
Definition: Debug.h:122
const MachineOperand & getOperand(unsigned i) const
Definition: MachineInstr.h:413
OutputIt copy(R &&Range, OutputIt Out)
Definition: STLExtras.h:1237
This represents the llvm.dbg.declare instruction.
Value * getPointerOperand()
Definition: Instructions.h:412
The optimization diagnostic interface.
Statically lint checks LLVM IR
Definition: Lint.cpp:192
constexpr char Args[]
Key for Kernel::Metadata::mArgs.
int64_t getIndexedOffsetInType(Type *ElemTy, ArrayRef< Value *> Indices) const
Returns the offset from the beginning of the type for the specified indices.
Definition: DataLayout.cpp:806
bool isVolatile() const
Return true if this is a RMW on a volatile memory location.
Definition: Instructions.h:774
0 0 0 0 Always false (always folded)
Definition: InstrTypes.h:648
IntegerType * Int32Ty
This file describes how to lower LLVM code to machine code.
const BasicBlock * getParent() const
Definition: Instruction.h:66
virtual const TargetRegisterClass * getPointerRegClass(const MachineFunction &MF, unsigned Kind=0) const
Returns a TargetRegisterClass used for pointer values.
an instruction to allocate memory on the stack
Definition: Instructions.h:59
This instruction inserts a struct field of array element value into an aggregate value.
gep_type_iterator gep_type_begin(const User *GEP)
size_type count(const T &V) const
count - Return 1 if the element is in the set, 0 otherwise.
Definition: SmallSet.h:164