LLVM  6.0.0svn
IRTranslator.cpp
Go to the documentation of this file.
1 //===- llvm/CodeGen/GlobalISel/IRTranslator.cpp - IRTranslator ---*- C++ -*-==//
2 //
3 // The LLVM Compiler Infrastructure
4 //
5 // This file is distributed under the University of Illinois Open Source
6 // License. See LICENSE.TXT for details.
7 //
8 //===----------------------------------------------------------------------===//
9 /// \file
10 /// This file implements the IRTranslator class.
11 //===----------------------------------------------------------------------===//
12 
14 #include "llvm/ADT/STLExtras.h"
15 #include "llvm/ADT/ScopeExit.h"
16 #include "llvm/ADT/SmallSet.h"
17 #include "llvm/ADT/SmallVector.h"
19 #include "llvm/CodeGen/Analysis.h"
30 #include "llvm/IR/BasicBlock.h"
31 #include "llvm/IR/Constant.h"
32 #include "llvm/IR/Constants.h"
33 #include "llvm/IR/DataLayout.h"
34 #include "llvm/IR/DebugInfo.h"
35 #include "llvm/IR/DerivedTypes.h"
36 #include "llvm/IR/Function.h"
38 #include "llvm/IR/InlineAsm.h"
39 #include "llvm/IR/InstrTypes.h"
40 #include "llvm/IR/Instructions.h"
41 #include "llvm/IR/IntrinsicInst.h"
42 #include "llvm/IR/Intrinsics.h"
43 #include "llvm/IR/LLVMContext.h"
44 #include "llvm/IR/Metadata.h"
45 #include "llvm/IR/Type.h"
46 #include "llvm/IR/User.h"
47 #include "llvm/IR/Value.h"
48 #include "llvm/MC/MCContext.h"
49 #include "llvm/Pass.h"
50 #include "llvm/Support/Casting.h"
51 #include "llvm/Support/CodeGen.h"
52 #include "llvm/Support/Debug.h"
63 #include <algorithm>
64 #include <cassert>
65 #include <cstdint>
66 #include <iterator>
67 #include <string>
68 #include <utility>
69 #include <vector>
70 
71 #define DEBUG_TYPE "irtranslator"
72 
73 using namespace llvm;
74 
75 char IRTranslator::ID = 0;
76 
77 INITIALIZE_PASS_BEGIN(IRTranslator, DEBUG_TYPE, "IRTranslator LLVM IR -> MI",
78  false, false)
80 INITIALIZE_PASS_END(IRTranslator, DEBUG_TYPE, "IRTranslator LLVM IR -> MI",
81  false, false)
82 
88 
89  // Print the function name explicitly if we don't have a debug location (which
90  // makes the diagnostic less useful) or if we're going to emit a raw error.
91  if (!R.getLocation().isValid() || TPC.isGlobalISelAbortEnabled())
92  R << (" (in function: " + MF.getName() + ")").str();
93 
94  if (TPC.isGlobalISelAbortEnabled())
95  report_fatal_error(R.getMsg());
96  else
97  ORE.emit(R);
98 }
99 
102 }
103 
107 }
108 
109 unsigned IRTranslator::getOrCreateVReg(const Value &Val) {
// Return the virtual register mapped to Val, creating it (and eagerly
// translating Val when it is a Constant) on first use.
110  unsigned &ValReg = ValToVReg[&Val];
111 
// Fast path: a non-zero entry means a vreg was already assigned.
112  if (ValReg)
113  return ValReg;
114 
115  // Fill ValRegsSequence with the sequence of registers
116  // we need to concat together to produce the value.
117  assert(Val.getType()->isSized() &&
118  "Don't know how to create an empty vreg");
// NOTE(review): the initializer of VReg (original line 120, presumably a
// MRI->createGenericVirtualRegister(...) call) was lost in extraction —
// confirm against the upstream file.
119  unsigned VReg =
121  ValReg = VReg;
122 
// Constants are translated immediately so their defining instructions
// exist before any user of this vreg is emitted.
123  if (auto CV = dyn_cast<Constant>(&Val)) {
124  bool Success = translate(*CV, VReg);
125  if (!Success) {
// Report the failure; under GlobalISel-abort mode this is fatal (see
// reportTranslationError, which calls report_fatal_error in that case).
126  OptimizationRemarkMissed R("gisel-irtranslator", "GISelFailure",
127  MF->getFunction()->getSubprogram(),
128  &MF->getFunction()->getEntryBlock());
129  R << "unable to translate constant: " << ore::NV("Type", Val.getType());
130  reportTranslationError(*MF, *TPC, *ORE, R);
131  return VReg;
132  }
133  }
134 
135  return VReg;
136 }
137 
138 int IRTranslator::getOrCreateFrameIndex(const AllocaInst &AI) {
139  if (FrameIndices.find(&AI) != FrameIndices.end())
140  return FrameIndices[&AI];
141 
142  unsigned ElementSize = DL->getTypeStoreSize(AI.getAllocatedType());
143  unsigned Size =
144  ElementSize * cast<ConstantInt>(AI.getArraySize())->getZExtValue();
145 
146  // Always allocate at least one byte.
147  Size = std::max(Size, 1u);
148 
149  unsigned Alignment = AI.getAlignment();
150  if (!Alignment)
151  Alignment = DL->getABITypeAlignment(AI.getAllocatedType());
152 
153  int &FI = FrameIndices[&AI];
154  FI = MF->getFrameInfo().CreateStackObject(Size, Alignment, false, &AI);
155  return FI;
156 }
157 
158 unsigned IRTranslator::getMemOpAlignment(const Instruction &I) {
159  unsigned Alignment = 0;
160  Type *ValTy = nullptr;
161  if (const StoreInst *SI = dyn_cast<StoreInst>(&I)) {
162  Alignment = SI->getAlignment();
163  ValTy = SI->getValueOperand()->getType();
164  } else if (const LoadInst *LI = dyn_cast<LoadInst>(&I)) {
165  Alignment = LI->getAlignment();
166  ValTy = LI->getType();
167  } else {
168  OptimizationRemarkMissed R("gisel-irtranslator", "", &I);
169  R << "unable to translate memop: " << ore::NV("Opcode", &I);
170  reportTranslationError(*MF, *TPC, *ORE, R);
171  return 1;
172  }
173 
174  return Alignment ? Alignment : DL->getABITypeAlignment(ValTy);
175 }
176 
177 MachineBasicBlock &IRTranslator::getMBB(const BasicBlock &BB) {
178  MachineBasicBlock *&MBB = BBToMBB[&BB];
179  assert(MBB && "BasicBlock was not encountered before");
180  return *MBB;
181 }
182 
183 void IRTranslator::addMachineCFGPred(CFGEdge Edge, MachineBasicBlock *NewPred) {
184  assert(NewPred && "new predecessor must be a real MachineBasicBlock");
185  MachinePreds[Edge].push_back(NewPred);
186 }
187 
188 bool IRTranslator::translateBinaryOp(unsigned Opcode, const User &U,
189  MachineIRBuilder &MIRBuilder) {
190  // FIXME: handle signed/unsigned wrapping flags.
191 
192  // Get or create a virtual register for each value.
193  // Unless the value is a Constant => loadimm cst?
194  // or inline constant each time?
195  // Creation of a virtual register needs to have a size.
196  unsigned Op0 = getOrCreateVReg(*U.getOperand(0));
197  unsigned Op1 = getOrCreateVReg(*U.getOperand(1));
198  unsigned Res = getOrCreateVReg(U);
199  MIRBuilder.buildInstr(Opcode).addDef(Res).addUse(Op0).addUse(Op1);
200  return true;
201 }
202 
// Translate an fsub, special-casing the unary-negation pattern.
203 bool IRTranslator::translateFSub(const User &U, MachineIRBuilder &MIRBuilder) {
204  // -0.0 - X --> G_FNEG
// NOTE(review): the remainder of this condition (original line 206,
// presumably checking that operand 0 is the -0.0 constant) was lost in
// extraction — confirm against the upstream file.
205  if (isa<Constant>(U.getOperand(0)) &&
207  MIRBuilder.buildInstr(TargetOpcode::G_FNEG)
208  .addDef(getOrCreateVReg(U))
209  .addUse(getOrCreateVReg(*U.getOperand(1)));
210  return true;
211  }
// General case: plain two-operand G_FSUB.
212  return translateBinaryOp(TargetOpcode::G_FSUB, U, MIRBuilder);
213 }
214 
215 bool IRTranslator::translateCompare(const User &U,
216  MachineIRBuilder &MIRBuilder) {
217  const CmpInst *CI = dyn_cast<CmpInst>(&U);
218  unsigned Op0 = getOrCreateVReg(*U.getOperand(0));
219  unsigned Op1 = getOrCreateVReg(*U.getOperand(1));
220  unsigned Res = getOrCreateVReg(U);
221  CmpInst::Predicate Pred =
222  CI ? CI->getPredicate() : static_cast<CmpInst::Predicate>(
223  cast<ConstantExpr>(U).getPredicate());
224  if (CmpInst::isIntPredicate(Pred))
225  MIRBuilder.buildICmp(Pred, Res, Op0, Op1);
226  else if (Pred == CmpInst::FCMP_FALSE)
227  MIRBuilder.buildCopy(
228  Res, getOrCreateVReg(*Constant::getNullValue(CI->getType())));
229  else if (Pred == CmpInst::FCMP_TRUE)
230  MIRBuilder.buildCopy(
231  Res, getOrCreateVReg(*Constant::getAllOnesValue(CI->getType())));
232  else
233  MIRBuilder.buildFCmp(Pred, Res, Op0, Op1);
234 
235  return true;
236 }
237 
238 bool IRTranslator::translateRet(const User &U, MachineIRBuilder &MIRBuilder) {
239  const ReturnInst &RI = cast<ReturnInst>(U);
240  const Value *Ret = RI.getReturnValue();
241  // The target may mess up with the insertion point, but
242  // this is not important as a return is the last instruction
243  // of the block anyway.
244  return CLI->lowerReturn(MIRBuilder, Ret, !Ret ? 0 : getOrCreateVReg(*Ret));
245 }
246 
247 bool IRTranslator::translateBr(const User &U, MachineIRBuilder &MIRBuilder) {
248  const BranchInst &BrInst = cast<BranchInst>(U);
249  unsigned Succ = 0;
250  if (!BrInst.isUnconditional()) {
251  // We want a G_BRCOND to the true BB followed by an unconditional branch.
252  unsigned Tst = getOrCreateVReg(*BrInst.getCondition());
253  const BasicBlock &TrueTgt = *cast<BasicBlock>(BrInst.getSuccessor(Succ++));
254  MachineBasicBlock &TrueBB = getMBB(TrueTgt);
255  MIRBuilder.buildBrCond(Tst, TrueBB);
256  }
257 
258  const BasicBlock &BrTgt = *cast<BasicBlock>(BrInst.getSuccessor(Succ));
259  MachineBasicBlock &TgtBB = getMBB(BrTgt);
260  MachineBasicBlock &CurBB = MIRBuilder.getMBB();
261 
262  // If the unconditional target is the layout successor, fallthrough.
263  if (!CurBB.isLayoutSuccessor(&TgtBB))
264  MIRBuilder.buildBr(TgtBB);
265 
266  // Link successors.
267  for (const BasicBlock *Succ : BrInst.successors())
268  CurBB.addSuccessor(&getMBB(*Succ));
269  return true;
270 }
271 
// Translate a switch as a linear chain of compare-and-branch blocks, ending
// in an unconditional branch to the default destination.
272 bool IRTranslator::translateSwitch(const User &U,
273  MachineIRBuilder &MIRBuilder) {
274  // For now, just translate as a chain of conditional branches.
275  // FIXME: could we share most of the logic/code in
276  // SelectionDAGBuilder::visitSwitch between SelectionDAG and GlobalISel?
277  // At first sight, it seems most of the logic in there is independent of
278  // SelectionDAG-specifics and a lot of work went in to optimize switch
279  // lowering in there.
280 
281  const SwitchInst &SwInst = cast<SwitchInst>(U);
282  const unsigned SwCondValue = getOrCreateVReg(*SwInst.getCondition());
283  const BasicBlock *OrigBB = SwInst.getParent();
284 
// Each case comparison produces an i1 test register.
285  LLT LLTi1 = getLLTForType(*Type::getInt1Ty(U.getContext()), *DL);
286  for (auto &CaseIt : SwInst.cases()) {
287  const unsigned CaseValueReg = getOrCreateVReg(*CaseIt.getCaseValue());
288  const unsigned Tst = MRI->createGenericVirtualRegister(LLTi1);
289  MIRBuilder.buildICmp(CmpInst::ICMP_EQ, Tst, CaseValueReg, SwCondValue);
290  MachineBasicBlock &CurMBB = MIRBuilder.getMBB();
291  const BasicBlock *TrueBB = CaseIt.getCaseSuccessor();
292  MachineBasicBlock &TrueMBB = getMBB(*TrueBB);
293 
// Branch to the case block when equal, and record the machine predecessor
// of the IR edge OrigBB->TrueBB for later CFG users (see addMachineCFGPred).
294  MIRBuilder.buildBrCond(Tst, TrueMBB);
295  CurMBB.addSuccessor(&TrueMBB);
296  addMachineCFGPred({OrigBB, TrueBB}, &CurMBB);
297 
// Otherwise fall through to a fresh block that holds the next comparison.
298  MachineBasicBlock *FalseMBB =
299  MF->CreateMachineBasicBlock(SwInst.getParent());
300  // Insert the comparison blocks one after the other.
301  MF->insert(std::next(CurMBB.getIterator()), FalseMBB);
302  MIRBuilder.buildBr(*FalseMBB);
303  CurMBB.addSuccessor(FalseMBB);
304 
// Move the insertion point into the new block for the next iteration.
305  MIRBuilder.setMBB(*FalseMBB);
306  }
307  // handle default case
308  const BasicBlock *DefaultBB = SwInst.getDefaultDest();
309  MachineBasicBlock &DefaultMBB = getMBB(*DefaultBB);
310  MIRBuilder.buildBr(DefaultMBB);
311  MachineBasicBlock &CurMBB = MIRBuilder.getMBB();
312  CurMBB.addSuccessor(&DefaultMBB);
313  addMachineCFGPred({OrigBB, DefaultBB}, &CurMBB);
314 
315  return true;
316 }
317 
318 bool IRTranslator::translateIndirectBr(const User &U,
319  MachineIRBuilder &MIRBuilder) {
320  const IndirectBrInst &BrInst = cast<IndirectBrInst>(U);
321 
322  const unsigned Tgt = getOrCreateVReg(*BrInst.getAddress());
323  MIRBuilder.buildBrIndirect(Tgt);
324 
325  // Link successors.
326  MachineBasicBlock &CurBB = MIRBuilder.getMBB();
327  for (const BasicBlock *Succ : BrInst.successors())
328  CurBB.addSuccessor(&getMBB(*Succ));
329 
330  return true;
331 }
332 
// Translate a load into G_LOAD with a MachineMemOperand describing the
// access.
333 bool IRTranslator::translateLoad(const User &U, MachineIRBuilder &MIRBuilder) {
334  const LoadInst &LI = cast<LoadInst>(U);
335 
// Build the memory-operand flags. NOTE(review): the non-volatile
// alternative of this conditional (original line 337, presumably
// MachineMemOperand::MONone) was lost in extraction.
336  auto Flags = LI.isVolatile() ? MachineMemOperand::MOVolatile
338  Flags |= MachineMemOperand::MOLoad;
339 
340  unsigned Res = getOrCreateVReg(LI);
341  unsigned Addr = getOrCreateVReg(*LI.getPointerOperand());
342 
// The memory operand carries size, alignment, sync scope and atomic
// ordering taken from the IR load. NOTE(review): the start of the
// getMachineMemOperand call (original line 345) is missing from this
// extraction.
343  MIRBuilder.buildLoad(
344  Res, Addr,
346  Flags, DL->getTypeStoreSize(LI.getType()),
347  getMemOpAlignment(LI), AAMDNodes(), nullptr,
348  LI.getSyncScopeID(), LI.getOrdering()));
349  return true;
350 }
351 
// Translate a store into G_STORE with a MachineMemOperand describing the
// access.
352 bool IRTranslator::translateStore(const User &U, MachineIRBuilder &MIRBuilder) {
353  const StoreInst &SI = cast<StoreInst>(U);
// NOTE(review): the non-volatile alternative and the MOStore flag
// (original lines 355-356) were lost in extraction.
354  auto Flags = SI.isVolatile() ? MachineMemOperand::MOVolatile
357 
358  unsigned Val = getOrCreateVReg(*SI.getValueOperand());
359  unsigned Addr = getOrCreateVReg(*SI.getPointerOperand());
360 
// NOTE(review): the start of the MachineMemOperand construction (original
// lines 363-365) is missing from this extraction; the surviving arguments
// supply alignment, AA metadata, sync scope and ordering from the IR store.
361  MIRBuilder.buildStore(
362  Val, Addr,
366  getMemOpAlignment(SI), AAMDNodes(), nullptr, SI.getSyncScopeID(),
367  SI.getOrdering()));
368  return true;
369 }
370 
// Translate extractvalue into G_EXTRACT at a bit offset computed from the
// aggregate layout.
371 bool IRTranslator::translateExtractValue(const User &U,
372  MachineIRBuilder &MIRBuilder) {
373  const Value *Src = U.getOperand(0);
// NOTE(review): the declaration of Int32Ty (original line 374) is missing
// from this extraction.
375  SmallVector<Value *, 1> Indices;
376 
377  // If Src is a single element ConstantStruct, translate extractvalue
378  // to that element to avoid inserting a cast instruction.
379  if (auto CS = dyn_cast<ConstantStruct>(Src))
380  if (CS->getNumOperands() == 1) {
381  unsigned Res = getOrCreateVReg(*CS->getOperand(0));
382  ValToVReg[&U] = Res;
383  return true;
384  }
385 
386  // getIndexedOffsetInType is designed for GEPs, so the first index is the
387  // usual array element rather than looking into the actual aggregate.
388  Indices.push_back(ConstantInt::get(Int32Ty, 0));
389 
// An ExtractValueInst carries its indices directly; the ConstantExpr form
// keeps them as trailing operands (hence i starting at 1, after the
// aggregate operand).
390  if (const ExtractValueInst *EVI = dyn_cast<ExtractValueInst>(&U)) {
391  for (auto Idx : EVI->indices())
392  Indices.push_back(ConstantInt::get(Int32Ty, Idx));
393  } else {
394  for (unsigned i = 1; i < U.getNumOperands(); ++i)
395  Indices.push_back(U.getOperand(i));
396  }
397 
// Offset is in bits, hence the * 8 on the byte offset.
398  uint64_t Offset = 8 * DL->getIndexedOffsetInType(Src->getType(), Indices);
399 
400  unsigned Res = getOrCreateVReg(U);
401  MIRBuilder.buildExtract(Res, getOrCreateVReg(*Src), Offset);
402 
403  return true;
404 }
405 
// Translate insertvalue into G_INSERT at a bit offset computed from the
// aggregate layout.
406 bool IRTranslator::translateInsertValue(const User &U,
407  MachineIRBuilder &MIRBuilder) {
408  const Value *Src = U.getOperand(0);
// NOTE(review): the declaration of Int32Ty (original line 409) is missing
// from this extraction.
410  SmallVector<Value *, 1> Indices;
411 
412  // getIndexedOffsetInType is designed for GEPs, so the first index is the
413  // usual array element rather than looking into the actual aggregate.
414  Indices.push_back(ConstantInt::get(Int32Ty, 0));
415 
// An InsertValueInst carries its indices directly; the ConstantExpr form
// keeps them as trailing operands (i starts at 2: after the aggregate and
// the inserted value).
416  if (const InsertValueInst *IVI = dyn_cast<InsertValueInst>(&U)) {
417  for (auto Idx : IVI->indices())
418  Indices.push_back(ConstantInt::get(Int32Ty, Idx));
419  } else {
420  for (unsigned i = 2; i < U.getNumOperands(); ++i)
421  Indices.push_back(U.getOperand(i));
422  }
423 
// Offset is in bits, hence the * 8 on the byte offset.
424  uint64_t Offset = 8 * DL->getIndexedOffsetInType(Src->getType(), Indices);
425 
426  unsigned Res = getOrCreateVReg(U);
427  unsigned Inserted = getOrCreateVReg(*U.getOperand(1));
428  MIRBuilder.buildInsert(Res, getOrCreateVReg(*Src), Inserted, Offset);
429 
430  return true;
431 }
432 
433 bool IRTranslator::translateSelect(const User &U,
434  MachineIRBuilder &MIRBuilder) {
435  unsigned Res = getOrCreateVReg(U);
436  unsigned Tst = getOrCreateVReg(*U.getOperand(0));
437  unsigned Op0 = getOrCreateVReg(*U.getOperand(1));
438  unsigned Op1 = getOrCreateVReg(*U.getOperand(2));
439  MIRBuilder.buildSelect(Res, Tst, Op0, Op1);
440  return true;
441 }
442 
443 bool IRTranslator::translateBitCast(const User &U,
444  MachineIRBuilder &MIRBuilder) {
445  // If we're bitcasting to the source type, we can reuse the source vreg.
446  if (getLLTForType(*U.getOperand(0)->getType(), *DL) ==
447  getLLTForType(*U.getType(), *DL)) {
448  // Get the source vreg now, to avoid invalidating ValToVReg.
449  unsigned SrcReg = getOrCreateVReg(*U.getOperand(0));
450  unsigned &Reg = ValToVReg[&U];
451  // If we already assigned a vreg for this bitcast, we can't change that.
452  // Emit a copy to satisfy the users we already emitted.
453  if (Reg)
454  MIRBuilder.buildCopy(Reg, SrcReg);
455  else
456  Reg = SrcReg;
457  return true;
458  }
459  return translateCast(TargetOpcode::G_BITCAST, U, MIRBuilder);
460 }
461 
462 bool IRTranslator::translateCast(unsigned Opcode, const User &U,
463  MachineIRBuilder &MIRBuilder) {
464  unsigned Op = getOrCreateVReg(*U.getOperand(0));
465  unsigned Res = getOrCreateVReg(U);
466  MIRBuilder.buildInstr(Opcode).addDef(Res).addUse(Op);
467  return true;
468 }
469 
// Translate a getelementptr: constant offsets are accumulated in `Offset`
// and applied lazily; variable indices emit G_MUL/G_GEP arithmetic.
470 bool IRTranslator::translateGetElementPtr(const User &U,
471  MachineIRBuilder &MIRBuilder) {
472  // FIXME: support vector GEPs.
473  if (U.getType()->isVectorTy())
474  return false;
475 
476  Value &Op0 = *U.getOperand(0);
477  unsigned BaseReg = getOrCreateVReg(Op0);
478  Type *PtrIRTy = Op0.getType();
479  LLT PtrTy = getLLTForType(*PtrIRTy, *DL);
// Offsets are computed in the pointer-width integer type.
480  Type *OffsetIRTy = DL->getIntPtrType(PtrIRTy);
481  LLT OffsetTy = getLLTForType(*OffsetIRTy, *DL);
482 
483  int64_t Offset = 0;
484  for (gep_type_iterator GTI = gep_type_begin(&U), E = gep_type_end(&U);
485  GTI != E; ++GTI) {
486  const Value *Idx = GTI.getOperand();
487  if (StructType *StTy = GTI.getStructTypeOrNull()) {
// Struct field: the offset is a compile-time constant from the layout.
488  unsigned Field = cast<Constant>(Idx)->getUniqueInteger().getZExtValue();
489  Offset += DL->getStructLayout(StTy)->getElementOffset(Field);
490  continue;
491  } else {
492  uint64_t ElementSize = DL->getTypeAllocSize(GTI.getIndexedType());
493 
494  // If this is a scalar constant or a splat vector of constants,
495  // handle it quickly.
496  if (const auto *CI = dyn_cast<ConstantInt>(Idx)) {
497  Offset += ElementSize * CI->getSExtValue();
498  continue;
499  }
500 
// A variable index follows: flush the pending constant offset into the
// base pointer first.
501  if (Offset != 0) {
502  unsigned NewBaseReg = MRI->createGenericVirtualRegister(PtrTy);
503  unsigned OffsetReg =
504  getOrCreateVReg(*ConstantInt::get(OffsetIRTy, Offset));
505  MIRBuilder.buildGEP(NewBaseReg, BaseReg, OffsetReg);
506 
507  BaseReg = NewBaseReg;
508  Offset = 0;
509  }
510 
511  // N = N + Idx * ElementSize;
512  unsigned ElementSizeReg =
513  getOrCreateVReg(*ConstantInt::get(OffsetIRTy, ElementSize));
514 
// Normalize the index register to the pointer-width offset type if needed.
515  unsigned IdxReg = getOrCreateVReg(*Idx);
516  if (MRI->getType(IdxReg) != OffsetTy) {
517  unsigned NewIdxReg = MRI->createGenericVirtualRegister(OffsetTy);
518  MIRBuilder.buildSExtOrTrunc(NewIdxReg, IdxReg);
519  IdxReg = NewIdxReg;
520  }
521 
522  unsigned OffsetReg = MRI->createGenericVirtualRegister(OffsetTy);
523  MIRBuilder.buildMul(OffsetReg, ElementSizeReg, IdxReg);
524 
525  unsigned NewBaseReg = MRI->createGenericVirtualRegister(PtrTy);
526  MIRBuilder.buildGEP(NewBaseReg, BaseReg, OffsetReg);
527  BaseReg = NewBaseReg;
528  }
529  }
530 
// Apply any remaining constant offset, or copy the base straight through.
531  if (Offset != 0) {
532  unsigned OffsetReg = getOrCreateVReg(*ConstantInt::get(OffsetIRTy, Offset));
533  MIRBuilder.buildGEP(getOrCreateVReg(U), BaseReg, OffsetReg);
534  return true;
535  }
536 
537  MIRBuilder.buildCopy(getOrCreateVReg(U), BaseReg);
538  return true;
539 }
540 
// Lower llvm.memcpy/memmove/memset to a libcall of the corresponding C
// function. Only address space 0 and pointer-sized length arguments are
// supported; anything else returns false to fall back.
541 bool IRTranslator::translateMemfunc(const CallInst &CI,
542  MachineIRBuilder &MIRBuilder,
543  unsigned ID) {
544  LLT SizeTy = getLLTForType(*CI.getArgOperand(2)->getType(), *DL);
545  Type *DstTy = CI.getArgOperand(0)->getType();
546  if (cast<PointerType>(DstTy)->getAddressSpace() != 0 ||
547  SizeTy.getSizeInBits() != DL->getPointerSizeInBits(0))
548  return false;
549 
// Collect the first three call arguments (dst, src/val, len) as ArgInfos.
// NOTE(review): the declaration of Args (original line 550) is missing from
// this extraction.
551  for (int i = 0; i < 3; ++i) {
552  const auto &Arg = CI.getArgOperand(i);
553  Args.emplace_back(getOrCreateVReg(*Arg), Arg->getType());
554  }
555 
556  const char *Callee;
557  switch (ID) {
558  case Intrinsic::memmove:
559  case Intrinsic::memcpy: {
// memcpy/memmove additionally require the source in address space 0.
560  Type *SrcTy = CI.getArgOperand(1)->getType();
561  if(cast<PointerType>(SrcTy)->getAddressSpace() != 0)
562  return false;
563  Callee = ID == Intrinsic::memcpy ? "memcpy" : "memmove";
564  break;
565  }
566  case Intrinsic::memset:
567  Callee = "memset";
568  break;
569  default:
570  return false;
571  }
572 
// Emit the call through the target's call lowering; vreg 0 means the call
// result is unused.
573  return CLI->lowerCall(MIRBuilder, CI.getCallingConv(),
574  MachineOperand::CreateES(Callee),
575  CallLowering::ArgInfo(0, CI.getType()), Args);
576 }
577 
// Emit LOAD_STACK_GUARD defining DstReg (constrained to the target's pointer
// register class) and, when the target exposes a stack-guard global, attach
// a memory operand for it.
578 void IRTranslator::getStackGuard(unsigned DstReg,
579  MachineIRBuilder &MIRBuilder) {
580  const TargetRegisterInfo *TRI = MF->getSubtarget().getRegisterInfo();
581  MRI->setRegClass(DstReg, TRI->getPointerRegClass(*MF));
582  auto MIB = MIRBuilder.buildInstr(TargetOpcode::LOAD_STACK_GUARD);
583  MIB.addDef(DstReg);
584 
585  auto &TLI = *MF->getSubtarget().getTargetLowering();
586  Value *Global = TLI.getSDagStackGuard(*MF->getFunction()->getParent());
587  if (!Global)
588  return;
589 
590  MachinePointerInfo MPInfo(Global);
// NOTE(review): the declaration of Flags and the start of the MemRefs
// declaration (original lines 591-593) are missing from this extraction.
594  *MemRefs =
595  MF->getMachineMemOperand(MPInfo, Flags, DL->getPointerSizeInBits() / 8,
596  DL->getPointerABIAlignment());
597  MIB.setMemRefs(MemRefs, MemRefs + 1);
598 }
599 
// Lower a *.with.overflow intrinsic: emit Op producing {result, overflow-i1}
// and pack both into the aggregate result register with G_SEQUENCE.
600 bool IRTranslator::translateOverflowIntrinsic(const CallInst &CI, unsigned Op,
601  MachineIRBuilder &MIRBuilder) {
602  LLT Ty = getLLTForType(*CI.getOperand(0)->getType(), *DL);
603  LLT s1 = LLT::scalar(1);
604  unsigned Width = Ty.getSizeInBits();
605  unsigned Res = MRI->createGenericVirtualRegister(Ty);
606  unsigned Overflow = MRI->createGenericVirtualRegister(s1);
607  auto MIB = MIRBuilder.buildInstr(Op)
608  .addDef(Res)
609  .addDef(Overflow)
610  .addUse(getOrCreateVReg(*CI.getOperand(0)))
611  .addUse(getOrCreateVReg(*CI.getOperand(1)))
612 
// The carry-using opcodes take an extra carry-in operand; feed them a zero.
// NOTE(review): the argument of getOrCreateVReg (original line 615,
// presumably a null i1 constant) is missing from this extraction.
613  if (Op == TargetOpcode::G_UADDE || Op == TargetOpcode::G_USUBE) {
614  unsigned Zero = getOrCreateVReg(
616  MIB.addUse(Zero);
617  }
618 
// Result aggregate: value at bit 0, overflow flag at bit Width.
619  MIRBuilder.buildSequence(getOrCreateVReg(CI), {Res, Overflow}, {0, Width});
620  return true;
621 }
622 
// Translate the intrinsics the IRTranslator knows how to lower directly.
// Returns true on success; false means the caller should fall back to the
// generic G_INTRINSIC path (or fail).
623 bool IRTranslator::translateKnownIntrinsic(const CallInst &CI, Intrinsic::ID ID,
624  MachineIRBuilder &MIRBuilder) {
625  switch (ID) {
626  default:
627  break;
628  case Intrinsic::lifetime_start:
629  case Intrinsic::lifetime_end:
630  // Stack coloring is not enabled in O0 (which we care about now) so we can
631  // drop these. Make sure someone notices when we start compiling at higher
632  // opts though.
633  if (MF->getTarget().getOptLevel() != CodeGenOpt::None)
634  return false;
635  return true;
636  case Intrinsic::dbg_declare: {
637  const DbgDeclareInst &DI = cast<DbgDeclareInst>(CI);
638  assert(DI.getVariable() && "Missing variable");
639 
// Without a (defined) address there is nothing to describe; drop the
// debug info rather than fail.
640  const Value *Address = DI.getAddress();
641  if (!Address || isa<UndefValue>(Address)) {
642  DEBUG(dbgs() << "Dropping debug info for " << DI << "\n");
643  return true;
644  }
645 
// NOTE(review): the start of this assertion (original line 646) is missing
// from this extraction.
647  MIRBuilder.getDebugLoc()) &&
648  "Expected inlined-at fields to agree");
649  auto AI = dyn_cast<AllocaInst>(Address);
650  if (AI && AI->isStaticAlloca()) {
651  // Static allocas are tracked at the MF level, no need for DBG_VALUE
652  // instructions (in fact, they get ignored if they *do* exist).
// NOTE(review): the call these arguments belong to (original line 653) is
// missing from this extraction.
654  getOrCreateFrameIndex(*AI), DI.getDebugLoc());
655  } else
656  MIRBuilder.buildDirectDbgValue(getOrCreateVReg(*Address),
657  DI.getVariable(), DI.getExpression());
658  return true;
659  }
660  case Intrinsic::vaend:
661  // No target I know of cares about va_end. Certainly no in-tree target
662  // does. Simplest intrinsic ever!
663  return true;
664  case Intrinsic::vastart: {
665  auto &TLI = *MF->getSubtarget().getTargetLowering();
666  Value *Ptr = CI.getArgOperand(0);
667  unsigned ListSize = TLI.getVaListSizeInBits(*DL) / 8;
668 
// Emit G_VASTART storing into the va_list pointed to by Ptr.
// NOTE(review): the addMemOperand call head (original line 671) is missing
// from this extraction.
669  MIRBuilder.buildInstr(TargetOpcode::G_VASTART)
670  .addUse(getOrCreateVReg(*Ptr))
672  MachinePointerInfo(Ptr), MachineMemOperand::MOStore, ListSize, 0));
673  return true;
674  }
675  case Intrinsic::dbg_value: {
676  // This form of DBG_VALUE is target-independent.
677  const DbgValueInst &DI = cast<DbgValueInst>(CI);
678  const Value *V = DI.getValue();
// NOTE(review): the start of this assertion (original line 679) is missing
// from this extraction.
680  MIRBuilder.getDebugLoc()) &&
681  "Expected inlined-at fields to agree");
682  if (!V) {
683  // Currently the optimizer can produce this; insert an undef to
684  // help debugging. Probably the optimizer should not do this.
685  MIRBuilder.buildIndirectDbgValue(0, DI.getVariable(), DI.getExpression());
686  } else if (const auto *CI = dyn_cast<Constant>(V)) {
687  MIRBuilder.buildConstDbgValue(*CI, DI.getVariable(), DI.getExpression());
688  } else {
689  unsigned Reg = getOrCreateVReg(*V);
690  // FIXME: This does not handle register-indirect values at offset 0. The
691  // direct/indirect thing shouldn't really be handled by something as
692  // implicit as reg+noreg vs reg+imm in the first palce, but it seems
693  // pretty baked in right now.
694  MIRBuilder.buildDirectDbgValue(Reg, DI.getVariable(), DI.getExpression());
695  }
696  return true;
697  }
// The *.with.overflow family shares one helper, parameterized by opcode.
698  case Intrinsic::uadd_with_overflow:
699  return translateOverflowIntrinsic(CI, TargetOpcode::G_UADDE, MIRBuilder);
700  case Intrinsic::sadd_with_overflow:
701  return translateOverflowIntrinsic(CI, TargetOpcode::G_SADDO, MIRBuilder);
702  case Intrinsic::usub_with_overflow:
703  return translateOverflowIntrinsic(CI, TargetOpcode::G_USUBE, MIRBuilder);
704  case Intrinsic::ssub_with_overflow:
705  return translateOverflowIntrinsic(CI, TargetOpcode::G_SSUBO, MIRBuilder);
706  case Intrinsic::umul_with_overflow:
707  return translateOverflowIntrinsic(CI, TargetOpcode::G_UMULO, MIRBuilder);
708  case Intrinsic::smul_with_overflow:
709  return translateOverflowIntrinsic(CI, TargetOpcode::G_SMULO, MIRBuilder);
// Math intrinsics map 1:1 onto generic FP opcodes.
710  case Intrinsic::pow:
711  MIRBuilder.buildInstr(TargetOpcode::G_FPOW)
712  .addDef(getOrCreateVReg(CI))
713  .addUse(getOrCreateVReg(*CI.getArgOperand(0)))
714  .addUse(getOrCreateVReg(*CI.getArgOperand(1)));
715  return true;
716  case Intrinsic::exp:
717  MIRBuilder.buildInstr(TargetOpcode::G_FEXP)
718  .addDef(getOrCreateVReg(CI))
719  .addUse(getOrCreateVReg(*CI.getArgOperand(0)));
720  return true;
721  case Intrinsic::exp2:
722  MIRBuilder.buildInstr(TargetOpcode::G_FEXP2)
723  .addDef(getOrCreateVReg(CI))
724  .addUse(getOrCreateVReg(*CI.getArgOperand(0)));
725  return true;
726  case Intrinsic::log:
727  MIRBuilder.buildInstr(TargetOpcode::G_FLOG)
728  .addDef(getOrCreateVReg(CI))
729  .addUse(getOrCreateVReg(*CI.getArgOperand(0)));
730  return true;
731  case Intrinsic::log2:
732  MIRBuilder.buildInstr(TargetOpcode::G_FLOG2)
733  .addDef(getOrCreateVReg(CI))
734  .addUse(getOrCreateVReg(*CI.getArgOperand(0)));
735  return true;
736  case Intrinsic::fma:
737  MIRBuilder.buildInstr(TargetOpcode::G_FMA)
738  .addDef(getOrCreateVReg(CI))
739  .addUse(getOrCreateVReg(*CI.getArgOperand(0)))
740  .addUse(getOrCreateVReg(*CI.getArgOperand(1)))
741  .addUse(getOrCreateVReg(*CI.getArgOperand(2)));
742  return true;
743  case Intrinsic::memcpy:
744  case Intrinsic::memmove:
745  case Intrinsic::memset:
746  return translateMemfunc(CI, MIRBuilder, ID);
747  case Intrinsic::eh_typeid_for: {
// NOTE(review): the extraction of the GlobalValue argument (original line
// 748) is missing from this extraction.
749  unsigned Reg = getOrCreateVReg(CI);
750  unsigned TypeID = MF->getTypeIDFor(GV);
751  MIRBuilder.buildConstant(Reg, TypeID);
752  return true;
753  }
754  case Intrinsic::objectsize: {
755  // If we don't know by now, we're never going to know.
756  const ConstantInt *Min = cast<ConstantInt>(CI.getArgOperand(1));
757 
758  MIRBuilder.buildConstant(getOrCreateVReg(CI), Min->isZero() ? -1ULL : 0);
759  return true;
760  }
761  case Intrinsic::stackguard:
762  getStackGuard(getOrCreateVReg(CI), MIRBuilder);
763  return true;
764  case Intrinsic::stackprotector: {
// Load the stack guard and store it into the given alloca slot.
765  LLT PtrTy = getLLTForType(*CI.getArgOperand(0)->getType(), *DL);
766  unsigned GuardVal = MRI->createGenericVirtualRegister(PtrTy);
767  getStackGuard(GuardVal, MIRBuilder);
768 
// NOTE(review): parts of the MachineMemOperand construction for this store
// (original lines 772-773 and 775) are missing from this extraction.
769  AllocaInst *Slot = cast<AllocaInst>(CI.getArgOperand(1));
770  MIRBuilder.buildStore(
771  GuardVal, getOrCreateVReg(*Slot),
774  getOrCreateFrameIndex(*Slot)),
776  PtrTy.getSizeInBits() / 8, 8));
777  return true;
778  }
779  }
// Not one of the specially-handled intrinsics.
780  return false;
781 }
782 
// Translate an inline-asm call. Only constraint-free asm is supported;
// anything with a constraint string falls back.
783 bool IRTranslator::translateInlineAsm(const CallInst &CI,
784  MachineIRBuilder &MIRBuilder) {
785  const InlineAsm &IA = cast<InlineAsm>(*CI.getCalledValue());
786  if (!IA.getConstraintString().empty())
787  return false;
788 
// Encode side-effect and dialect information into the extra-info operand.
789  unsigned ExtraInfo = 0;
790  if (IA.hasSideEffects())
791  ExtraInfo |= InlineAsm::Extra_HasSideEffects;
792  if (IA.getDialect() == InlineAsm::AD_Intel)
793  ExtraInfo |= InlineAsm::Extra_AsmDialect;
794 
// NOTE(review): the buildInstr call these operands attach to (original line
// 795, presumably TargetOpcode::INLINEASM) is missing from this extraction.
796  .addExternalSymbol(IA.getAsmString().c_str())
797  .addImm(ExtraInfo);
798 
799  return true;
800 }
801 
// Translate a call: inline asm, a plain (possibly indirect) call through the
// target's call lowering, or an intrinsic (known intrinsics first, otherwise
// a generic G_INTRINSIC).
802 bool IRTranslator::translateCall(const User &U, MachineIRBuilder &MIRBuilder) {
803  const CallInst &CI = cast<CallInst>(U);
804  auto TII = MF->getTarget().getIntrinsicInfo();
805  const Function *F = CI.getCalledFunction();
806 
807  if (CI.isInlineAsm())
808  return translateInlineAsm(CI, MIRBuilder);
809 
// Indirect call or a direct call to a non-intrinsic function.
// NOTE(review): the declaration of Args (original line 812) is missing from
// this extraction.
810  if (!F || !F->isIntrinsic()) {
811  unsigned Res = CI.getType()->isVoidTy() ? 0 : getOrCreateVReg(CI);
813  for (auto &Arg: CI.arg_operands())
814  Args.push_back(getOrCreateVReg(*Arg));
815 
816  MF->getFrameInfo().setHasCalls(true);
// The callee vreg is materialized lazily (only needed for indirect calls).
817  return CLI->lowerCall(MIRBuilder, &CI, Res, Args, [&]() {
818  return getOrCreateVReg(*CI.getCalledValue());
819  });
820  }
821 
// Resolve target-specific intrinsics through TargetIntrinsicInfo.
822  Intrinsic::ID ID = F->getIntrinsicID();
823  if (TII && ID == Intrinsic::not_intrinsic)
824  ID = static_cast<Intrinsic::ID>(TII->getIntrinsicID(F));
825 
826  assert(ID != Intrinsic::not_intrinsic && "unknown intrinsic");
827 
828  if (translateKnownIntrinsic(CI, ID, MIRBuilder))
829  return true;
830 
// Fall back to a generic G_INTRINSIC[_W_SIDE_EFFECTS] instruction.
831  unsigned Res = CI.getType()->isVoidTy() ? 0 : getOrCreateVReg(CI);
832  MachineInstrBuilder MIB =
833  MIRBuilder.buildIntrinsic(ID, Res, !CI.doesNotAccessMemory());
834 
835  for (auto &Arg : CI.arg_operands()) {
836  // Some intrinsics take metadata parameters. Reject them.
837  if (isa<MetadataAsValue>(Arg))
838  return false;
839  MIB.addUse(getOrCreateVReg(*Arg));
840  }
841 
842  // Add a MachineMemOperand if it is a target mem intrinsic.
// NOTE(review): the IntrinsicInfo declaration and parts of the flags /
// memory-operand construction (original lines 844, 847-848, 850 and 852)
// are missing from this extraction.
843  const TargetLowering &TLI = *MF->getSubtarget().getTargetLowering();
845  // TODO: Add a GlobalISel version of getTgtMemIntrinsic.
846  if (TLI.getTgtMemIntrinsic(Info, CI, ID)) {
849  Flags |=
851  uint64_t Size = Info.memVT.getSizeInBits() >> 3;
853  Flags, Size, Info.align));
854  }
855 
856  return true;
857 }
858 
// Translate an invoke: lower the call bracketed by EH_LABELs so the landing
// pad range is recorded, then branch to the normal destination. Several
// forms (inline asm, intrinsics, non-landingpad EH) are unsupported.
859 bool IRTranslator::translateInvoke(const User &U,
860  MachineIRBuilder &MIRBuilder) {
861  const InvokeInst &I = cast<InvokeInst>(U);
862  MCContext &Context = MF->getContext();
863 
864  const BasicBlock *ReturnBB = I.getSuccessor(0);
865  const BasicBlock *EHPadBB = I.getSuccessor(1);
866 
867  const Value *Callee = I.getCalledValue();
868  const Function *Fn = dyn_cast<Function>(Callee);
869  if (isa<InlineAsm>(Callee))
870  return false;
871 
872  // FIXME: support invoking patchpoint and statepoint intrinsics.
873  if (Fn && Fn->isIntrinsic())
874  return false;
875 
876  // FIXME: support whatever these are.
// NOTE(review): the condition guarding this bail-out (original line 877)
// is missing from this extraction.
878  return false;
879 
880  // FIXME: support Windows exception handling.
881  if (!isa<LandingPadInst>(EHPadBB->front()))
882  return false;
883 
884  // Emit the actual call, bracketed by EH_LABELs so that the MF knows about
885  // the region covered by the try.
886  MCSymbol *BeginSymbol = Context.createTempSymbol();
887  MIRBuilder.buildInstr(TargetOpcode::EH_LABEL).addSym(BeginSymbol);
888 
// NOTE(review): the declaration of Args (original line 890) is missing from
// this extraction.
889  unsigned Res = I.getType()->isVoidTy() ? 0 : getOrCreateVReg(I);
891  for (auto &Arg: I.arg_operands())
892  Args.push_back(getOrCreateVReg(*Arg));
893 
894  if (!CLI->lowerCall(MIRBuilder, &I, Res, Args,
895  [&]() { return getOrCreateVReg(*I.getCalledValue()); }))
896  return false;
897 
898  MCSymbol *EndSymbol = Context.createTempSymbol();
899  MIRBuilder.buildInstr(TargetOpcode::EH_LABEL).addSym(EndSymbol);
900 
// Register the invoke range with the MF and wire up both CFG successors;
// control flow falls through to an explicit branch to the normal dest.
901  // FIXME: track probabilities.
902  MachineBasicBlock &EHPadMBB = getMBB(*EHPadBB),
903  &ReturnMBB = getMBB(*ReturnBB);
904  MF->addInvoke(&EHPadMBB, BeginSymbol, EndSymbol);
905  MIRBuilder.getMBB().addSuccessor(&ReturnMBB);
906  MIRBuilder.getMBB().addSuccessor(&EHPadMBB);
907  MIRBuilder.buildBr(ReturnMBB);
908 
909  return true;
910 }
911 
// Translate a landingpad: mark the block as an EH pad, emit the landing-pad
// label, and copy the exception pointer / selector out of their physical
// registers into the two fields of the landingpad's aggregate result.
912 bool IRTranslator::translateLandingPad(const User &U,
913  MachineIRBuilder &MIRBuilder) {
914  const LandingPadInst &LP = cast<LandingPadInst>(U);
915 
916  MachineBasicBlock &MBB = MIRBuilder.getMBB();
917  addLandingPadInfo(LP, MBB);
918 
919  MBB.setIsEHPad();
920 
921  // If there aren't registers to copy the values into (e.g., during SjLj
922  // exceptions), then don't bother.
923  auto &TLI = *MF->getSubtarget().getTargetLowering();
924  const Constant *PersonalityFn = MF->getFunction()->getPersonalityFn();
925  if (TLI.getExceptionPointerRegister(PersonalityFn) == 0 &&
926  TLI.getExceptionSelectorRegister(PersonalityFn) == 0)
927  return true;
928 
929  // If landingpad's return type is token type, we don't create DAG nodes
930  // for its exception pointer and selector value. The extraction of exception
931  // pointer or selector value from token type landingpads is not currently
932  // supported.
933  if (LP.getType()->isTokenTy())
934  return true;
935 
936  // Add a label to mark the beginning of the landing pad. Deletion of the
937  // landing pad can thus be detected via the MachineModuleInfo.
// NOTE(review): the buildInstr(TargetOpcode::EH_LABEL) call this operand
// attaches to (original line 938) is missing from this extraction.
939  .addSym(MF->addLandingPad(&MBB));
940 
941  LLT Ty = getLLTForType(*LP.getType(), *DL);
942  unsigned Undef = MRI->createGenericVirtualRegister(Ty);
943  MIRBuilder.buildUndef(Undef);
944 
// Collect the low-level types of the two landingpad struct fields.
// NOTE(review): the declaration of Tys (original line 945) is missing from
// this extraction.
946  for (Type *Ty : cast<StructType>(LP.getType())->elements())
947  Tys.push_back(getLLTForType(*Ty, *DL));
948  assert(Tys.size() == 2 && "Only two-valued landingpads are supported");
949 
950  // Mark exception register as live in.
951  unsigned ExceptionReg = TLI.getExceptionPointerRegister(PersonalityFn);
952  if (!ExceptionReg)
953  return false;
954 
// Insert the exception pointer into field 0 of the (initially undef)
// aggregate.
955  MBB.addLiveIn(ExceptionReg);
956  unsigned VReg = MRI->createGenericVirtualRegister(Tys[0]),
957  Tmp = MRI->createGenericVirtualRegister(Ty);
958  MIRBuilder.buildCopy(VReg, ExceptionReg);
959  MIRBuilder.buildInsert(Tmp, Undef, VReg, 0);
960 
961  unsigned SelectorReg = TLI.getExceptionSelectorRegister(PersonalityFn);
962  if (!SelectorReg)
963  return false;
964 
965  MBB.addLiveIn(SelectorReg);
966 
967  // N.b. the exception selector register always has pointer type and may not
968  // match the actual IR-level type in the landingpad so an extra cast is
969  // needed.
970  unsigned PtrVReg = MRI->createGenericVirtualRegister(Tys[0]);
971  MIRBuilder.buildCopy(PtrVReg, SelectorReg);
972 
// Convert the selector to the field's integer type and insert it at the
// offset of field 1.
973  VReg = MRI->createGenericVirtualRegister(Tys[1]);
974  MIRBuilder.buildInstr(TargetOpcode::G_PTRTOINT).addDef(VReg).addUse(PtrVReg);
975  MIRBuilder.buildInsert(getOrCreateVReg(LP), Tmp, VReg,
976  Tys[0].getSizeInBits());
977  return true;
978 }
979 
980 bool IRTranslator::translateAlloca(const User &U,
981  MachineIRBuilder &MIRBuilder) {
982  auto &AI = cast<AllocaInst>(U);
983 
984  if (AI.isStaticAlloca()) {
985  unsigned Res = getOrCreateVReg(AI);
986  int FI = getOrCreateFrameIndex(AI);
987  MIRBuilder.buildFrameIndex(Res, FI);
988  return true;
989  }
990 
991  // Now we're in the harder dynamic case.
992  Type *Ty = AI.getAllocatedType();
993  unsigned Align =
994  std::max((unsigned)DL->getPrefTypeAlignment(Ty), AI.getAlignment());
995 
996  unsigned NumElts = getOrCreateVReg(*AI.getArraySize());
997 
998  Type *IntPtrIRTy = DL->getIntPtrType(AI.getType());
999  LLT IntPtrTy = getLLTForType(*IntPtrIRTy, *DL);
1000  if (MRI->getType(NumElts) != IntPtrTy) {
1001  unsigned ExtElts = MRI->createGenericVirtualRegister(IntPtrTy);
1002  MIRBuilder.buildZExtOrTrunc(ExtElts, NumElts);
1003  NumElts = ExtElts;
1004  }
1005 
1006  unsigned AllocSize = MRI->createGenericVirtualRegister(IntPtrTy);
1007  unsigned TySize =
1008  getOrCreateVReg(*ConstantInt::get(IntPtrIRTy, -DL->getTypeAllocSize(Ty)));
1009  MIRBuilder.buildMul(AllocSize, NumElts, TySize);
1010 
1011  LLT PtrTy = getLLTForType(*AI.getType(), *DL);
1012  auto &TLI = *MF->getSubtarget().getTargetLowering();
1013  unsigned SPReg = TLI.getStackPointerRegisterToSaveRestore();
1014 
1015  unsigned SPTmp = MRI->createGenericVirtualRegister(PtrTy);
1016  MIRBuilder.buildCopy(SPTmp, SPReg);
1017 
1018  unsigned AllocTmp = MRI->createGenericVirtualRegister(PtrTy);
1019  MIRBuilder.buildGEP(AllocTmp, SPTmp, AllocSize);
1020 
1021  // Handle alignment. We have to realign if the allocation granule was smaller
1022  // than stack alignment, or the specific alloca requires more than stack
1023  // alignment.
1024  unsigned StackAlign =
1026  Align = std::max(Align, StackAlign);
1027  if (Align > StackAlign || DL->getTypeAllocSize(Ty) % StackAlign != 0) {
1028  // Round the size of the allocation up to the stack alignment size
1029  // by add SA-1 to the size. This doesn't overflow because we're computing
1030  // an address inside an alloca.
1031  unsigned AlignedAlloc = MRI->createGenericVirtualRegister(PtrTy);
1032  MIRBuilder.buildPtrMask(AlignedAlloc, AllocTmp, Log2_32(Align));
1033  AllocTmp = AlignedAlloc;
1034  }
1035 
1036  MIRBuilder.buildCopy(SPReg, AllocTmp);
1037  MIRBuilder.buildCopy(getOrCreateVReg(AI), AllocTmp);
1038 
1039  MF->getFrameInfo().CreateVariableSizedObject(Align ? Align : 1, &AI);
1041  return true;
1042 }
1043 
1044 bool IRTranslator::translateVAArg(const User &U, MachineIRBuilder &MIRBuilder) {
1045  // FIXME: We may need more info about the type. Because of how LLT works,
1046  // we're completely discarding the i64/double distinction here (amongst
1047  // others). Fortunately the ABIs I know of where that matters don't use va_arg
1048  // anyway but that's not guaranteed.
1049  MIRBuilder.buildInstr(TargetOpcode::G_VAARG)
1050  .addDef(getOrCreateVReg(U))
1051  .addUse(getOrCreateVReg(*U.getOperand(0)))
1052  .addImm(DL->getABITypeAlignment(U.getType()));
1053  return true;
1054 }
1055 
1056 bool IRTranslator::translateInsertElement(const User &U,
1057  MachineIRBuilder &MIRBuilder) {
1058  // If it is a <1 x Ty> vector, use the scalar as it is
1059  // not a legal vector type in LLT.
1060  if (U.getType()->getVectorNumElements() == 1) {
1061  unsigned Elt = getOrCreateVReg(*U.getOperand(1));
1062  ValToVReg[&U] = Elt;
1063  return true;
1064  }
1065  unsigned Res = getOrCreateVReg(U);
1066  unsigned Val = getOrCreateVReg(*U.getOperand(0));
1067  unsigned Elt = getOrCreateVReg(*U.getOperand(1));
1068  unsigned Idx = getOrCreateVReg(*U.getOperand(2));
1069  MIRBuilder.buildInsertVectorElement(Res, Val, Elt, Idx);
1070  return true;
1071 }
1072 
1073 bool IRTranslator::translateExtractElement(const User &U,
1074  MachineIRBuilder &MIRBuilder) {
1075  // If it is a <1 x Ty> vector, use the scalar as it is
1076  // not a legal vector type in LLT.
1077  if (U.getOperand(0)->getType()->getVectorNumElements() == 1) {
1078  unsigned Elt = getOrCreateVReg(*U.getOperand(0));
1079  ValToVReg[&U] = Elt;
1080  return true;
1081  }
1082  unsigned Res = getOrCreateVReg(U);
1083  unsigned Val = getOrCreateVReg(*U.getOperand(0));
1084  unsigned Idx = getOrCreateVReg(*U.getOperand(1));
1085  MIRBuilder.buildExtractVectorElement(Res, Val, Idx);
1086  return true;
1087 }
1088 
1089 bool IRTranslator::translateShuffleVector(const User &U,
1090  MachineIRBuilder &MIRBuilder) {
1091  MIRBuilder.buildInstr(TargetOpcode::G_SHUFFLE_VECTOR)
1092  .addDef(getOrCreateVReg(U))
1093  .addUse(getOrCreateVReg(*U.getOperand(0)))
1094  .addUse(getOrCreateVReg(*U.getOperand(1)))
1095  .addUse(getOrCreateVReg(*U.getOperand(2)));
1096  return true;
1097 }
1098 
1099 bool IRTranslator::translatePHI(const User &U, MachineIRBuilder &MIRBuilder) {
1100  const PHINode &PI = cast<PHINode>(U);
1101  auto MIB = MIRBuilder.buildInstr(TargetOpcode::G_PHI);
1102  MIB.addDef(getOrCreateVReg(PI));
1103 
1104  PendingPHIs.emplace_back(&PI, MIB.getInstr());
1105  return true;
1106 }
1107 
1108 void IRTranslator::finishPendingPhis() {
1109  for (std::pair<const PHINode *, MachineInstr *> &Phi : PendingPHIs) {
1110  const PHINode *PI = Phi.first;
1111  MachineInstrBuilder MIB(*MF, Phi.second);
1112 
1113  // All MachineBasicBlocks exist, add them to the PHI. We assume IRTranslator
1114  // won't create extra control flow here, otherwise we need to find the
1115  // dominating predecessor here (or perhaps force the weirder IRTranslators
1116  // to provide a simple boundary).
1117  SmallSet<const BasicBlock *, 4> HandledPreds;
1118 
1119  for (unsigned i = 0; i < PI->getNumIncomingValues(); ++i) {
1120  auto IRPred = PI->getIncomingBlock(i);
1121  if (HandledPreds.count(IRPred))
1122  continue;
1123 
1124  HandledPreds.insert(IRPred);
1125  unsigned ValReg = getOrCreateVReg(*PI->getIncomingValue(i));
1126  for (auto Pred : getMachinePredBBs({IRPred, PI->getParent()})) {
1127  assert(Pred->isSuccessor(MIB->getParent()) &&
1128  "incorrect CFG at MachineBasicBlock level");
1129  MIB.addUse(ValReg);
1130  MIB.addMBB(Pred);
1131  }
1132  }
1133  }
1134 }
1135 
1136 bool IRTranslator::translate(const Instruction &Inst) {
1137  CurBuilder.setDebugLoc(Inst.getDebugLoc());
1138  switch(Inst.getOpcode()) {
1139 #define HANDLE_INST(NUM, OPCODE, CLASS) \
1140  case Instruction::OPCODE: return translate##OPCODE(Inst, CurBuilder);
1141 #include "llvm/IR/Instruction.def"
1142  default:
1143  return false;
1144  }
1145 }
1146 
1147 bool IRTranslator::translate(const Constant &C, unsigned Reg) {
1148  if (auto CI = dyn_cast<ConstantInt>(&C))
1149  EntryBuilder.buildConstant(Reg, *CI);
1150  else if (auto CF = dyn_cast<ConstantFP>(&C))
1151  EntryBuilder.buildFConstant(Reg, *CF);
1152  else if (isa<UndefValue>(C))
1153  EntryBuilder.buildUndef(Reg);
1154  else if (isa<ConstantPointerNull>(C))
1155  EntryBuilder.buildConstant(Reg, 0);
1156  else if (auto GV = dyn_cast<GlobalValue>(&C))
1157  EntryBuilder.buildGlobalValue(Reg, GV);
1158  else if (auto CAZ = dyn_cast<ConstantAggregateZero>(&C)) {
1159  if (!CAZ->getType()->isVectorTy())
1160  return false;
1161  // Return the scalar if it is a <1 x Ty> vector.
1162  if (CAZ->getNumElements() == 1)
1163  return translate(*CAZ->getElementValue(0u), Reg);
1164  std::vector<unsigned> Ops;
1165  for (unsigned i = 0; i < CAZ->getNumElements(); ++i) {
1166  Constant &Elt = *CAZ->getElementValue(i);
1167  Ops.push_back(getOrCreateVReg(Elt));
1168  }
1169  EntryBuilder.buildMerge(Reg, Ops);
1170  } else if (auto CV = dyn_cast<ConstantDataVector>(&C)) {
1171  // Return the scalar if it is a <1 x Ty> vector.
1172  if (CV->getNumElements() == 1)
1173  return translate(*CV->getElementAsConstant(0), Reg);
1174  std::vector<unsigned> Ops;
1175  for (unsigned i = 0; i < CV->getNumElements(); ++i) {
1176  Constant &Elt = *CV->getElementAsConstant(i);
1177  Ops.push_back(getOrCreateVReg(Elt));
1178  }
1179  EntryBuilder.buildMerge(Reg, Ops);
1180  } else if (auto CE = dyn_cast<ConstantExpr>(&C)) {
1181  switch(CE->getOpcode()) {
1182 #define HANDLE_INST(NUM, OPCODE, CLASS) \
1183  case Instruction::OPCODE: return translate##OPCODE(*CE, EntryBuilder);
1184 #include "llvm/IR/Instruction.def"
1185  default:
1186  return false;
1187  }
1188  } else if (auto CS = dyn_cast<ConstantStruct>(&C)) {
1189  // Return the element if it is a single element ConstantStruct.
1190  if (CS->getNumOperands() == 1) {
1191  unsigned EltReg = getOrCreateVReg(*CS->getOperand(0));
1192  EntryBuilder.buildCast(Reg, EltReg);
1193  return true;
1194  }
1196  SmallVector<uint64_t, 4> Indices;
1197  uint64_t Offset = 0;
1198  for (unsigned i = 0; i < CS->getNumOperands(); ++i) {
1199  unsigned OpReg = getOrCreateVReg(*CS->getOperand(i));
1200  Ops.push_back(OpReg);
1201  Indices.push_back(Offset);
1202  Offset += MRI->getType(OpReg).getSizeInBits();
1203  }
1204  EntryBuilder.buildSequence(Reg, Ops, Indices);
1205  } else if (auto CV = dyn_cast<ConstantVector>(&C)) {
1206  if (CV->getNumOperands() == 1)
1207  return translate(*CV->getOperand(0), Reg);
1209  for (unsigned i = 0; i < CV->getNumOperands(); ++i) {
1210  Ops.push_back(getOrCreateVReg(*CV->getOperand(i)));
1211  }
1212  EntryBuilder.buildMerge(Reg, Ops);
1213  } else
1214  return false;
1215 
1216  return true;
1217 }
1218 
1219 void IRTranslator::finalizeFunction() {
1220  // Release the memory used by the different maps we
1221  // needed during the translation.
1222  PendingPHIs.clear();
1223  ValToVReg.clear();
1224  FrameIndices.clear();
1225  MachinePreds.clear();
1226  // MachineIRBuilder::DebugLoc can outlive the DILocation it holds. Clear it
1227  // to avoid accessing free’d memory (in runOnMachineFunction) and to avoid
1228  // destroying it twice (in ~IRTranslator() and ~LLVMContext())
1229  EntryBuilder = MachineIRBuilder();
1230  CurBuilder = MachineIRBuilder();
1231 }
1232 
1234  MF = &CurMF;
1235  const Function &F = *MF->getFunction();
1236  if (F.empty())
1237  return false;
1238  CLI = MF->getSubtarget().getCallLowering();
1239  CurBuilder.setMF(*MF);
1240  EntryBuilder.setMF(*MF);
1241  MRI = &MF->getRegInfo();
1242  DL = &F.getParent()->getDataLayout();
1243  TPC = &getAnalysis<TargetPassConfig>();
1244  ORE = llvm::make_unique<OptimizationRemarkEmitter>(&F);
1245 
1246  assert(PendingPHIs.empty() && "stale PHIs");
1247 
1248  // Release the per-function state when we return, whether we succeeded or not.
1249  auto FinalizeOnReturn = make_scope_exit([this]() { finalizeFunction(); });
1250 
1251  // Setup a separate basic-block for the arguments and constants
1252  MachineBasicBlock *EntryBB = MF->CreateMachineBasicBlock();
1253  MF->push_back(EntryBB);
1254  EntryBuilder.setMBB(*EntryBB);
1255 
1256  // Create all blocks, in IR order, to preserve the layout.
1257  for (const BasicBlock &BB: F) {
1258  auto *&MBB = BBToMBB[&BB];
1259 
1260  MBB = MF->CreateMachineBasicBlock(&BB);
1261  MF->push_back(MBB);
1262 
1263  if (BB.hasAddressTaken())
1264  MBB->setHasAddressTaken();
1265  }
1266 
1267  // Make our arguments/constants entry block fallthrough to the IR entry block.
1268  EntryBB->addSuccessor(&getMBB(F.front()));
1269 
1270  // Lower the actual args into this basic block.
1271  SmallVector<unsigned, 8> VRegArgs;
1272  for (const Argument &Arg: F.args())
1273  VRegArgs.push_back(getOrCreateVReg(Arg));
1274  if (!CLI->lowerFormalArguments(EntryBuilder, F, VRegArgs)) {
1275  OptimizationRemarkMissed R("gisel-irtranslator", "GISelFailure",
1276  MF->getFunction()->getSubprogram(),
1277  &MF->getFunction()->getEntryBlock());
1278  R << "unable to lower arguments: " << ore::NV("Prototype", F.getType());
1279  reportTranslationError(*MF, *TPC, *ORE, R);
1280  return false;
1281  }
1282 
1283  // And translate the function!
1284  for (const BasicBlock &BB: F) {
1285  MachineBasicBlock &MBB = getMBB(BB);
1286  // Set the insertion point of all the following translations to
1287  // the end of this basic block.
1288  CurBuilder.setMBB(MBB);
1289 
1290  for (const Instruction &Inst: BB) {
1291  if (translate(Inst))
1292  continue;
1293 
1294  OptimizationRemarkMissed R("gisel-irtranslator", "GISelFailure",
1295  Inst.getDebugLoc(), &BB);
1296  R << "unable to translate instruction: " << ore::NV("Opcode", &Inst);
1297 
1298  if (ORE->allowExtraAnalysis("gisel-irtranslator")) {
1299  std::string InstStrStorage;
1300  raw_string_ostream InstStr(InstStrStorage);
1301  InstStr << Inst;
1302 
1303  R << ": '" << InstStr.str() << "'";
1304  }
1305 
1306  reportTranslationError(*MF, *TPC, *ORE, R);
1307  return false;
1308  }
1309  }
1310 
1311  finishPendingPhis();
1312 
1313  // Merge the argument lowering and constants block with its single
1314  // successor, the LLVM-IR entry block. We want the basic block to
1315  // be maximal.
1316  assert(EntryBB->succ_size() == 1 &&
1317  "Custom BB used for lowering should have only one successor");
1318  // Get the successor of the current entry block.
1319  MachineBasicBlock &NewEntryBB = **EntryBB->succ_begin();
1320  assert(NewEntryBB.pred_size() == 1 &&
1321  "LLVM-IR entry block has a predecessor!?");
1322  // Move all the instruction from the current entry block to the
1323  // new entry block.
1324  NewEntryBB.splice(NewEntryBB.begin(), EntryBB, EntryBB->begin(),
1325  EntryBB->end());
1326 
1327  // Update the live-in information for the new entry block.
1328  for (const MachineBasicBlock::RegisterMaskPair &LiveIn : EntryBB->liveins())
1329  NewEntryBB.addLiveIn(LiveIn);
1330  NewEntryBB.sortUniqueLiveIns();
1331 
1332  // Get rid of the now empty basic block.
1333  EntryBB->removeSuccessor(&NewEntryBB);
1334  MF->remove(EntryBB);
1335  MF->DeleteMachineBasicBlock(EntryBB);
1336 
1337  assert(&MF->front() == &NewEntryBB &&
1338  "New entry wasn't next in the list of basic block!");
1339 
1340  return false;
1341 }
MachineBasicBlock & getMBB()
Getter for the basic block we currently build.
uint64_t CallInst * C
void initializeIRTranslatorPass(PassRegistry &)
Return a value (possibly void), from a function.
Value * getValueOperand()
Definition: Instructions.h:395
bool isIntrinsic() const
isIntrinsic - Returns true if the function's name starts with "llvm.".
Definition: Function.h:180
void push_back(const T &Elt)
Definition: SmallVector.h:212
constexpr char Align[]
Key for Kernel::Arg::Metadata::mAlign.
This class is the base class for the comparison instructions.
Definition: InstrTypes.h:843
bool empty() const
Definition: Function.h:594
static IntegerType * getInt1Ty(LLVMContext &C)
Definition: Type.cpp:173
void setVariableDbgInfo(const DILocalVariable *Var, const DIExpression *Expr, unsigned Slot, const DILocation *Loc)
Collect information used to emit debugging information of a variable.
Diagnostic information for missed-optimization remarks.
This instruction extracts a struct member or array element value from an aggregate value...
static PassRegistry * getPassRegistry()
getPassRegistry - Access the global registry object, which is automatically initialized at applicatio...
iterator_range< CaseIt > cases()
Iteration adapter for range-for loops.
GCNRegPressure max(const GCNRegPressure &P1, const GCNRegPressure &P2)
This class represents an incoming formal argument to a Function.
Definition: Argument.h:30
LLVMContext & Context
MachineInstrBuilder buildGEP(unsigned Res, unsigned Op0, unsigned Op1)
Build and insert Res<def> = G_GEP Op0, Op1.
DiagnosticInfoOptimizationBase::Argument NV
LLVM_ATTRIBUTE_NORETURN void report_fatal_error(Error Err, bool gen_crash_diag=true)
Report a serious error, calling any installed error handler.
Definition: Error.cpp:103
Compute iterated dominance frontiers using a linear time algorithm.
Definition: AllocatorList.h:24
MachineInstrBuilder buildIndirectDbgValue(unsigned Reg, const MDNode *Variable, const MDNode *Expr)
Build and insert a DBG_VALUE instruction expressing the fact that the associated Variable lives in me...
unsigned countOperandBundlesOfType(StringRef Name) const
Return the number of operand bundles with the tag Name attached to this instruction.
Definition: InstrTypes.h:1339
MCSymbol * addLandingPad(MachineBasicBlock *LandingPad)
Add a new landing pad. Returns the label ID for the landing pad entry.
int CreateStackObject(uint64_t Size, unsigned Alignment, bool isSS, const AllocaInst *Alloca=nullptr, uint8_t ID=0)
Create a new statically sized stack object, returning a nonnegative identifier to represent it...
MCSymbol - Instances of this class represent a symbol name in the MC file, and MCSymbols are created ...
Definition: MCSymbol.h:42
MachineInstrBuilder buildZExtOrTrunc(unsigned Res, unsigned Op)
Build and insert Res<def> = G_ZEXT Op, Res = G_TRUNC Op, or Res = COPY Op depending on the differing ...
bool isSized(SmallPtrSetImpl< Type *> *Visited=nullptr) const
Return true if it makes sense to take the size of this type.
Definition: Type.h:262
LLVM_ATTRIBUTE_ALWAYS_INLINE size_type size() const
Definition: SmallVector.h:136
virtual const TargetRegisterInfo * getRegisterInfo() const
getRegisterInfo - If register information is available, return it.
void addLandingPadInfo(const LandingPadInst &I, MachineBasicBlock &MBB)
Extract the exception handling information from the landingpad instruction and add them to the specif...
const StructLayout * getStructLayout(StructType *Ty) const
Returns a StructLayout object, indicating the alignment of the struct, its size, and the offsets of i...
Definition: DataLayout.cpp:562
IRTranslator LLVM IR static false void reportTranslationError(MachineFunction &MF, const TargetPassConfig &TPC, OptimizationRemarkEmitter &ORE, OptimizationRemarkMissed &R)
This class represents a function call, abstracting a target machine's calling convention.
This file contains the declarations for metadata subclasses.
Value * getCondition() const
virtual const TargetLowering * getTargetLowering() const
void setDebugLoc(const DebugLoc &DL)
Set the debug location to DL for all the next build instructions.
SyncScope::ID getSyncScopeID() const
Returns the synchronization scope ID of this store instruction.
Definition: Instructions.h:370
gep_type_iterator gep_type_end(const User *GEP)
const std::string & getAsmString() const
Definition: InlineAsm.h:81
AtomicOrdering getOrdering() const
Returns the ordering constraint of this load instruction.
Definition: Instructions.h:233
LLVM_NODISCARD detail::scope_exit< typename std::decay< Callable >::type > make_scope_exit(Callable &&F)
Definition: ScopeExit.h:47
LLVMContext & getContext() const
All values hold a context through their type.
Definition: Value.cpp:697
iterator_range< op_iterator > arg_operands()
Iteration adapter for range-for loops.
BasicBlock * getSuccessor(unsigned i) const
unsigned getPointerSizeInBits(unsigned AS=0) const
Layout pointer size, in bits FIXME: The defaults need to be removed once all of the backends/clients ...
Definition: DataLayout.h:346
F(f)
An instruction for reading from memory.
Definition: Instructions.h:164
Value * getCondition() const
bool isVectorTy() const
True if this is an instance of VectorType.
Definition: Type.h:227
unsigned getTypeIDFor(const GlobalValue *TI)
Return the type id for the specified typeinfo. This is function wide.
unsigned createGenericVirtualRegister(LLT Ty)
Create and return a new generic virtual register with low-level type Ty.
MachineInstrBuilder buildExtract(unsigned Res, unsigned Src, uint64_t Index)
Build and insert `Res0<def>, ...
GlobalValue * ExtractTypeInfo(Value *V)
ExtractTypeInfo - Returns the type info, possibly bitcast, encoded in V.
Definition: Analysis.cpp:119
CallingConv::ID getCallingConv() const
getCallingConv/setCallingConv - Get or set the calling convention of this function call...
void DeleteMachineBasicBlock(MachineBasicBlock *MBB)
DeleteMachineBasicBlock - Delete the given MachineBasicBlock.
MachineInstrBuilder buildSelect(unsigned Res, unsigned Tst, unsigned Op0, unsigned Op1)
Build and insert a Res = G_SELECT Tst, Op0, Op1.
MachineInstrBuilder buildFCmp(CmpInst::Predicate Pred, unsigned Res, unsigned Op0, unsigned Op1)
Build and insert a Res = G_FCMP PredOp0, Op1.
bool runOnMachineFunction(MachineFunction &MF) override
runOnMachineFunction - This method must be overloaded to perform the desired machine code transformat...
static Constant * getNullValue(Type *Ty)
Constructor to create a '0' constant of arbitrary type.
Definition: Constants.cpp:207
bool hasSideEffects() const
Definition: InlineAsm.h:67
MachineInstrBuilder buildStore(unsigned Val, unsigned Addr, MachineMemOperand &MMO)
Build and insert G_STORE Val, Addr, MMO.
AnalysisUsage & addRequired()
#define INITIALIZE_PASS_DEPENDENCY(depName)
Definition: PassSupport.h:51
bool isVolatile() const
Return true if this is a load from a volatile memory location.
Definition: Instructions.h:217
A description of a memory reference used in the backend.
const DataLayout & getDataLayout() const
Get the data layout for the module&#39;s target platform.
Definition: Module.cpp:361
#define DEBUG_TYPE
virtual bool lowerFormalArguments(MachineIRBuilder &MIRBuilder, const Function &F, ArrayRef< unsigned > VRegs) const
This hook must be implemented to lower the incoming (formal) arguments, described by Args...
Definition: CallLowering.h:159
MachineFunctionPass - This class adapts the FunctionPass interface to allow convenient creation of pa...
const HexagonInstrInfo * TII
unsigned getAlignment() const
Return the alignment of the memory that is being allocated by the instruction.
Definition: Instructions.h:109
PointerType * getType() const
Overload to return most specific pointer type.
Definition: Instructions.h:97
Class to represent struct types.
Definition: DerivedTypes.h:201
const TargetMachine & getTarget() const
getTarget - Return the target machine this machine code is compiled with
const MachineInstrBuilder & addUse(unsigned RegNo, unsigned Flags=0, unsigned SubReg=0) const
Add a virtual register use operand.
MachineInstrBuilder buildFConstant(unsigned Res, const ConstantFP &Val)
Build and insert Res = G_FCONSTANT Val.
Reg
All possible values of the reg field in the ModR/M byte.
TypeID
Definitions of all of the base types for the Type system.
Definition: Type.h:55
The memory access is dereferenceable (i.e., doesn&#39;t trap).
MachineInstrBuilder buildExtractVectorElement(unsigned Res, unsigned Val, unsigned Idx)
Build and insert Res<def> = G_EXTRACT_VECTOR_ELT Val, Idx.
bool hasVarSizedObjects() const
This method may be called any time after instruction selection is complete to determine if the stack ...
MachineMemOperand * getMachineMemOperand(MachinePointerInfo PtrInfo, MachineMemOperand::Flags f, uint64_t s, unsigned base_alignment, const AAMDNodes &AAInfo=AAMDNodes(), const MDNode *Ranges=nullptr, SyncScope::ID SSID=SyncScope::System, AtomicOrdering Ordering=AtomicOrdering::NotAtomic, AtomicOrdering FailureOrdering=AtomicOrdering::NotAtomic)
getMachineMemOperand - Allocate a new MachineMemOperand.
This class defines information used to lower LLVM code to legal SelectionDAG operators that the targe...
Target-Independent Code Generator Pass Configuration Options.
INLINEASM - Represents an inline asm block.
Definition: ISDOpcodes.h:634
MachineInstrBuilder buildInsertVectorElement(unsigned Res, unsigned Val, unsigned Elt, unsigned Idx)
Build and insert Res<def> = G_INSERT_VECTOR_ELT Val, Elt, Idx.
Context object for machine code objects.
Definition: MCContext.h:59
unsigned getSizeInBits() const
Return the size of the specified value type in bits.
Definition: ValueTypes.h:292
Type * getType() const
All values are typed, get the type of this value.
Definition: Value.h:245
unsigned getOpcode() const
Returns a member of one of the enums like Instruction::Add.
Definition: Instruction.h:125
An instruction for storing to memory.
Definition: Instructions.h:306
static LLT scalar(unsigned SizeInBits)
Get a low-level scalar or aggregate "bag of bits".
amdgpu Simplify well known AMD library false Value * Callee
MachineBasicBlock * CreateMachineBasicBlock(const BasicBlock *bb=nullptr)
CreateMachineBasicBlock - Allocate a new MachineBasicBlock.
Value * getOperand(unsigned i) const
Definition: User.h:154
MCContext & getContext() const
bool isVoidTy() const
Return true if this is &#39;void&#39;.
Definition: Type.h:141
The memory access is volatile.
const BasicBlock & getEntryBlock() const
Definition: Function.h:572
IntegerType * getIntPtrType(LLVMContext &C, unsigned AddressSpace=0) const
Returns an integer type with size at least as big as that of a pointer in the given address space...
Definition: DataLayout.cpp:702
succ_range successors()
Definition: InstrTypes.h:267
MachineInstrBuilder buildBr(MachineBasicBlock &BB)
Build and insert G_BR Dest.
The landingpad instruction holds all of the information necessary to generate correct exception handl...
const Value * getCalledValue() const
Get a pointer to the function that is invoked by this instruction.
CodeGenOpt::Level getOptLevel() const
Returns the optimization level: None, Less, Default, or Aggressive.
LLVM Basic Block Representation.
Definition: BasicBlock.h:59
void addInvoke(MachineBasicBlock *LandingPad, MCSymbol *BeginLabel, MCSymbol *EndLabel)
Provide the begin and end labels of an invoke style call and associate it with a try landing pad bloc...
const TargetSubtargetInfo & getSubtarget() const
getSubtarget - Return the subtarget for which this machine code is being compiled.
The instances of the Type class are immutable: once they are created, they are never changed...
Definition: Type.h:46
void getAnalysisUsage(AnalysisUsage &AU) const override
getAnalysisUsage - Subclasses that override getAnalysisUsage must call this.
virtual bool lowerCall(MachineIRBuilder &MIRBuilder, CallingConv::ID CallConv, const MachineOperand &Callee, const ArgInfo &OrigRet, ArrayRef< ArgInfo > OrigArgs) const
This hook must be implemented to lower the given call instruction, including argument and return valu...
Definition: CallLowering.h:185
DISubprogram * getSubprogram() const
Get the attached subprogram.
Definition: Metadata.cpp:1497
Conditional or Unconditional Branch instruction.
MachineInstrBuilder buildInstr(unsigned Opcode)
Build and insert <empty> = Opcode <empty>.
Value * getAddress() const
MachineFrameInfo & getFrameInfo()
getFrameInfo - Return the frame info object for the current function.
static GCRegistry::Add< CoreCLRGC > E("coreclr", "CoreCLR-compatible GC")
This is an important base class in LLVM.
Definition: Constant.h:42
Value * getValue() const
MachineInstrBuilder buildPtrMask(unsigned Res, unsigned Op0, uint32_t NumBits)
Build and insert Res<def> = G_PTR_MASK Op0, NumBits.
SmallSet - This maintains a set of unique values, optimizing for the case when the set is small (less...
Definition: SmallSet.h:36
This file contains the declarations for the subclasses of Constant, which represent the different fla...
const Instruction & front() const
Definition: BasicBlock.h:264
Indirect Branch Instruction.
MachineInstrBuilder buildIntrinsic(Intrinsic::ID ID, unsigned Res, bool HasSideEffects)
Build and insert either a G_INTRINSIC (if HasSideEffects is false) or G_INTRINSIC_W_SIDE_EFFECTS inst...
Helper class to build MachineInstr.
BasicBlock * getDefaultDest() const
unsigned getPrefTypeAlignment(Type *Ty) const
Returns the preferred stack/global alignment for the specified type.
Definition: DataLayout.cpp:692
bool isValidLocationForIntrinsic(const DILocation *DL) const
Check that a location is valid for this variable.
Represent the analysis usage information of a pass.
DILocalVariable * getVariable() const
Definition: IntrinsicInst.h:80
MachineInstrBuilder buildICmp(CmpInst::Predicate Pred, unsigned Res, unsigned Op0, unsigned Op1)
Build and insert a Res = G_ICMP Pred, Op0, Op1.
Predicate
This enumeration lists the possible predicates for CmpInst subclasses.
Definition: InstrTypes.h:853
MachineInstrBuilder buildInsert(unsigned Res, unsigned Src, unsigned Op, unsigned Index)
virtual const CallLowering * getCallLowering() const
DIExpression * getExpression() const
Definition: IntrinsicInst.h:84
Value * getPointerOperand()
Definition: Instructions.h:270
void remove(iterator MBBI)
self_iterator getIterator()
Definition: ilist_node.h:82
std::pair< NoneType, bool > insert(const T &V)
insert - Insert an element into the set if it isn&#39;t already there.
Definition: SmallSet.h:81
const MachineInstrBuilder & addSym(MCSymbol *Sym, unsigned char TargetFlags=0) const
void setMF(MachineFunction &)
static double log2(double V)
static Constant * getAllOnesValue(Type *Ty)
Get the all ones value.
Definition: Constants.cpp:261
1 1 1 1 Always true (always folded)
Definition: InstrTypes.h:870
MachineInstrBuilder buildBrIndirect(unsigned Tgt)
Build and insert G_BRINDIRECT Tgt.
MachineInstrBuilder buildSExtOrTrunc(unsigned Res, unsigned Op)
Build and insert Res<def> = G_SEXT Op, Res = G_TRUNC Op, or Res = COPY Op depending on the differing ...
BasicBlock * getSuccessor(unsigned i) const
TargetRegisterInfo base class - We assume that the target defines a static array of TargetRegisterDes...
const MachineBasicBlock & front() const
const Value * getArraySize() const
Get the number of elements allocated.
Definition: Instructions.h:93
Value * getIncomingValue(unsigned i) const
Return incoming value number x.
This class contains a discriminated union of information about pointers in memory operands...
unsigned getStackAlignment() const
getStackAlignment - This method returns the number of bytes to which the stack pointer must be aligne...
std::string & str()
Flushes the stream contents to the target string and returns the string&#39;s reference.
Definition: raw_ostream.h:482
INITIALIZE_PASS_END(RegBankSelect, DEBUG_TYPE, "Assign register bank of generic virtual registers", false, false) RegBankSelect
const std::string & getConstraintString() const
Definition: InlineAsm.h:82
Type * getAllocatedType() const
Return the type that is being allocated by the instruction.
Definition: Instructions.h:102
EH_LABEL - Represents a label in mid basic block used to track locations needed for debug and excepti...
Definition: ISDOpcodes.h:639
MachineInstrBuilder buildFrameIndex(unsigned Res, int Idx)
Build and insert Res<def> = G_FRAME_INDEX Idx.
LLT getLLTForType(Type &Ty, const DataLayout &DL)
Construct a low-level type based on an LLVM type.
The memory access writes data.
bool hasAddressTaken() const
Returns true if there are any uses of this basic block other than direct branches, switches, etc.
Definition: BasicBlock.h:376
MachineInstrBuilder buildConstDbgValue(const Constant &C, const MDNode *Variable, const MDNode *Expr)
Build and insert a DBG_VALUE instructions specifying that Variable is given by C (suitably modified b...
Predicate getPredicate(unsigned Condition, unsigned Hint)
Return predicate consisting of specified condition and hint bits.
Definition: PPCPredicates.h:85
unsigned getNumOperands() const
Definition: User.h:176
void addSuccessor(MachineBasicBlock *Succ, BranchProbability Prob=BranchProbability::getUnknown())
Add Succ as a successor of this MachineBasicBlock.
This is the shared class of boolean and integer constants.
Definition: Constants.h:84
void buildSequence(unsigned Res, ArrayRef< unsigned > Ops, ArrayRef< uint64_t > Indices)
Build and insert instructions to put Ops together at the specified p Indices to form a larger registe...
This is a &#39;vector&#39; (really, a variable-sized array), optimized for the case when the array is small...
Definition: SmallVector.h:864
MachineInstrBuilder buildCopy(unsigned Res, unsigned Op)
Build and insert Res<def> = COPY Op.
unsigned getABITypeAlignment(Type *Ty) const
Returns the minimum ABI-required alignment for the specified type.
Definition: DataLayout.cpp:682
unsigned getSizeInBits() const
Returns the total size of the type. Must only be called on sized types.
const MachineInstrBuilder & addMemOperand(MachineMemOperand *MMO) const
A collection of metadata nodes that might be associated with a memory access used by the alias-analys...
Definition: Metadata.h:642
virtual bool getTgtMemIntrinsic(IntrinsicInfo &, const CallInst &, unsigned) const
Given an intrinsic, checks if on the target the intrinsic will need to map to a MemIntrinsicNode (tou...
static Constant * get(Type *Ty, uint64_t V, bool isSigned=false)
If Ty is a vector type, return a Constant with a splat of the given value.
Definition: Constants.cpp:560
DebugLoc getDebugLoc()
Get the current instruction&#39;s debug location.
unsigned getNumIncomingValues() const
Return the number of incoming edges.
bool isLayoutSuccessor(const MachineBasicBlock *MBB) const
Return true if the specified MBB will be emitted immediately after this block, such that if this bloc...
Intrinsic::ID getIntrinsicID() const LLVM_READONLY
getIntrinsicID - This method returns the ID number of the specified function, or Intrinsic::not_intri...
Definition: Function.h:175
raw_ostream & dbgs()
dbgs() - This returns a reference to a raw_ostream for debugging messages.
Definition: Debug.cpp:132
unsigned Log2_32(uint32_t Value)
Return the floor log base 2 of the specified value, -1 if the value is zero.
Definition: MathExtras.h:531
unsigned getVectorNumElements() const
Definition: DerivedTypes.h:462
bool isIntPredicate() const
Definition: InstrTypes.h:945
static MachineOperand CreateES(const char *SymName, unsigned char TargetFlags=0)
static char ID
Definition: IRTranslator.h:60
Flags
Flags values. These may be or&#39;d together.
amdgpu Simplify well known AMD library false Value Value * Arg
const MachineBasicBlock * getParent() const
Definition: MachineInstr.h:139
The memory access reads data.
#define Success
static MachinePointerInfo getFixedStack(MachineFunction &MF, int FI, int64_t Offset=0)
Return a MachinePointerInfo record that refers to the specified FrameIndex.
uint64_t getTypeAllocSize(Type *Ty) const
Returns the offset in bytes between successive objects of the specified type, including alignment pad...
Definition: DataLayout.h:405
Function * getCalledFunction() const
Return the function called, or null if this is an indirect function invocation.
Predicate getPredicate() const
Return the predicate for this instruction.
Definition: InstrTypes.h:927
MachineInstrBuilder buildUndef(unsigned Dst)
Build and insert Res = IMPLICIT_DEF.
bool isVolatile() const
Return true if this is a store to a volatile memory location.
Definition: Instructions.h:339
MachineInstrBuilder buildDirectDbgValue(unsigned Reg, const MDNode *Variable, const MDNode *Expr)
Build and insert a DBG_VALUE instruction expressing the fact that the associated Variable lives in Re...
const DebugLoc & getDebugLoc() const
Return the debug location for this node as a DebugLoc.
Definition: Instruction.h:284
bool isInlineAsm() const
Check if this call is an inline asm statement.
const MachineInstrBuilder & addImm(int64_t Val) const
Add a new immediate operand.
uint64_t getElementOffset(unsigned Idx) const
Definition: DataLayout.h:515
void emplace_back(ArgTypes &&... Args)
Definition: SmallVector.h:656
MachineRegisterInfo & getRegInfo()
getRegInfo - Return information about the registers currently in use.
static IntegerType * getInt32Ty(LLVMContext &C)
Definition: Type.cpp:176
AtomicOrdering getOrdering() const
Returns the ordering constraint of this store instruction.
Definition: Instructions.h:358
This represents the llvm.dbg.value instruction.
bool isTokenTy() const
Return true if this is &#39;token&#39;.
Definition: Type.h:194
Value * getArgOperand(unsigned i) const
getArgOperand/setArgOperand - Return/set the i-th call argument.
virtual const TargetIntrinsicInfo * getIntrinsicInfo() const
If intrinsic information is available, return it. If not, return null.
BasicBlock * getIncomingBlock(unsigned i) const
Return incoming basic block number i.
SyncScope::ID getSyncScopeID() const
Returns the synchronization scope ID of this load instruction.
Definition: Instructions.h:245
void setMBB(MachineBasicBlock &MBB)
Set the insertion point to the end of MBB.
MachineInstrBuilder buildConstant(unsigned Res, const ConstantInt &Val)
Build and insert Res = G_CONSTANT Val.
const MachineInstrBuilder & addExternalSymbol(const char *FnName, unsigned char TargetFlags=0) const
#define I(x, y, z)
Definition: MD5.cpp:58
static Constant * getZeroValueForNegation(Type *Ty)
Floating point negation must be implemented with f(x) = -0.0 - x.
Definition: Constants.cpp:676
MachineInstrBuilder buildMul(unsigned Res, unsigned Op0, unsigned Op1)
Build and insert Res<def> = G_MUL Op0, Op1.
Pair of physical register and lane mask.
virtual const TargetFrameLowering * getFrameLowering() const
The memory access always returns the same value (or traps).
LLT getType(unsigned VReg) const
Get the low-level type of VReg or LLT{} if VReg is not a generic (target independent) virtual registe...
bool isZero() const
This is just a convenience method to make client code smaller for a common code.
Definition: Constants.h:193
LLVM_NODISCARD std::enable_if<!is_simple_type< Y >::value, typename cast_retty< X, const Y >::ret_type >::type dyn_cast(const Y &Val)
Definition: Casting.h:323
Value * getReturnValue() const
Convenience accessor. Returns null if there is no return value.
void getAnalysisUsage(AnalysisUsage &AU) const override
getAnalysisUsage - This function should be overriden by passes that need analysis information to do t...
bool isUnconditional() const
AsmDialect getDialect() const
Definition: InlineAsm.h:69
const Function * getFunction() const
getFunction - Return the LLVM function that this machine code represents
Multiway switch.
iterator_range< op_iterator > arg_operands()
Iteration adapter for range-for loops.
const Value * getCalledValue() const
Get a pointer to the function that is invoked by this instruction.
This file declares the IRTranslator pass.
assert(ImpDefSCC.getReg()==AMDGPU::SCC &&ImpDefSCC.isDef())
void insert(iterator MBBI, MachineBasicBlock *MBB)
MachineInstrBuilder buildCast(unsigned Dst, unsigned Src)
Build and insert an appropriate cast between two registers of equal size.
unsigned getPointerABIAlignment(unsigned AS=0) const
Layout pointer alignment FIXME: The defaults need to be removed once all of the backends/clients are ...
Definition: DataLayout.cpp:587
A raw_ostream that writes to an std::string.
Definition: raw_ostream.h:466
aarch64 promote const
Module * getParent()
Get the module that this global value is contained inside of...
Definition: GlobalValue.h:545
LLVM Value Representation.
Definition: Value.h:73
Constant * getPersonalityFn() const
Get the personality function associated with this function.
Definition: Function.cpp:1260
unsigned getSizeInBits(unsigned Reg, const MachineRegisterInfo &MRI, const TargetRegisterInfo &TRI) const
Get the size in bits of Reg.
uint64_t getTypeStoreSize(Type *Ty) const
Returns the maximum number of bytes that may be overwritten by storing the specified type...
Definition: DataLayout.h:388
constexpr char Size[]
Key for Kernel::Arg::Metadata::mSize.
This file describes how to lower LLVM calls to machine code calls.
void push_back(MachineBasicBlock *MBB)
unsigned getStackPointerRegisterToSaveRestore() const
If a physical register, this specifies the register that llvm.savestack/llvm.restorestack should save...
MachineInstrBuilder buildLoad(unsigned Res, unsigned Addr, MachineMemOperand &MMO)
Build and insert Res<def> = G_LOAD Addr, MMO.
INITIALIZE_PASS_BEGIN(IRTranslator, DEBUG_TYPE, "IRTranslator LLVM IR -> MI", false, false) INITIALIZE_PASS_END(IRTranslator
bool doesNotAccessMemory() const
Determine if the call does not access memory.
Invoke instruction.
#define DEBUG(X)
Definition: Debug.h:118
IRTranslator LLVM IR MI
void setRegClass(unsigned Reg, const TargetRegisterClass *RC)
setRegClass - Set the register class of the specified virtual register.
const MachineInstrBuilder & addDef(unsigned RegNo, unsigned Flags=0, unsigned SubReg=0) const
Add a virtual register definition operand.
bool isStaticAlloca() const
Return true if this alloca is in the entry block of the function and is a constant size...
const MachineInstrBuilder & addMBB(MachineBasicBlock *MBB, unsigned char TargetFlags=0) const
MachineInstrBuilder buildGlobalValue(unsigned Res, const GlobalValue *GV)
Build and insert Res<def> = G_GLOBAL_VALUE GV.
virtual bool lowerReturn(MachineIRBuilder &MIRBuilder, const Value *Val, unsigned VReg) const
This hook must be implemented to lower outgoing return values, described by Val, into the specified v...
Definition: CallLowering.h:145
MachineInstrBuilder buildMerge(unsigned Res, ArrayRef< unsigned > Ops)
Build and insert Res<def> = G_MERGE_VALUES Op0, ...
int CreateVariableSizedObject(unsigned Alignment, const AllocaInst *Alloca)
Notify the MachineFrameInfo object that a variable sized object has been created. ...
This represents the llvm.dbg.declare instruction.
Value * getPointerOperand()
Definition: Instructions.h:398
MachineInstr::mmo_iterator allocateMemRefsArray(unsigned long Num)
allocateMemRefsArray - Allocate an array to hold MachineMemOperand pointers.
The optimization diagnostic interface.
Statically lint checks LLVM IR
Definition: Lint.cpp:193
constexpr char Args[]
Key for Kernel::Metadata::mArgs.
int64_t getIndexedOffsetInType(Type *ElemTy, ArrayRef< Value *> Indices) const
Returns the offset from the beginning of the type for the specified indices.
Definition: DataLayout.cpp:729
0 0 0 0 Always false (always folded)
Definition: InstrTypes.h:855
IntegerType * Int32Ty
This file describes how to lower LLVM code to machine code.
const BasicBlock * getParent() const
Definition: Instruction.h:66
virtual const TargetRegisterClass * getPointerRegClass(const MachineFunction &MF, unsigned Kind=0) const
Returns a TargetRegisterClass used for pointer values.
an instruction to allocate memory on the stack
Definition: Instructions.h:60
This instruction inserts a struct field of array element value into an aggregate value.
gep_type_iterator gep_type_begin(const User *GEP)
size_type count(const T &V) const
count - Return 1 if the element is in the set, 0 otherwise.
Definition: SmallSet.h:65
MachineInstrBuilder buildBrCond(unsigned Tst, MachineBasicBlock &BB)
Build and insert G_BRCOND Tst, Dest.