//===-- llvm/CodeGen/GlobalISel/IRTranslator.cpp - IRTranslator --*- C++ -*-==//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
/// \file
/// This file implements the IRTranslator class.
//===----------------------------------------------------------------------===//

#include "llvm/CodeGen/GlobalISel/IRTranslator.h"

#include "llvm/ADT/SmallVector.h"
#include "llvm/CodeGen/GlobalISel/CallLowering.h"
#include "llvm/CodeGen/Analysis.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineModuleInfo.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/TargetPassConfig.h"
#include "llvm/IR/Constant.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/GetElementPtrTypeIterator.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/Type.h"
#include "llvm/IR/Value.h"
#include "llvm/Target/TargetIntrinsicInfo.h"
#include "llvm/Target/TargetLowering.h"

#define DEBUG_TYPE "irtranslator"

using namespace llvm;

char IRTranslator::ID = 0;
INITIALIZE_PASS_BEGIN(IRTranslator, DEBUG_TYPE, "IRTranslator LLVM IR -> MI",
                      false, false)
INITIALIZE_PASS_DEPENDENCY(TargetPassConfig)
INITIALIZE_PASS_END(IRTranslator, DEBUG_TYPE, "IRTranslator LLVM IR -> MI",
                    false, false)

static void reportTranslationError(const Value &V, const Twine &Message) {
  std::string ErrStorage;
  raw_string_ostream Err(ErrStorage);
  Err << Message << ": " << V << '\n';
  report_fatal_error(Err.str());
}

IRTranslator::IRTranslator() : MachineFunctionPass(ID), MRI(nullptr) {
  initializeIRTranslatorPass(*PassRegistry::getPassRegistry());
}

void IRTranslator::getAnalysisUsage(AnalysisUsage &AU) const {
  AU.addRequired<TargetPassConfig>();
  MachineFunctionPass::getAnalysisUsage(AU);
}

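// Values are mapped to virtual registers lazily: the first query for a Value
// creates its vreg, and Constants additionally get their defining instruction
// emitted in the entry block via translate(Constant, Reg).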
unsigned IRTranslator::getOrCreateVReg(const Value &Val) {
  unsigned &ValReg = ValToVReg[&Val];
  // Check if this is the first time we see Val.
  if (!ValReg) {
    // Fill ValRegsSequence with the sequence of registers
    // we need to concat together to produce the value.
    assert(Val.getType()->isSized() &&
           "Don't know how to create an empty vreg");
    unsigned VReg = MRI->createGenericVirtualRegister(LLT{*Val.getType(), *DL});
    ValReg = VReg;

    if (auto CV = dyn_cast<Constant>(&Val)) {
      bool Success = translate(*CV, VReg);
      if (!Success) {
        if (!TPC->isGlobalISelAbortEnabled()) {
          MF->getProperties().set(
              MachineFunctionProperties::Property::FailedISel);
          return VReg;
        }
        reportTranslationError(Val, "unable to translate constant");
      }
    }
  }
  return ValReg;
}

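// Frame indices are created on demand and cached, so every reference to the
// same alloca reuses a single stack object.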
int IRTranslator::getOrCreateFrameIndex(const AllocaInst &AI) {
  if (FrameIndices.find(&AI) != FrameIndices.end())
    return FrameIndices[&AI];

  unsigned ElementSize = DL->getTypeStoreSize(AI.getAllocatedType());
  unsigned Size =
      ElementSize * cast<ConstantInt>(AI.getArraySize())->getZExtValue();

  // Always allocate at least one byte.
  Size = std::max(Size, 1u);

  unsigned Alignment = AI.getAlignment();
  if (!Alignment)
    Alignment = DL->getABITypeAlignment(AI.getAllocatedType());

  int &FI = FrameIndices[&AI];
  FI = MF->getFrameInfo().CreateStackObject(Size, Alignment, false, &AI);
  return FI;
}

unsigned IRTranslator::getMemOpAlignment(const Instruction &I) {
  unsigned Alignment = 0;
  Type *ValTy = nullptr;
  if (const StoreInst *SI = dyn_cast<StoreInst>(&I)) {
    Alignment = SI->getAlignment();
    ValTy = SI->getValueOperand()->getType();
  } else if (const LoadInst *LI = dyn_cast<LoadInst>(&I)) {
    Alignment = LI->getAlignment();
    ValTy = LI->getType();
  } else if (!TPC->isGlobalISelAbortEnabled()) {
    MF->getProperties().set(
        MachineFunctionProperties::Property::FailedISel);
    return 1;
  } else
    llvm_unreachable("unhandled memory instruction");

  return Alignment ? Alignment : DL->getABITypeAlignment(ValTy);
}

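// Machine basic blocks are also created on demand; blocks whose address is
// taken in the IR are flagged so they remain valid indirect branch targets.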
MachineBasicBlock &IRTranslator::getOrCreateBB(const BasicBlock &BB) {
  MachineBasicBlock *&MBB = BBToMBB[&BB];
  if (!MBB) {
    MBB = MF->CreateMachineBasicBlock(&BB);
    MF->push_back(MBB);

    if (BB.hasAddressTaken())
      MBB->setHasAddressTaken();
  }
  return *MBB;
}

bool IRTranslator::translateBinaryOp(unsigned Opcode, const User &U,
                                     MachineIRBuilder &MIRBuilder) {
  // FIXME: handle signed/unsigned wrapping flags.

  // Get or create a virtual register for each value.
  // Unless the value is a Constant => loadimm cst?
  // or inline constant each time?
  // Creation of a virtual register needs to have a size.
  unsigned Op0 = getOrCreateVReg(*U.getOperand(0));
  unsigned Op1 = getOrCreateVReg(*U.getOperand(1));
  unsigned Res = getOrCreateVReg(U);
  MIRBuilder.buildInstr(Opcode).addDef(Res).addUse(Op0).addUse(Op1);
  return true;
}

bool IRTranslator::translateCompare(const User &U,
                                    MachineIRBuilder &MIRBuilder) {
  const CmpInst *CI = dyn_cast<CmpInst>(&U);
  unsigned Op0 = getOrCreateVReg(*U.getOperand(0));
  unsigned Op1 = getOrCreateVReg(*U.getOperand(1));
  unsigned Res = getOrCreateVReg(U);
  CmpInst::Predicate Pred =
      CI ? CI->getPredicate() : static_cast<CmpInst::Predicate>(
                                    cast<ConstantExpr>(U).getPredicate());

  if (CmpInst::isIntPredicate(Pred))
    MIRBuilder.buildICmp(Pred, Res, Op0, Op1);
  else
    MIRBuilder.buildFCmp(Pred, Res, Op0, Op1);

  return true;
}

bool IRTranslator::translateRet(const User &U, MachineIRBuilder &MIRBuilder) {
  const ReturnInst &RI = cast<ReturnInst>(U);
  const Value *Ret = RI.getReturnValue();
  // The target may mess up with the insertion point, but
  // this is not important as a return is the last instruction
  // of the block anyway.
  return CLI->lowerReturn(MIRBuilder, Ret, !Ret ? 0 : getOrCreateVReg(*Ret));
}

bool IRTranslator::translateBr(const User &U, MachineIRBuilder &MIRBuilder) {
  const BranchInst &BrInst = cast<BranchInst>(U);
  unsigned Succ = 0;
  if (!BrInst.isUnconditional()) {
    // We want a G_BRCOND to the true BB followed by an unconditional branch.
    unsigned Tst = getOrCreateVReg(*BrInst.getCondition());
    const BasicBlock &TrueTgt = *cast<BasicBlock>(BrInst.getSuccessor(Succ++));
    MachineBasicBlock &TrueBB = getOrCreateBB(TrueTgt);
    MIRBuilder.buildBrCond(Tst, TrueBB);
  }

  const BasicBlock &BrTgt = *cast<BasicBlock>(BrInst.getSuccessor(Succ));
  MachineBasicBlock &TgtBB = getOrCreateBB(BrTgt);
  MIRBuilder.buildBr(TgtBB);

  // Link successors.
  MachineBasicBlock &CurBB = MIRBuilder.getMBB();
  for (const BasicBlock *Succ : BrInst.successors())
    CurBB.addSuccessor(&getOrCreateBB(*Succ));
  return true;
}

bool IRTranslator::translateSwitch(const User &U,
                                   MachineIRBuilder &MIRBuilder) {
  // For now, just translate as a chain of conditional branches.
  // FIXME: could we share most of the logic/code in
  // SelectionDAGBuilder::visitSwitch between SelectionDAG and GlobalISel?
  // At first sight, it seems most of the logic in there is independent of
  // SelectionDAG-specifics and a lot of work went in to optimize switch
  // lowering in there.

  const SwitchInst &SwInst = cast<SwitchInst>(U);
  const unsigned SwCondValue = getOrCreateVReg(*SwInst.getCondition());

  LLT LLTi1 = LLT(*Type::getInt1Ty(U.getContext()), *DL);
  for (auto &CaseIt : SwInst.cases()) {
    const unsigned CaseValueReg = getOrCreateVReg(*CaseIt.getCaseValue());
    const unsigned Tst = MRI->createGenericVirtualRegister(LLTi1);
    MIRBuilder.buildICmp(CmpInst::ICMP_EQ, Tst, CaseValueReg, SwCondValue);
    MachineBasicBlock &CurBB = MIRBuilder.getMBB();
    MachineBasicBlock &TrueBB = getOrCreateBB(*CaseIt.getCaseSuccessor());

    MIRBuilder.buildBrCond(Tst, TrueBB);
    CurBB.addSuccessor(&TrueBB);

    MachineBasicBlock *FalseBB =
        MF->CreateMachineBasicBlock(SwInst.getParent());
    MF->push_back(FalseBB);
    MIRBuilder.buildBr(*FalseBB);
    CurBB.addSuccessor(FalseBB);

    MIRBuilder.setMBB(*FalseBB);
  }
  // handle default case
  MachineBasicBlock &DefaultBB = getOrCreateBB(*SwInst.getDefaultDest());
  MIRBuilder.buildBr(DefaultBB);
  MIRBuilder.getMBB().addSuccessor(&DefaultBB);

  return true;
}

bool IRTranslator::translateLoad(const User &U, MachineIRBuilder &MIRBuilder) {
  const LoadInst &LI = cast<LoadInst>(U);

  if (!TPC->isGlobalISelAbortEnabled() && LI.isAtomic())
    return false;

  assert(!LI.isAtomic() && "only non-atomic loads are supported at the moment");
  auto Flags = LI.isVolatile() ? MachineMemOperand::MOVolatile
                               : MachineMemOperand::MONone;
  Flags |= MachineMemOperand::MOLoad;

  unsigned Res = getOrCreateVReg(LI);
  unsigned Addr = getOrCreateVReg(*LI.getPointerOperand());
  LLT VTy{*LI.getType(), *DL}, PTy{*LI.getPointerOperand()->getType(), *DL};
  MIRBuilder.buildLoad(
      Res, Addr,
      *MF->getMachineMemOperand(MachinePointerInfo(LI.getPointerOperand()),
                                Flags, DL->getTypeStoreSize(LI.getType()),
                                getMemOpAlignment(LI)));
  return true;
}

bool IRTranslator::translateStore(const User &U, MachineIRBuilder &MIRBuilder) {
  const StoreInst &SI = cast<StoreInst>(U);

  if (!TPC->isGlobalISelAbortEnabled() && SI.isAtomic())
    return false;

  assert(!SI.isAtomic() && "only non-atomic stores supported at the moment");
  auto Flags = SI.isVolatile() ? MachineMemOperand::MOVolatile
                               : MachineMemOperand::MONone;
  Flags |= MachineMemOperand::MOStore;

  unsigned Val = getOrCreateVReg(*SI.getValueOperand());
  unsigned Addr = getOrCreateVReg(*SI.getPointerOperand());
  LLT VTy{*SI.getValueOperand()->getType(), *DL},
      PTy{*SI.getPointerOperand()->getType(), *DL};

  MIRBuilder.buildStore(
      Val, Addr,
      *MF->getMachineMemOperand(MachinePointerInfo(SI.getPointerOperand()),
                                Flags,
                                DL->getTypeStoreSize(
                                    SI.getValueOperand()->getType()),
                                getMemOpAlignment(SI)));
  return true;
}

bool IRTranslator::translateExtractValue(const User &U,
                                         MachineIRBuilder &MIRBuilder) {
  const Value *Src = U.getOperand(0);
  Type *Int32Ty = Type::getInt32Ty(U.getContext());
  SmallVector<Value *, 1> Indices;

  // getIndexedOffsetInType is designed for GEPs, so the first index is the
  // usual array element rather than looking into the actual aggregate.
  Indices.push_back(ConstantInt::get(Int32Ty, 0));

  if (const ExtractValueInst *EVI = dyn_cast<ExtractValueInst>(&U)) {
    for (auto Idx : EVI->indices())
      Indices.push_back(ConstantInt::get(Int32Ty, Idx));
  } else {
    for (unsigned i = 1; i < U.getNumOperands(); ++i)
      Indices.push_back(U.getOperand(i));
  }

  uint64_t Offset = 8 * DL->getIndexedOffsetInType(Src->getType(), Indices);

  unsigned Res = getOrCreateVReg(U);
  MIRBuilder.buildExtract(Res, Offset, getOrCreateVReg(*Src));

  return true;
}

bool IRTranslator::translateInsertValue(const User &U,
                                        MachineIRBuilder &MIRBuilder) {
  const Value *Src = U.getOperand(0);
  Type *Int32Ty = Type::getInt32Ty(U.getContext());
  SmallVector<Value *, 1> Indices;

  // getIndexedOffsetInType is designed for GEPs, so the first index is the
  // usual array element rather than looking into the actual aggregate.
  Indices.push_back(ConstantInt::get(Int32Ty, 0));

  if (const InsertValueInst *IVI = dyn_cast<InsertValueInst>(&U)) {
    for (auto Idx : IVI->indices())
      Indices.push_back(ConstantInt::get(Int32Ty, Idx));
  } else {
    for (unsigned i = 2; i < U.getNumOperands(); ++i)
      Indices.push_back(U.getOperand(i));
  }

  uint64_t Offset = 8 * DL->getIndexedOffsetInType(Src->getType(), Indices);

  unsigned Res = getOrCreateVReg(U);
  const Value &Inserted = *U.getOperand(1);
  MIRBuilder.buildInsert(Res, getOrCreateVReg(*Src), getOrCreateVReg(Inserted),
                         Offset);

  return true;
}

bool IRTranslator::translateSelect(const User &U,
                                   MachineIRBuilder &MIRBuilder) {
  MIRBuilder.buildSelect(getOrCreateVReg(U), getOrCreateVReg(*U.getOperand(0)),
                         getOrCreateVReg(*U.getOperand(1)),
                         getOrCreateVReg(*U.getOperand(2)));
  return true;
}

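// Bitcasts between types with the same low-level representation are no-ops:
// the source vreg is reused (or copied into an already-assigned vreg) instead
// of emitting a G_BITCAST.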
bool IRTranslator::translateBitCast(const User &U,
                                    MachineIRBuilder &MIRBuilder) {
  if (LLT{*U.getOperand(0)->getType(), *DL} == LLT{*U.getType(), *DL}) {
    unsigned &Reg = ValToVReg[&U];
    if (Reg)
      MIRBuilder.buildCopy(Reg, getOrCreateVReg(*U.getOperand(0)));
    else
      Reg = getOrCreateVReg(*U.getOperand(0));
    return true;
  }
  return translateCast(TargetOpcode::G_BITCAST, U, MIRBuilder);
}

bool IRTranslator::translateCast(unsigned Opcode, const User &U,
                                 MachineIRBuilder &MIRBuilder) {
  unsigned Op = getOrCreateVReg(*U.getOperand(0));
  unsigned Res = getOrCreateVReg(U);
  MIRBuilder.buildInstr(Opcode).addDef(Res).addUse(Op);
  return true;
}

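// GEPs are lowered by walking the indices: constant contributions are folded
// into a running byte offset, and G_MUL/G_GEP instructions are emitted only
// when a variable index forces the address computation to be materialized.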
bool IRTranslator::translateGetElementPtr(const User &U,
                                          MachineIRBuilder &MIRBuilder) {
  // FIXME: support vector GEPs.
  if (U.getType()->isVectorTy())
    return false;

  Value &Op0 = *U.getOperand(0);
  unsigned BaseReg = getOrCreateVReg(Op0);
  LLT PtrTy{*Op0.getType(), *DL};
  unsigned PtrSize = DL->getPointerSizeInBits(PtrTy.getAddressSpace());
  LLT OffsetTy = LLT::scalar(PtrSize);

  int64_t Offset = 0;
  for (gep_type_iterator GTI = gep_type_begin(&U), E = gep_type_end(&U);
       GTI != E; ++GTI) {
    const Value *Idx = GTI.getOperand();
    if (StructType *StTy = GTI.getStructTypeOrNull()) {
      unsigned Field = cast<Constant>(Idx)->getUniqueInteger().getZExtValue();
      Offset += DL->getStructLayout(StTy)->getElementOffset(Field);
      continue;
    } else {
      uint64_t ElementSize = DL->getTypeAllocSize(GTI.getIndexedType());

      // If this is a scalar constant or a splat vector of constants,
      // handle it quickly.
      if (const auto *CI = dyn_cast<ConstantInt>(Idx)) {
        Offset += ElementSize * CI->getSExtValue();
        continue;
      }

      if (Offset != 0) {
        unsigned NewBaseReg = MRI->createGenericVirtualRegister(PtrTy);
        unsigned OffsetReg = MRI->createGenericVirtualRegister(OffsetTy);
        MIRBuilder.buildConstant(OffsetReg, Offset);
        MIRBuilder.buildGEP(NewBaseReg, BaseReg, OffsetReg);

        BaseReg = NewBaseReg;
        Offset = 0;
      }

      // N = N + Idx * ElementSize;
      unsigned ElementSizeReg = MRI->createGenericVirtualRegister(OffsetTy);
      MIRBuilder.buildConstant(ElementSizeReg, ElementSize);

      unsigned IdxReg = getOrCreateVReg(*Idx);
      if (MRI->getType(IdxReg) != OffsetTy) {
        unsigned NewIdxReg = MRI->createGenericVirtualRegister(OffsetTy);
        MIRBuilder.buildSExtOrTrunc(NewIdxReg, IdxReg);
        IdxReg = NewIdxReg;
      }

      unsigned OffsetReg = MRI->createGenericVirtualRegister(OffsetTy);
      MIRBuilder.buildMul(OffsetReg, ElementSizeReg, IdxReg);

      unsigned NewBaseReg = MRI->createGenericVirtualRegister(PtrTy);
      MIRBuilder.buildGEP(NewBaseReg, BaseReg, OffsetReg);
      BaseReg = NewBaseReg;
    }
  }

  if (Offset != 0) {
    unsigned OffsetReg = MRI->createGenericVirtualRegister(OffsetTy);
    MIRBuilder.buildConstant(OffsetReg, Offset);
    MIRBuilder.buildGEP(getOrCreateVReg(U), BaseReg, OffsetReg);
    return true;
  }

  MIRBuilder.buildCopy(getOrCreateVReg(U), BaseReg);
  return true;
}

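// The memcpy intrinsic is lowered as a plain libcall to memcpy; lowering bails
// out for non-zero address spaces or when the size type doesn't match the
// pointer width.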
bool IRTranslator::translateMemcpy(const CallInst &CI,
                                   MachineIRBuilder &MIRBuilder) {
  LLT SizeTy{*CI.getArgOperand(2)->getType(), *DL};
  if (cast<PointerType>(CI.getArgOperand(0)->getType())->getAddressSpace() !=
          0 ||
      cast<PointerType>(CI.getArgOperand(1)->getType())->getAddressSpace() !=
          0 ||
      SizeTy.getSizeInBits() != DL->getPointerSizeInBits(0))
    return false;

  SmallVector<CallLowering::ArgInfo, 8> Args;
  for (int i = 0; i < 3; ++i) {
    const auto &Arg = CI.getArgOperand(i);
    Args.emplace_back(getOrCreateVReg(*Arg), Arg->getType());
  }

  MachineOperand Callee = MachineOperand::CreateES("memcpy");

  return CLI->lowerCall(MIRBuilder, Callee,
                        CallLowering::ArgInfo(0, CI.getType()), Args);
}

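// Emits the target's LOAD_STACK_GUARD pseudo and, when the IR has a stack
// guard global, attaches a memory operand describing the invariant load.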
void IRTranslator::getStackGuard(unsigned DstReg,
                                 MachineIRBuilder &MIRBuilder) {
  auto MIB = MIRBuilder.buildInstr(TargetOpcode::LOAD_STACK_GUARD);
  MIB.addDef(DstReg);

  auto &TLI = *MF->getSubtarget().getTargetLowering();
  Value *Global = TLI.getSDagStackGuard(*MF->getFunction()->getParent());
  if (!Global)
    return;

  MachinePointerInfo MPInfo(Global);
  MachineInstr::mmo_iterator MemRefs = MF->allocateMemRefsArray(1);
  auto Flags = MachineMemOperand::MOLoad | MachineMemOperand::MOInvariant |
               MachineMemOperand::MODereferenceable;
  *MemRefs =
      MF->getMachineMemOperand(MPInfo, Flags, DL->getPointerSizeInBits() / 8,
                               DL->getPointerABIAlignment());
  MIB.setMemRefs(MemRefs, MemRefs + 1);
}

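// The *.with.overflow intrinsics produce a {result, overflow-bit} pair; both
// pieces are packed into the call's single vreg with G_SEQUENCE.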
bool IRTranslator::translateOverflowIntrinsic(const CallInst &CI, unsigned Op,
                                              MachineIRBuilder &MIRBuilder) {
  LLT Ty{*CI.getOperand(0)->getType(), *DL};
  LLT s1 = LLT::scalar(1);
  unsigned Width = Ty.getSizeInBits();
  unsigned Res = MRI->createGenericVirtualRegister(Ty);
  unsigned Overflow = MRI->createGenericVirtualRegister(s1);
  auto MIB = MIRBuilder.buildInstr(Op)
                 .addDef(Res)
                 .addDef(Overflow)
                 .addUse(getOrCreateVReg(*CI.getOperand(0)))
                 .addUse(getOrCreateVReg(*CI.getOperand(1)));

  if (Op == TargetOpcode::G_UADDE || Op == TargetOpcode::G_USUBE) {
    unsigned Zero = MRI->createGenericVirtualRegister(s1);
    EntryBuilder.buildConstant(Zero, 0);
    MIB.addUse(Zero);
  }

  MIRBuilder.buildSequence(getOrCreateVReg(CI), Res, 0, Overflow, Width);
  return true;
}

bool IRTranslator::translateKnownIntrinsic(const CallInst &CI, Intrinsic::ID ID,
                                           MachineIRBuilder &MIRBuilder) {
  switch (ID) {
  default:
    break;
  case Intrinsic::dbg_declare:
  case Intrinsic::dbg_value:
    // FIXME: these obviously need to be supported properly.
    MF->getProperties().set(
        MachineFunctionProperties::Property::FailedISel);
    return true;
  case Intrinsic::uadd_with_overflow:
    return translateOverflowIntrinsic(CI, TargetOpcode::G_UADDE, MIRBuilder);
  case Intrinsic::sadd_with_overflow:
    return translateOverflowIntrinsic(CI, TargetOpcode::G_SADDO, MIRBuilder);
  case Intrinsic::usub_with_overflow:
    return translateOverflowIntrinsic(CI, TargetOpcode::G_USUBE, MIRBuilder);
  case Intrinsic::ssub_with_overflow:
    return translateOverflowIntrinsic(CI, TargetOpcode::G_SSUBO, MIRBuilder);
  case Intrinsic::umul_with_overflow:
    return translateOverflowIntrinsic(CI, TargetOpcode::G_UMULO, MIRBuilder);
  case Intrinsic::smul_with_overflow:
    return translateOverflowIntrinsic(CI, TargetOpcode::G_SMULO, MIRBuilder);
  case Intrinsic::memcpy:
    return translateMemcpy(CI, MIRBuilder);
  case Intrinsic::eh_typeid_for: {
    GlobalValue *GV = ExtractTypeInfo(CI.getArgOperand(0));
    unsigned Reg = getOrCreateVReg(CI);
    unsigned TypeID = MF->getTypeIDFor(GV);
    MIRBuilder.buildConstant(Reg, TypeID);
    return true;
  }
  case Intrinsic::objectsize: {
    // If we don't know by now, we're never going to know.
    const ConstantInt *Min = cast<ConstantInt>(CI.getArgOperand(1));

    MIRBuilder.buildConstant(getOrCreateVReg(CI), Min->isZero() ? -1ULL : 0);
    return true;
  }
  case Intrinsic::stackguard:
    getStackGuard(getOrCreateVReg(CI), MIRBuilder);
    return true;
  case Intrinsic::stackprotector: {
    LLT PtrTy{*CI.getArgOperand(0)->getType(), *DL};
    unsigned GuardVal = MRI->createGenericVirtualRegister(PtrTy);
    getStackGuard(GuardVal, MIRBuilder);

    AllocaInst *Slot = cast<AllocaInst>(CI.getArgOperand(1));
    MIRBuilder.buildStore(
        GuardVal, getOrCreateVReg(*Slot),
        *MF->getMachineMemOperand(
            MachinePointerInfo::getFixedStack(*MF,
                                              getOrCreateFrameIndex(*Slot)),
            MachineMemOperand::MOStore | MachineMemOperand::MOVolatile,
            PtrTy.getSizeInBits() / 8, 8));
    return true;
  }
  }
  return false;
}

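// Plain calls (direct or indirect) go through CallLowering; calls to
// intrinsics are either expanded by translateKnownIntrinsic or emitted as
// G_INTRINSIC[_W_SIDE_EFFECTS] instructions.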
bool IRTranslator::translateCall(const User &U, MachineIRBuilder &MIRBuilder) {
  const CallInst &CI = cast<CallInst>(U);
  auto TII = MF->getTarget().getIntrinsicInfo();
  const Function *F = CI.getCalledFunction();

  if (!F || !F->isIntrinsic()) {
    unsigned Res = CI.getType()->isVoidTy() ? 0 : getOrCreateVReg(CI);
    SmallVector<unsigned, 8> Args;
    for (auto &Arg: CI.arg_operands())
      Args.push_back(getOrCreateVReg(*Arg));

    return CLI->lowerCall(MIRBuilder, CI, Res, Args, [&]() {
      return getOrCreateVReg(*CI.getCalledValue());
    });
  }

  Intrinsic::ID ID = F->getIntrinsicID();
  if (TII && ID == Intrinsic::not_intrinsic)
    ID = static_cast<Intrinsic::ID>(TII->getIntrinsicID(F));

  assert(ID != Intrinsic::not_intrinsic && "unknown intrinsic");

  if (translateKnownIntrinsic(CI, ID, MIRBuilder))
    return true;

  unsigned Res = CI.getType()->isVoidTy() ? 0 : getOrCreateVReg(CI);
  MachineInstrBuilder MIB =
      MIRBuilder.buildIntrinsic(ID, Res, !CI.doesNotAccessMemory());

  for (auto &Arg : CI.arg_operands()) {
    if (ConstantInt *CI = dyn_cast<ConstantInt>(Arg))
      MIB.addImm(CI->getSExtValue());
    else
      MIB.addUse(getOrCreateVReg(*Arg));
  }
  return true;
}

bool IRTranslator::translateInvoke(const User &U,
                                   MachineIRBuilder &MIRBuilder) {
  const InvokeInst &I = cast<InvokeInst>(U);
  MCContext &Context = MF->getContext();

  const BasicBlock *ReturnBB = I.getSuccessor(0);
  const BasicBlock *EHPadBB = I.getSuccessor(1);

  const Value *Callee(I.getCalledValue());
  const Function *Fn = dyn_cast<Function>(Callee);
  if (isa<InlineAsm>(Callee))
    return false;

  // FIXME: support invoking patchpoint and statepoint intrinsics.
  if (Fn && Fn->isIntrinsic())
    return false;

  // FIXME: support whatever these are.
  if (I.countOperandBundlesOfType(LLVMContext::OB_deopt))
    return false;

  // FIXME: support Windows exception handling.
  if (!isa<LandingPadInst>(EHPadBB->front()))
    return false;

  // Emit the actual call, bracketed by EH_LABELs so that the MF knows about
  // the region covered by the try.
  MCSymbol *BeginSymbol = Context.createTempSymbol();
  MIRBuilder.buildInstr(TargetOpcode::EH_LABEL).addSym(BeginSymbol);

  unsigned Res = I.getType()->isVoidTy() ? 0 : getOrCreateVReg(I);
  SmallVector<CallLowering::ArgInfo, 8> Args;
  for (auto &Arg: I.arg_operands())
    Args.emplace_back(getOrCreateVReg(*Arg), Arg->getType());

  if (!CLI->lowerCall(MIRBuilder, MachineOperand::CreateGA(Fn, 0),
                      CallLowering::ArgInfo(Res, I.getType()), Args))
    return false;

  MCSymbol *EndSymbol = Context.createTempSymbol();
  MIRBuilder.buildInstr(TargetOpcode::EH_LABEL).addSym(EndSymbol);

  // FIXME: track probabilities.
  MachineBasicBlock &EHPadMBB = getOrCreateBB(*EHPadBB),
                    &ReturnMBB = getOrCreateBB(*ReturnBB);
  MF->addInvoke(&EHPadMBB, BeginSymbol, EndSymbol);
  MIRBuilder.getMBB().addSuccessor(&ReturnMBB);
  MIRBuilder.getMBB().addSuccessor(&EHPadMBB);

  return true;
}

bool IRTranslator::translateLandingPad(const User &U,
                                       MachineIRBuilder &MIRBuilder) {
  const LandingPadInst &LP = cast<LandingPadInst>(U);

  MachineBasicBlock &MBB = MIRBuilder.getMBB();
  addLandingPadInfo(LP, MBB);

  MBB.setIsEHPad();

  // If there aren't registers to copy the values into (e.g., during SjLj
  // exceptions), then don't bother.
  auto &TLI = *MF->getSubtarget().getTargetLowering();
  const Constant *PersonalityFn = MF->getFunction()->getPersonalityFn();
  if (TLI.getExceptionPointerRegister(PersonalityFn) == 0 &&
      TLI.getExceptionSelectorRegister(PersonalityFn) == 0)
    return true;

  // If landingpad's return type is token type, we don't create DAG nodes
  // for its exception pointer and selector value. The extraction of exception
  // pointer or selector value from token type landingpads is not currently
  // supported.
  if (LP.getType()->isTokenTy())
    return true;

  // Add a label to mark the beginning of the landing pad. Deletion of the
  // landing pad can thus be detected via the MachineModuleInfo.
  MIRBuilder.buildInstr(TargetOpcode::EH_LABEL)
      .addSym(MF->addLandingPad(&MBB));

  // Mark exception register as live in.
  SmallVector<unsigned, 2> Regs;
  SmallVector<uint64_t, 2> Offsets;
  LLT p0 = LLT::pointer(0, DL->getPointerSizeInBits());
  if (unsigned Reg = TLI.getExceptionPointerRegister(PersonalityFn)) {
    unsigned VReg = MRI->createGenericVirtualRegister(p0);
    MIRBuilder.buildCopy(VReg, Reg);
    Regs.push_back(VReg);
    Offsets.push_back(0);
  }

  if (unsigned Reg = TLI.getExceptionSelectorRegister(PersonalityFn)) {
    unsigned VReg = MRI->createGenericVirtualRegister(p0);
    MIRBuilder.buildCopy(VReg, Reg);
    Regs.push_back(VReg);
    Offsets.push_back(p0.getSizeInBits());
  }

  MIRBuilder.buildSequence(getOrCreateVReg(LP), Regs, Offsets);
  return true;
}

bool IRTranslator::translateStaticAlloca(const AllocaInst &AI,
                                         MachineIRBuilder &MIRBuilder) {
  if (!TPC->isGlobalISelAbortEnabled() && !AI.isStaticAlloca())
    return false;

  assert(AI.isStaticAlloca() && "only handle static allocas now");
  unsigned Res = getOrCreateVReg(AI);
  int FI = getOrCreateFrameIndex(AI);
  MIRBuilder.buildFrameIndex(Res, FI);
  return true;
}

bool IRTranslator::translatePHI(const User &U, MachineIRBuilder &MIRBuilder) {
  const PHINode &PI = cast<PHINode>(U);
  auto MIB = MIRBuilder.buildInstr(TargetOpcode::PHI);
  MIB.addDef(getOrCreateVReg(PI));

  PendingPHIs.emplace_back(&PI, MIB.getInstr());
  return true;
}

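// PHI operands are filled in only after the whole function has been
// translated, so every incoming value's vreg and MachineBasicBlock is known
// to exist by then.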
void IRTranslator::finishPendingPhis() {
  for (std::pair<const PHINode *, MachineInstr *> &Phi : PendingPHIs) {
    const PHINode *PI = Phi.first;
    MachineInstrBuilder MIB(*MF, Phi.second);

    // All MachineBasicBlocks exist, add them to the PHI. We assume IRTranslator
    // won't create extra control flow here, otherwise we need to find the
    // dominating predecessor here (or perhaps force the weirder IRTranslators
    // to provide a simple boundary).
    for (unsigned i = 0; i < PI->getNumIncomingValues(); ++i) {
      assert(BBToMBB[PI->getIncomingBlock(i)]->isSuccessor(MIB->getParent()) &&
             "I appear to have misunderstood Machine PHIs");
      MIB.addUse(getOrCreateVReg(*PI->getIncomingValue(i)));
      MIB.addMBB(BBToMBB[PI->getIncomingBlock(i)]);
    }
  }
}

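// Dispatch on the IR opcode: Instruction.def expands to one case per opcode,
// each forwarding to the corresponding translate##OPCODE method.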
bool IRTranslator::translate(const Instruction &Inst) {
  CurBuilder.setDebugLoc(Inst.getDebugLoc());
  switch(Inst.getOpcode()) {
#define HANDLE_INST(NUM, OPCODE, CLASS) \
    case Instruction::OPCODE: return translate##OPCODE(Inst, CurBuilder);
#include "llvm/IR/Instruction.def"
  default:
    if (!TPC->isGlobalISelAbortEnabled())
      return false;
    llvm_unreachable("unknown opcode");
  }
}

bool IRTranslator::translate(const Constant &C, unsigned Reg) {
  if (auto CI = dyn_cast<ConstantInt>(&C))
    EntryBuilder.buildConstant(Reg, *CI);
  else if (auto CF = dyn_cast<ConstantFP>(&C))
    EntryBuilder.buildFConstant(Reg, *CF);
  else if (isa<UndefValue>(C))
    EntryBuilder.buildInstr(TargetOpcode::IMPLICIT_DEF).addDef(Reg);
  else if (isa<ConstantPointerNull>(C))
    EntryBuilder.buildConstant(Reg, 0);
  else if (auto GV = dyn_cast<GlobalValue>(&C))
    EntryBuilder.buildGlobalValue(Reg, GV);
  else if (auto CE = dyn_cast<ConstantExpr>(&C)) {
    switch(CE->getOpcode()) {
#define HANDLE_INST(NUM, OPCODE, CLASS) \
      case Instruction::OPCODE: return translate##OPCODE(*CE, EntryBuilder);
#include "llvm/IR/Instruction.def"
    default:
      if (!TPC->isGlobalISelAbortEnabled())
        return false;
      llvm_unreachable("unknown opcode");
    }
  } else if (!TPC->isGlobalISelAbortEnabled())
    return false;
  else
    llvm_unreachable("unhandled constant kind");

  return true;
}

void IRTranslator::finalizeFunction() {
  // Release the memory used by the different maps we
  // needed during the translation.
  PendingPHIs.clear();
  ValToVReg.clear();
  FrameIndices.clear();
  Constants.clear();
}

bool IRTranslator::runOnMachineFunction(MachineFunction &CurMF) {
  MF = &CurMF;
  const Function &F = *MF->getFunction();
  if (F.empty())
    return false;
  CLI = MF->getSubtarget().getCallLowering();
  CurBuilder.setMF(*MF);
  EntryBuilder.setMF(*MF);
  MRI = &MF->getRegInfo();
  DL = &F.getParent()->getDataLayout();
  TPC = &getAnalysis<TargetPassConfig>();

  assert(PendingPHIs.empty() && "stale PHIs");

  // Setup a separate basic-block for the arguments and constants, falling
  // through to the IR-level Function's entry block.
  MachineBasicBlock *EntryBB = MF->CreateMachineBasicBlock();
  MF->push_back(EntryBB);
  EntryBB->addSuccessor(&getOrCreateBB(F.front()));
  EntryBuilder.setMBB(*EntryBB);

  // Lower the actual args into this basic block.
  SmallVector<unsigned, 8> VRegArgs;
  for (const Argument &Arg: F.args())
    VRegArgs.push_back(getOrCreateVReg(Arg));
  bool Succeeded = CLI->lowerFormalArguments(EntryBuilder, F, VRegArgs);
  if (!Succeeded) {
    if (!TPC->isGlobalISelAbortEnabled()) {
      MF->getProperties().set(
          MachineFunctionProperties::Property::FailedISel);
      finalizeFunction();
      return false;
    }
    report_fatal_error("Unable to lower arguments");
  }

  // And translate the function!
  for (const BasicBlock &BB: F) {
    MachineBasicBlock &MBB = getOrCreateBB(BB);
    // Set the insertion point of all the following translations to
    // the end of this basic block.
    CurBuilder.setMBB(MBB);

    for (const Instruction &Inst: BB) {
      Succeeded &= translate(Inst);
      if (!Succeeded) {
        if (TPC->isGlobalISelAbortEnabled())
          reportTranslationError(Inst, "unable to translate instruction");
        MF->getProperties().set(
            MachineFunctionProperties::Property::FailedISel);
        break;
      }
    }
  }

  if (Succeeded) {
    finishPendingPhis();

    // Now that the MachineFrameInfo has been configured, no further changes to
    // the reserved registers are possible.
    MRI->freezeReservedRegs(*MF);

    // Merge the argument lowering and constants block with its single
    // successor, the LLVM-IR entry block. We want the basic block to
    // be maximal.
    assert(EntryBB->succ_size() == 1 &&
           "Custom BB used for lowering should have only one successor");
    // Get the successor of the current entry block.
    MachineBasicBlock &NewEntryBB = **EntryBB->succ_begin();
    assert(NewEntryBB.pred_size() == 1 &&
           "LLVM-IR entry block has a predecessor!?");
    // Move all the instructions from the current entry block to the
    // new entry block.
    NewEntryBB.splice(NewEntryBB.begin(), EntryBB, EntryBB->begin(),
                      EntryBB->end());

    // Update the live-in information for the new entry block.
    for (const MachineBasicBlock::RegisterMaskPair &LiveIn : EntryBB->liveins())
      NewEntryBB.addLiveIn(LiveIn);
    NewEntryBB.sortUniqueLiveIns();

    // Get rid of the now empty basic block.
    EntryBB->removeSuccessor(&NewEntryBB);
    MF->remove(EntryBB);

    assert(&MF->front() == &NewEntryBB &&
           "New entry wasn't next in the list of basic block!");
  }

  finalizeFunction();

  return false;
}