LLVM 4.0.0
ImplicitNullChecks.cpp
//===-- ImplicitNullChecks.cpp - Fold null checks into memory accesses ----===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This pass turns explicit null checks of the form
//
//   test %r10, %r10
//   je throw_npe
//   movl (%r10), %esi
//   ...
//
// into
//
//   faulting_load_op("movl (%r10), %esi", throw_npe)
//   ...
//
// With the help of a runtime that understands the .fault_maps section,
// faulting_load_op branches to throw_npe if executing movl (%r10), %esi incurs
// a page fault.
//
//===----------------------------------------------------------------------===//

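// The pass only considers branches whose originating IR terminator carries
// !make.implicit metadata (LLVMContext::MD_make_implicit); see
// analyzeBlockForNullChecks below. A sketch of the kind of IR a frontend or an
// earlier pass is expected to produce (block, value, and metadata names here
// are illustrative, not taken from this file):
//
//   %is.null = icmp eq i32* %ptr, null
//   br i1 %is.null, label %throw.npe, label %not.null, !make.implicit !0
//
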
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/DenseSet.h"
#include "llvm/ADT/Optional.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineMemOperand.h"
#include "llvm/CodeGen/MachineModuleInfo.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/Passes.h"
#include "llvm/IR/BasicBlock.h"
#include "llvm/IR/Instruction.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
#include "llvm/Target/TargetInstrInfo.h"
#include "llvm/Target/TargetRegisterInfo.h"
#include "llvm/Target/TargetSubtargetInfo.h"

using namespace llvm;

static cl::opt<int> PageSize("imp-null-check-page-size",
                             cl::desc("The page size of the target in bytes"),
                             cl::init(4096));

static cl::opt<unsigned> MaxInstsToConsider(
    "imp-null-max-insts-to-consider",
    cl::desc("The max number of instructions to consider hoisting loads over "
             "(the algorithm is quadratic over this number)"),
    cl::init(8));
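// Note on the "quadratic" warning above: for every instruction scanned in the
// not-null successor, computeDependence (below) walks all of the instructions
// seen so far, so the scan is O(MaxInstsToConsider^2) in the worst case.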

#define DEBUG_TYPE "implicit-null-checks"

STATISTIC(NumImplicitNullChecks,
          "Number of explicit null checks made implicit");

namespace {

class ImplicitNullChecks : public MachineFunctionPass {
  /// Return true if \c computeDependence can process \p MI.
  static bool canHandle(const MachineInstr *MI);

  /// Helper function for \c computeDependence. Return true if \p A
  /// and \p B do not have any dependences between them, and can be
  /// re-ordered without changing program semantics.
  bool canReorder(const MachineInstr *A, const MachineInstr *B);

  /// A data type for representing the result computed by \c
  /// computeDependence. States whether it is okay to reorder the
  /// instruction passed to \c computeDependence with at most one
  /// dependency.
  struct DependenceResult {
    /// Can we actually re-order \p MI with \p Insts (see \c
    /// computeDependence).
    bool CanReorder;

    /// If non-None, then an instruction in \p Insts that also must be
    /// hoisted.
    Optional<ArrayRef<MachineInstr *>::iterator> PotentialDependence;

    /*implicit*/ DependenceResult(
        bool CanReorder,
        Optional<ArrayRef<MachineInstr *>::iterator> PotentialDependence)
        : CanReorder(CanReorder), PotentialDependence(PotentialDependence) {
      assert((!PotentialDependence || CanReorder) &&
             "!CanReorder && PotentialDependence.hasValue() not allowed!");
    }
  };

  /// Compute a result for the following question: can \p MI be
  /// re-ordered from after \p Insts to before them.
  ///
  /// \c canHandle should return true for all instructions in \p
  /// Insts.
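  ///
  /// For example (illustrative only, not from the original sources): if
  /// \p Insts is [A, B, C] and \p MI conflicts with B alone, the result is
  /// {CanReorder = true, PotentialDependence = iterator to B}; if \p MI
  /// conflicts with both A and B, the result is {false, None}.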
  DependenceResult computeDependence(const MachineInstr *MI,
                                     ArrayRef<MachineInstr *> Insts);

  /// Represents one null check that can be made implicit.
  class NullCheck {
    // The memory operation the null check can be folded into.
    MachineInstr *MemOperation;

    // The instruction actually doing the null check (Ptr != 0).
    MachineInstr *CheckOperation;

    // The block the check resides in.
    MachineBasicBlock *CheckBlock;

    // The block branched to if the pointer is non-null.
    MachineBasicBlock *NotNullSucc;

    // The block branched to if the pointer is null.
    MachineBasicBlock *NullSucc;

    // If this is non-null, then MemOperation has a dependency on this
    // instruction; and it needs to be hoisted to execute before MemOperation.
    MachineInstr *OnlyDependency;

  public:
    explicit NullCheck(MachineInstr *memOperation, MachineInstr *checkOperation,
                       MachineBasicBlock *checkBlock,
                       MachineBasicBlock *notNullSucc,
                       MachineBasicBlock *nullSucc,
                       MachineInstr *onlyDependency)
        : MemOperation(memOperation), CheckOperation(checkOperation),
          CheckBlock(checkBlock), NotNullSucc(notNullSucc), NullSucc(nullSucc),
          OnlyDependency(onlyDependency) {}

    MachineInstr *getMemOperation() const { return MemOperation; }

    MachineInstr *getCheckOperation() const { return CheckOperation; }

    MachineBasicBlock *getCheckBlock() const { return CheckBlock; }

    MachineBasicBlock *getNotNullSucc() const { return NotNullSucc; }

    MachineBasicBlock *getNullSucc() const { return NullSucc; }

    MachineInstr *getOnlyDependency() const { return OnlyDependency; }
  };

  const TargetInstrInfo *TII = nullptr;
  const TargetRegisterInfo *TRI = nullptr;
  AliasAnalysis *AA = nullptr;
  MachineModuleInfo *MMI = nullptr;

  bool analyzeBlockForNullChecks(MachineBasicBlock &MBB,
                                 SmallVectorImpl<NullCheck> &NullCheckList);
  MachineInstr *insertFaultingLoad(MachineInstr *LoadMI, MachineBasicBlock *MBB,
                                   MachineBasicBlock *HandlerMBB);
  void rewriteNullChecks(ArrayRef<NullCheck> NullCheckList);

  /// Is \p MI a memory operation that can be used to implicitly null check the
  /// value in \p PointerReg? \p PrevInsts is the set of instructions seen since
  /// the explicit null check on \p PointerReg.
  bool isSuitableMemoryOp(MachineInstr &MI, unsigned PointerReg,
                          ArrayRef<MachineInstr *> PrevInsts);

  /// Return true if \p FaultingMI can be hoisted from after the
  /// instructions in \p InstsSeenSoFar to before them. Set \p Dependence to a
  /// non-null value if we also need to (and legally can) hoist a dependency.
  bool canHoistLoadInst(MachineInstr *FaultingMI, unsigned PointerReg,
                        ArrayRef<MachineInstr *> InstsSeenSoFar,
                        MachineBasicBlock *NullSucc, MachineInstr *&Dependence);

public:
  static char ID;

  ImplicitNullChecks() : MachineFunctionPass(ID) {
    initializeImplicitNullChecksPass(*PassRegistry::getPassRegistry());
  }

  bool runOnMachineFunction(MachineFunction &MF) override;
  void getAnalysisUsage(AnalysisUsage &AU) const override {
    AU.addRequired<AAResultsWrapperPass>();
    MachineFunctionPass::getAnalysisUsage(AU);
  }

  MachineFunctionProperties getRequiredProperties() const override {
    return MachineFunctionProperties().set(
        MachineFunctionProperties::Property::NoVRegs);
  }
};

}

bool ImplicitNullChecks::canHandle(const MachineInstr *MI) {
  if (MI->isCall() || MI->mayStore() || MI->hasUnmodeledSideEffects())
    return false;
  auto IsRegMask = [](const MachineOperand &MO) { return MO.isRegMask(); };
  (void)IsRegMask;

  assert(!llvm::any_of(MI->operands(), IsRegMask) &&
         "Calls were filtered out above!");

  auto IsUnordered = [](MachineMemOperand *MMO) { return MMO->isUnordered(); };
  return llvm::all_of(MI->memoperands(), IsUnordered);
}

ImplicitNullChecks::DependenceResult
ImplicitNullChecks::computeDependence(const MachineInstr *MI,
                                      ArrayRef<MachineInstr *> Block) {
  assert(llvm::all_of(Block, canHandle) && "Check this first!");
  assert(!llvm::is_contained(Block, MI) && "Block must be exclusive of MI!");

  Optional<ArrayRef<MachineInstr *>::iterator> Dep;

  for (auto I = Block.begin(), E = Block.end(); I != E; ++I) {
    if (canReorder(*I, MI))
      continue;

    if (Dep == None) {
      // Found one possible dependency, keep track of it.
      Dep = I;
    } else {
      // We found two dependencies, so bail out.
      return {false, None};
    }
  }

  return {true, Dep};
}

bool ImplicitNullChecks::canReorder(const MachineInstr *A,
                                    const MachineInstr *B) {
  assert(canHandle(A) && canHandle(B) && "Precondition!");

  // canHandle makes sure that we _can_ correctly analyze the dependencies
  // between A and B here -- for instance, we should not be dealing with heap
  // load-store dependencies here.

  for (auto MOA : A->operands()) {
    if (!(MOA.isReg() && MOA.getReg()))
      continue;

    unsigned RegA = MOA.getReg();
    for (auto MOB : B->operands()) {
      if (!(MOB.isReg() && MOB.getReg()))
        continue;

      unsigned RegB = MOB.getReg();

      if (TRI->regsOverlap(RegA, RegB))
        return false;
    }
  }

  return true;
}

bool ImplicitNullChecks::runOnMachineFunction(MachineFunction &MF) {
  TII = MF.getSubtarget().getInstrInfo();
  TRI = MF.getRegInfo().getTargetRegisterInfo();
  MMI = &MF.getMMI();
  AA = &getAnalysis<AAResultsWrapperPass>().getAAResults();

  SmallVector<NullCheck, 16> NullCheckList;

  for (auto &MBB : MF)
    analyzeBlockForNullChecks(MBB, NullCheckList);

  if (!NullCheckList.empty())
    rewriteNullChecks(NullCheckList);

  return !NullCheckList.empty();
}

// Return true if any register aliasing \p Reg is live-in into \p MBB.
static bool AnyAliasLiveIn(const TargetRegisterInfo *TRI,
                           MachineBasicBlock *MBB, unsigned Reg) {
  for (MCRegAliasIterator AR(Reg, TRI, /*IncludeSelf*/ true); AR.isValid();
       ++AR)
    if (MBB->isLiveIn(*AR))
      return true;
  return false;
}

bool ImplicitNullChecks::isSuitableMemoryOp(
    MachineInstr &MI, unsigned PointerReg, ArrayRef<MachineInstr *> PrevInsts) {
  int64_t Offset;
  unsigned BaseReg;

  if (!TII->getMemOpBaseRegImmOfs(MI, BaseReg, Offset, TRI) ||
      BaseReg != PointerReg)
    return false;

  // We want the load to be issued at a sane offset from PointerReg, so that
  // if PointerReg is null then the load reliably page faults.
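  // For example, with the default -imp-null-check-page-size of 4096, a load
  // from (PointerReg + 8) with PointerReg == 0 touches the null page (which
  // the runtime is expected to keep unmapped) and faults, whereas a load from
  // (PointerReg + 8192) could land on a mapped page and silently succeed.
  // (The concrete numbers here are only an illustration.)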
  if (!(MI.mayLoad() && !MI.isPredicable() && Offset < PageSize))
    return false;

  // Finally, we need to make sure that the load instruction actually is
  // loading from PointerReg, and there isn't some re-definition of PointerReg
  // between the compare and the load.
  for (auto *PrevMI : PrevInsts)
    for (auto &PrevMO : PrevMI->operands())
      if (PrevMO.isReg() && PrevMO.getReg() &&
          TRI->regsOverlap(PrevMO.getReg(), PointerReg))
        return false;

  return true;
}

bool ImplicitNullChecks::canHoistLoadInst(
    MachineInstr *FaultingMI, unsigned PointerReg,
    ArrayRef<MachineInstr *> InstsSeenSoFar, MachineBasicBlock *NullSucc,
    MachineInstr *&Dependence) {
  auto DepResult = computeDependence(FaultingMI, InstsSeenSoFar);
  if (!DepResult.CanReorder)
    return false;

  if (!DepResult.PotentialDependence) {
    Dependence = nullptr;
    return true;
  }

  auto DependenceItr = *DepResult.PotentialDependence;
  auto *DependenceMI = *DependenceItr;

  // We don't want to reason about speculating loads. Note -- at this point
  // we should have already filtered out all of the other non-speculatable
  // things, like calls and stores.
  assert(canHandle(DependenceMI) && "Should never have reached here!");
  if (DependenceMI->mayLoad())
    return false;

  for (auto &DependenceMO : DependenceMI->operands()) {
    if (!(DependenceMO.isReg() && DependenceMO.getReg()))
      continue;

    // Make sure that we won't clobber any live-ins to the sibling block by
    // hoisting Dependency. For instance, we can't hoist INST to before the
    // null check (even if it is safe, and does not violate any dependencies
    // in the non_null_block) if %rdx is live-in to _null_block.
    //
    //    test %rcx, %rcx
    //    je _null_block
    //  _non_null_block:
    //    %rdx<def> = INST
    //    ...
    //
    // This restriction does not apply to the faulting load inst because in
    // case the pointer loaded from is in the null page, the load will not
    // semantically execute, and so will not affect machine state. That is, if
    // the load was loading into %rax and it faults, the value of %rax should
    // stay the same as it would have been had the load not executed and we'd
    // have branched to NullSucc directly.
    if (AnyAliasLiveIn(TRI, NullSucc, DependenceMO.getReg()))
      return false;

    // The Dependency can't be re-defining the base register -- then we won't
    // get the memory operation on the address we want. This is already
    // checked in \c isSuitableMemoryOp.
    assert(!TRI->regsOverlap(DependenceMO.getReg(), PointerReg) &&
           "Should have been checked before!");
  }

  auto DepDepResult =
      computeDependence(DependenceMI, {InstsSeenSoFar.begin(), DependenceItr});

  if (!DepDepResult.CanReorder || DepDepResult.PotentialDependence)
    return false;

  Dependence = DependenceMI;
  return true;
}

/// Analyze MBB to check if its terminating branch can be turned into an
/// implicit null check. If yes, append a description of the said null check to
/// NullCheckList and return true, else return false.
bool ImplicitNullChecks::analyzeBlockForNullChecks(
    MachineBasicBlock &MBB, SmallVectorImpl<NullCheck> &NullCheckList) {
  typedef TargetInstrInfo::MachineBranchPredicate MachineBranchPredicate;

  MDNode *BranchMD = nullptr;
  if (auto *BB = MBB.getBasicBlock())
    BranchMD = BB->getTerminator()->getMetadata(LLVMContext::MD_make_implicit);

  if (!BranchMD)
    return false;

  MachineBranchPredicate MBP;

  if (TII->analyzeBranchPredicate(MBB, MBP, true))
    return false;

  // Is the predicate comparing an integer to zero?
  if (!(MBP.LHS.isReg() && MBP.RHS.isImm() && MBP.RHS.getImm() == 0 &&
        (MBP.Predicate == MachineBranchPredicate::PRED_NE ||
         MBP.Predicate == MachineBranchPredicate::PRED_EQ)))
    return false;

  // If we cannot erase the test instruction itself, then making the null check
  // implicit does not buy us much.
  if (!MBP.SingleUseCondition)
    return false;

  MachineBasicBlock *NotNullSucc, *NullSucc;

  if (MBP.Predicate == MachineBranchPredicate::PRED_NE) {
    NotNullSucc = MBP.TrueDest;
    NullSucc = MBP.FalseDest;
  } else {
    NotNullSucc = MBP.FalseDest;
    NullSucc = MBP.TrueDest;
  }

  // We handle the simplest case for now. We can potentially do better by using
  // the machine dominator tree.
  if (NotNullSucc->pred_size() != 1)
    return false;

  // Starting with a code fragment like:
  //
  //   test %RAX, %RAX
  //   jne LblNotNull
  //
  //  LblNull:
  //   callq throw_NullPointerException
  //
  //  LblNotNull:
  //   Inst0
  //   Inst1
  //   ...
  //   Def = Load (%RAX + <offset>)
  //   ...
  //
  //
  // we want to end up with
  //
  //   Def = FaultingLoad (%RAX + <offset>), LblNull
  //   jmp LblNotNull ;; explicit or fallthrough
  //
  //  LblNotNull:
  //   Inst0
  //   Inst1
  //   ...
  //
  //  LblNull:
  //   callq throw_NullPointerException
  //
  //
  // To see why this is legal, consider the two possibilities:
  //
  //  1. %RAX is null: since we constrain <offset> to be less than PageSize, the
  //     load instruction dereferences the null page, causing a segmentation
  //     fault.
  //
  //  2. %RAX is not null: in this case we know that the load cannot fault, as
  //     otherwise the load would've faulted in the original program too and the
  //     original program would've been undefined.
  //
  // This reasoning cannot be extended to justify hoisting through arbitrary
  // control flow. For instance, in the example below (in pseudo-C)
  //
  //    if (ptr == null) { throw_npe(); unreachable; }
  //    if (some_cond) { return 42; }
  //    v = ptr->field;  // LD
  //    ...
  //
  // we cannot (without code duplication) use the load marked "LD" to null check
  // ptr -- clause (2) above does not apply in this case. In the above program
  // the safety of ptr->field can be dependent on some_cond; and, for instance,
  // ptr could be some non-null invalid reference that never gets loaded from
  // because some_cond is always true.

  const unsigned PointerReg = MBP.LHS.getReg();

  SmallVector<MachineInstr *, 8> InstsSeenSoFar;

  for (auto &MI : *NotNullSucc) {
    if (!canHandle(&MI) || InstsSeenSoFar.size() >= MaxInstsToConsider)
      return false;

    MachineInstr *Dependence;
    if (isSuitableMemoryOp(MI, PointerReg, InstsSeenSoFar) &&
        canHoistLoadInst(&MI, PointerReg, InstsSeenSoFar, NullSucc,
                         Dependence)) {
      NullCheckList.emplace_back(&MI, MBP.ConditionDef, &MBB, NotNullSucc,
                                 NullSucc, Dependence);
      return true;
    }

    InstsSeenSoFar.push_back(&MI);
  }

  return false;
}

/// Wrap a machine load instruction, LoadMI, into a FAULTING_LOAD_OP machine
/// instruction. The FAULTING_LOAD_OP instruction does the same load as LoadMI
/// (defining the same register), and branches to HandlerMBB if the load
/// faults. The FAULTING_LOAD_OP instruction is inserted at the end of MBB.
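///
/// Roughly (operand layout inferred from the code below, shown here only as an
/// illustration):
///
///   %EAX<def> = MOV32rm %RDI, ...                ; original LoadMI
///
/// becomes
///
///   %EAX<def> = FAULTING_LOAD_OP <handler MBB>, <opcode of MOV32rm>,
///               %RDI, ...                        ; uses of LoadMI appended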
MachineInstr *
ImplicitNullChecks::insertFaultingLoad(MachineInstr *LoadMI,
                                       MachineBasicBlock *MBB,
                                       MachineBasicBlock *HandlerMBB) {
  const unsigned NoRegister = 0; // Guaranteed to be the NoRegister value for
                                 // all targets.

  DebugLoc DL;
  unsigned NumDefs = LoadMI->getDesc().getNumDefs();
  assert(NumDefs <= 1 && "other cases unhandled!");

  unsigned DefReg = NoRegister;
  if (NumDefs != 0) {
    DefReg = LoadMI->defs().begin()->getReg();
    assert(std::distance(LoadMI->defs().begin(), LoadMI->defs().end()) == 1 &&
           "expected exactly one def!");
  }

  auto MIB = BuildMI(MBB, DL, TII->get(TargetOpcode::FAULTING_LOAD_OP), DefReg)
                 .addMBB(HandlerMBB)
                 .addImm(LoadMI->getOpcode());

  for (auto &MO : LoadMI->uses())
    MIB.addOperand(MO);

  MIB.setMemRefs(LoadMI->memoperands_begin(), LoadMI->memoperands_end());

  return MIB;
}

/// Rewrite the null checks in NullCheckList into implicit null checks.
void ImplicitNullChecks::rewriteNullChecks(
    ArrayRef<NullCheck> NullCheckList) {
  DebugLoc DL;

  for (auto &NC : NullCheckList) {
    // Remove the conditional branch dependent on the null check.
    unsigned BranchesRemoved = TII->removeBranch(*NC.getCheckBlock());
    (void)BranchesRemoved;
    assert(BranchesRemoved > 0 && "expected at least one branch!");

    if (auto *DepMI = NC.getOnlyDependency()) {
      DepMI->removeFromParent();
      NC.getCheckBlock()->insert(NC.getCheckBlock()->end(), DepMI);
    }

    // Insert a faulting load where the conditional branch was originally. The
    // check we did earlier ensures that this bit of code motion is legal. We
    // do not touch the successors list for any basic block since we haven't
    // changed control flow, we've just made it implicit.
    MachineInstr *FaultingLoad = insertFaultingLoad(
        NC.getMemOperation(), NC.getCheckBlock(), NC.getNullSucc());
    // Now the values defined by MemOperation, if any, are live-ins of
    // the block of MemOperation.
    // The original load operation may define implicit-defs alongside
    // the loaded value.
    MachineBasicBlock *MBB = NC.getMemOperation()->getParent();
    for (const MachineOperand &MO : FaultingLoad->operands()) {
      if (!MO.isReg() || !MO.isDef())
        continue;
      unsigned Reg = MO.getReg();
      if (!Reg || MBB->isLiveIn(Reg))
        continue;
      MBB->addLiveIn(Reg);
    }

    if (auto *DepMI = NC.getOnlyDependency()) {
      for (auto &MO : DepMI->operands()) {
        if (!MO.isReg() || !MO.getReg() || !MO.isDef())
          continue;
        if (!NC.getNotNullSucc()->isLiveIn(MO.getReg()))
          NC.getNotNullSucc()->addLiveIn(MO.getReg());
      }
    }

    NC.getMemOperation()->eraseFromParent();
    NC.getCheckOperation()->eraseFromParent();

    // Insert an *unconditional* branch to the not-null successor.
    TII->insertBranch(*NC.getCheckBlock(), NC.getNotNullSucc(), nullptr,
                      /*Cond=*/None, DL);

    NumImplicitNullChecks++;
  }
}


char ImplicitNullChecks::ID = 0;
char &llvm::ImplicitNullChecksID = ImplicitNullChecks::ID;
INITIALIZE_PASS_BEGIN(ImplicitNullChecks, "implicit-null-checks",
                      "Implicit null checks", false, false)
INITIALIZE_PASS_DEPENDENCY(AAResultsWrapperPass)
INITIALIZE_PASS_END(ImplicitNullChecks, "implicit-null-checks",
                    "Implicit null checks", false, false)
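
// Note: registering the pass above only makes it available; scheduling it in
// the codegen pipeline is, to the best of my knowledge, gated elsewhere (a
// separate -enable-implicit-null-checks option in the target pass
// configuration, not defined in this file).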