LLVM  14.0.0git
X86FlagsCopyLowering.cpp
Go to the documentation of this file.
1 //====- X86FlagsCopyLowering.cpp - Lowers COPY nodes of EFLAGS ------------===//
2 //
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 //
7 //===----------------------------------------------------------------------===//
8 /// \file
9 ///
10 /// Lowers COPY nodes of EFLAGS by directly extracting and preserving individual
11 /// flag bits.
12 ///
13 /// We have to do this by carefully analyzing and rewriting the usage of the
14 /// copied EFLAGS register because there is no general way to rematerialize the
15 /// entire EFLAGS register safely and efficiently. Using `popf` both forces
16 /// dynamic stack adjustment and can create correctness issues due to IF, TF,
17 /// and other non-status flags being overwritten. Using sequences involving
18 /// SAHF don't work on all x86 processors and are often quite slow compared to
19 /// directly testing a single status preserved in its own GPR.
20 ///
21 //===----------------------------------------------------------------------===//
22 
#include "X86.h"
#include "X86InstrBuilder.h"
#include "X86InstrInfo.h"
#include "X86Subtarget.h"
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/PostOrderIterator.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/ScopeExit.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/SmallSet.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/CodeGen/MachineBasicBlock.h"
#include "llvm/CodeGen/MachineDominators.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineFunctionPass.h"
#include "llvm/CodeGen/MachineInstr.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineOperand.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/TargetInstrInfo.h"
#include "llvm/CodeGen/TargetRegisterInfo.h"
#include "llvm/CodeGen/TargetSubtargetInfo.h"
#include "llvm/IR/DebugLoc.h"
#include "llvm/MC/MCSchedule.h"
#include "llvm/Pass.h"
#include "llvm/Support/Debug.h"
#include <algorithm>
#include <cassert>
#include <iterator>
#include <numeric>
#include <utility>
62 
63 using namespace llvm;
64 
65 #define PASS_KEY "x86-flags-copy-lowering"
66 #define DEBUG_TYPE PASS_KEY
67 
68 STATISTIC(NumCopiesEliminated, "Number of copies of EFLAGS eliminated");
69 STATISTIC(NumSetCCsInserted, "Number of setCC instructions inserted");
70 STATISTIC(NumTestsInserted, "Number of test instructions inserted");
71 STATISTIC(NumAddsInserted, "Number of adds instructions inserted");
72 
73 namespace {
74 
75 // Convenient array type for storing registers associated with each condition.
76 using CondRegArray = std::array<unsigned, X86::LAST_VALID_COND + 1>;
77 
78 class X86FlagsCopyLoweringPass : public MachineFunctionPass {
79 public:
80  X86FlagsCopyLoweringPass() : MachineFunctionPass(ID) { }
81 
82  StringRef getPassName() const override { return "X86 EFLAGS copy lowering"; }
83  bool runOnMachineFunction(MachineFunction &MF) override;
84  void getAnalysisUsage(AnalysisUsage &AU) const override;
85 
86  /// Pass identification, replacement for typeid.
87  static char ID;
88 
89 private:
90  MachineRegisterInfo *MRI = nullptr;
91  const X86Subtarget *Subtarget = nullptr;
92  const X86InstrInfo *TII = nullptr;
93  const TargetRegisterInfo *TRI = nullptr;
94  const TargetRegisterClass *PromoteRC = nullptr;
95  MachineDominatorTree *MDT = nullptr;
96 
97  CondRegArray collectCondsInRegs(MachineBasicBlock &MBB,
99 
100  Register promoteCondToReg(MachineBasicBlock &MBB,
102  const DebugLoc &TestLoc, X86::CondCode Cond);
103  std::pair<unsigned, bool> getCondOrInverseInReg(
105  const DebugLoc &TestLoc, X86::CondCode Cond, CondRegArray &CondRegs);
106  void insertTest(MachineBasicBlock &MBB, MachineBasicBlock::iterator Pos,
107  const DebugLoc &Loc, unsigned Reg);
108 
109  void rewriteArithmetic(MachineBasicBlock &TestMBB,
111  const DebugLoc &TestLoc, MachineInstr &MI,
112  MachineOperand &FlagUse, CondRegArray &CondRegs);
113  void rewriteCMov(MachineBasicBlock &TestMBB,
114  MachineBasicBlock::iterator TestPos, const DebugLoc &TestLoc,
115  MachineInstr &CMovI, MachineOperand &FlagUse,
116  CondRegArray &CondRegs);
117  void rewriteFCMov(MachineBasicBlock &TestMBB,
119  const DebugLoc &TestLoc, MachineInstr &CMovI,
120  MachineOperand &FlagUse, CondRegArray &CondRegs);
121  void rewriteCondJmp(MachineBasicBlock &TestMBB,
123  const DebugLoc &TestLoc, MachineInstr &JmpI,
124  CondRegArray &CondRegs);
125  void rewriteCopy(MachineInstr &MI, MachineOperand &FlagUse,
126  MachineInstr &CopyDefI);
127  void rewriteSetCC(MachineBasicBlock &TestMBB,
129  const DebugLoc &TestLoc, MachineInstr &SetCCI,
130  MachineOperand &FlagUse, CondRegArray &CondRegs);
131 };
132 
133 } // end anonymous namespace
134 
135 INITIALIZE_PASS_BEGIN(X86FlagsCopyLoweringPass, DEBUG_TYPE,
136  "X86 EFLAGS copy lowering", false, false)
137 INITIALIZE_PASS_END(X86FlagsCopyLoweringPass, DEBUG_TYPE,
138  "X86 EFLAGS copy lowering", false, false)
139 
141  return new X86FlagsCopyLoweringPass();
142 }
143 
145 
146 void X86FlagsCopyLoweringPass::getAnalysisUsage(AnalysisUsage &AU) const {
149 }
150 
151 namespace {
152 /// An enumeration of the arithmetic instruction mnemonics which have
153 /// interesting flag semantics.
154 ///
155 /// We can map instruction opcodes into these mnemonics to make it easy to
156 /// dispatch with specific functionality.
157 enum class FlagArithMnemonic {
158  ADC,
159  ADCX,
160  ADOX,
161  RCL,
162  RCR,
163  SBB,
164  SETB,
165 };
166 } // namespace
167 
168 static FlagArithMnemonic getMnemonicFromOpcode(unsigned Opcode) {
169  switch (Opcode) {
170  default:
171  report_fatal_error("No support for lowering a copy into EFLAGS when used "
172  "by this instruction!");
173 
174 #define LLVM_EXPAND_INSTR_SIZES(MNEMONIC, SUFFIX) \
175  case X86::MNEMONIC##8##SUFFIX: \
176  case X86::MNEMONIC##16##SUFFIX: \
177  case X86::MNEMONIC##32##SUFFIX: \
178  case X86::MNEMONIC##64##SUFFIX:
179 
180 #define LLVM_EXPAND_ADC_SBB_INSTR(MNEMONIC) \
181  LLVM_EXPAND_INSTR_SIZES(MNEMONIC, rr) \
182  LLVM_EXPAND_INSTR_SIZES(MNEMONIC, rr_REV) \
183  LLVM_EXPAND_INSTR_SIZES(MNEMONIC, rm) \
184  LLVM_EXPAND_INSTR_SIZES(MNEMONIC, mr) \
185  case X86::MNEMONIC##8ri: \
186  case X86::MNEMONIC##16ri8: \
187  case X86::MNEMONIC##32ri8: \
188  case X86::MNEMONIC##64ri8: \
189  case X86::MNEMONIC##16ri: \
190  case X86::MNEMONIC##32ri: \
191  case X86::MNEMONIC##64ri32: \
192  case X86::MNEMONIC##8mi: \
193  case X86::MNEMONIC##16mi8: \
194  case X86::MNEMONIC##32mi8: \
195  case X86::MNEMONIC##64mi8: \
196  case X86::MNEMONIC##16mi: \
197  case X86::MNEMONIC##32mi: \
198  case X86::MNEMONIC##64mi32: \
199  case X86::MNEMONIC##8i8: \
200  case X86::MNEMONIC##16i16: \
201  case X86::MNEMONIC##32i32: \
202  case X86::MNEMONIC##64i32:
203 
205  return FlagArithMnemonic::ADC;
206 
208  return FlagArithMnemonic::SBB;
209 
210 #undef LLVM_EXPAND_ADC_SBB_INSTR
211 
212  LLVM_EXPAND_INSTR_SIZES(RCL, rCL)
214  LLVM_EXPAND_INSTR_SIZES(RCL, ri)
215  return FlagArithMnemonic::RCL;
216 
217  LLVM_EXPAND_INSTR_SIZES(RCR, rCL)
219  LLVM_EXPAND_INSTR_SIZES(RCR, ri)
220  return FlagArithMnemonic::RCR;
221 
222 #undef LLVM_EXPAND_INSTR_SIZES
223 
224  case X86::ADCX32rr:
225  case X86::ADCX64rr:
226  case X86::ADCX32rm:
227  case X86::ADCX64rm:
228  return FlagArithMnemonic::ADCX;
229 
230  case X86::ADOX32rr:
231  case X86::ADOX64rr:
232  case X86::ADOX32rm:
233  case X86::ADOX64rm:
234  return FlagArithMnemonic::ADOX;
235 
236  case X86::SETB_C32r:
237  case X86::SETB_C64r:
238  return FlagArithMnemonic::SETB;
239  }
240 }
241 
243  MachineInstr &SplitI,
244  const X86InstrInfo &TII) {
245  MachineFunction &MF = *MBB.getParent();
246 
247  assert(SplitI.getParent() == &MBB &&
248  "Split instruction must be in the split block!");
249  assert(SplitI.isBranch() &&
250  "Only designed to split a tail of branch instructions!");
252  "Must split on an actual jCC instruction!");
253 
254  // Dig out the previous instruction to the split point.
255  MachineInstr &PrevI = *std::prev(SplitI.getIterator());
256  assert(PrevI.isBranch() && "Must split after a branch!");
258  "Must split after an actual jCC instruction!");
259  assert(!std::prev(PrevI.getIterator())->isTerminator() &&
260  "Must only have this one terminator prior to the split!");
261 
262  // Grab the one successor edge that will stay in `MBB`.
263  MachineBasicBlock &UnsplitSucc = *PrevI.getOperand(0).getMBB();
264 
265  // Analyze the original block to see if we are actually splitting an edge
266  // into two edges. This can happen when we have multiple conditional jumps to
267  // the same successor.
268  bool IsEdgeSplit =
269  std::any_of(SplitI.getIterator(), MBB.instr_end(),
270  [&](MachineInstr &MI) {
271  assert(MI.isTerminator() &&
272  "Should only have spliced terminators!");
273  return llvm::any_of(
274  MI.operands(), [&](MachineOperand &MOp) {
275  return MOp.isMBB() && MOp.getMBB() == &UnsplitSucc;
276  });
277  }) ||
278  MBB.getFallThrough() == &UnsplitSucc;
279 
280  MachineBasicBlock &NewMBB = *MF.CreateMachineBasicBlock();
281 
282  // Insert the new block immediately after the current one. Any existing
283  // fallthrough will be sunk into this new block anyways.
284  MF.insert(std::next(MachineFunction::iterator(&MBB)), &NewMBB);
285 
286  // Splice the tail of instructions into the new block.
287  NewMBB.splice(NewMBB.end(), &MBB, SplitI.getIterator(), MBB.end());
288 
289  // Copy the necessary succesors (and their probability info) into the new
290  // block.
291  for (auto SI = MBB.succ_begin(), SE = MBB.succ_end(); SI != SE; ++SI)
292  if (IsEdgeSplit || *SI != &UnsplitSucc)
293  NewMBB.copySuccessor(&MBB, SI);
294  // Normalize the probabilities if we didn't end up splitting the edge.
295  if (!IsEdgeSplit)
296  NewMBB.normalizeSuccProbs();
297 
298  // Now replace all of the moved successors in the original block with the new
299  // block. This will merge their probabilities.
300  for (MachineBasicBlock *Succ : NewMBB.successors())
301  if (Succ != &UnsplitSucc)
302  MBB.replaceSuccessor(Succ, &NewMBB);
303 
304  // We should always end up replacing at least one successor.
305  assert(MBB.isSuccessor(&NewMBB) &&
306  "Failed to make the new block a successor!");
307 
308  // Now update all the PHIs.
309  for (MachineBasicBlock *Succ : NewMBB.successors()) {
310  for (MachineInstr &MI : *Succ) {
311  if (!MI.isPHI())
312  break;
313 
314  for (int OpIdx = 1, NumOps = MI.getNumOperands(); OpIdx < NumOps;
315  OpIdx += 2) {
316  MachineOperand &OpV = MI.getOperand(OpIdx);
317  MachineOperand &OpMBB = MI.getOperand(OpIdx + 1);
318  assert(OpMBB.isMBB() && "Block operand to a PHI is not a block!");
319  if (OpMBB.getMBB() != &MBB)
320  continue;
321 
322  // Replace the operand for unsplit successors
323  if (!IsEdgeSplit || Succ != &UnsplitSucc) {
324  OpMBB.setMBB(&NewMBB);
325 
326  // We have to continue scanning as there may be multiple entries in
327  // the PHI.
328  continue;
329  }
330 
331  // When we have split the edge append a new successor.
332  MI.addOperand(MF, OpV);
333  MI.addOperand(MF, MachineOperand::CreateMBB(&NewMBB));
334  break;
335  }
336  }
337  }
338 
339  return NewMBB;
340 }
341 
342 static X86::CondCode getCondFromFCMOV(unsigned Opcode) {
343  switch (Opcode) {
344  default: return X86::COND_INVALID;
345  case X86::CMOVBE_Fp32: case X86::CMOVBE_Fp64: case X86::CMOVBE_Fp80:
346  return X86::COND_BE;
347  case X86::CMOVB_Fp32: case X86::CMOVB_Fp64: case X86::CMOVB_Fp80:
348  return X86::COND_B;
349  case X86::CMOVE_Fp32: case X86::CMOVE_Fp64: case X86::CMOVE_Fp80:
350  return X86::COND_E;
351  case X86::CMOVNBE_Fp32: case X86::CMOVNBE_Fp64: case X86::CMOVNBE_Fp80:
352  return X86::COND_A;
353  case X86::CMOVNB_Fp32: case X86::CMOVNB_Fp64: case X86::CMOVNB_Fp80:
354  return X86::COND_AE;
355  case X86::CMOVNE_Fp32: case X86::CMOVNE_Fp64: case X86::CMOVNE_Fp80:
356  return X86::COND_NE;
357  case X86::CMOVNP_Fp32: case X86::CMOVNP_Fp64: case X86::CMOVNP_Fp80:
358  return X86::COND_NP;
359  case X86::CMOVP_Fp32: case X86::CMOVP_Fp64: case X86::CMOVP_Fp80:
360  return X86::COND_P;
361  }
362 }
363 
364 bool X86FlagsCopyLoweringPass::runOnMachineFunction(MachineFunction &MF) {
365  LLVM_DEBUG(dbgs() << "********** " << getPassName() << " : " << MF.getName()
366  << " **********\n");
367 
368  Subtarget = &MF.getSubtarget<X86Subtarget>();
369  MRI = &MF.getRegInfo();
370  TII = Subtarget->getInstrInfo();
371  TRI = Subtarget->getRegisterInfo();
372  MDT = &getAnalysis<MachineDominatorTree>();
373  PromoteRC = &X86::GR8RegClass;
374 
375  if (MF.begin() == MF.end())
376  // Nothing to do for a degenerate empty function...
377  return false;
378 
379  // Collect the copies in RPO so that when there are chains where a copy is in
380  // turn copied again we visit the first one first. This ensures we can find
381  // viable locations for testing the original EFLAGS that dominate all the
382  // uses across complex CFGs.
385  for (MachineBasicBlock *MBB : RPOT)
386  for (MachineInstr &MI : *MBB)
387  if (MI.getOpcode() == TargetOpcode::COPY &&
388  MI.getOperand(0).getReg() == X86::EFLAGS)
389  Copies.push_back(&MI);
390 
391  for (MachineInstr *CopyI : Copies) {
392  MachineBasicBlock &MBB = *CopyI->getParent();
393 
394  MachineOperand &VOp = CopyI->getOperand(1);
395  assert(VOp.isReg() &&
396  "The input to the copy for EFLAGS should always be a register!");
397  MachineInstr &CopyDefI = *MRI->getVRegDef(VOp.getReg());
398  if (CopyDefI.getOpcode() != TargetOpcode::COPY) {
399  // FIXME: The big likely candidate here are PHI nodes. We could in theory
400  // handle PHI nodes, but it gets really, really hard. Insanely hard. Hard
401  // enough that it is probably better to change every other part of LLVM
402  // to avoid creating them. The issue is that once we have PHIs we won't
403  // know which original EFLAGS value we need to capture with our setCCs
404  // below. The end result will be computing a complete set of setCCs that
405  // we *might* want, computing them in every place where we copy *out* of
406  // EFLAGS and then doing SSA formation on all of them to insert necessary
407  // PHI nodes and consume those here. Then hoping that somehow we DCE the
408  // unnecessary ones. This DCE seems very unlikely to be successful and so
409  // we will almost certainly end up with a glut of dead setCC
410  // instructions. Until we have a motivating test case and fail to avoid
411  // it by changing other parts of LLVM's lowering, we refuse to handle
412  // this complex case here.
413  LLVM_DEBUG(
414  dbgs() << "ERROR: Encountered unexpected def of an eflags copy: ";
415  CopyDefI.dump());
417  "Cannot lower EFLAGS copy unless it is defined in turn by a copy!");
418  }
419 
420  auto Cleanup = make_scope_exit([&] {
421  // All uses of the EFLAGS copy are now rewritten, kill the copy into
422  // eflags and if dead the copy from.
423  CopyI->eraseFromParent();
424  if (MRI->use_empty(CopyDefI.getOperand(0).getReg()))
425  CopyDefI.eraseFromParent();
426  ++NumCopiesEliminated;
427  });
428 
429  MachineOperand &DOp = CopyI->getOperand(0);
430  assert(DOp.isDef() && "Expected register def!");
431  assert(DOp.getReg() == X86::EFLAGS && "Unexpected copy def register!");
432  if (DOp.isDead())
433  continue;
434 
435  MachineBasicBlock *TestMBB = CopyDefI.getParent();
436  auto TestPos = CopyDefI.getIterator();
437  DebugLoc TestLoc = CopyDefI.getDebugLoc();
438 
439  LLVM_DEBUG(dbgs() << "Rewriting copy: "; CopyI->dump());
440 
441  // Walk up across live-in EFLAGS to find where they were actually def'ed.
442  //
443  // This copy's def may just be part of a region of blocks covered by
444  // a single def of EFLAGS and we want to find the top of that region where
445  // possible.
446  //
447  // This is essentially a search for a *candidate* reaching definition
448  // location. We don't need to ever find the actual reaching definition here,
449  // but we want to walk up the dominator tree to find the highest point which
450  // would be viable for such a definition.
451  auto HasEFLAGSClobber = [&](MachineBasicBlock::iterator Begin,
453  // Scan backwards as we expect these to be relatively short and often find
454  // a clobber near the end.
455  return llvm::any_of(
456  llvm::reverse(llvm::make_range(Begin, End)), [&](MachineInstr &MI) {
457  // Flag any instruction (other than the copy we are
458  // currently rewriting) that defs EFLAGS.
459  return &MI != CopyI && MI.findRegisterDefOperand(X86::EFLAGS);
460  });
461  };
462  auto HasEFLAGSClobberPath = [&](MachineBasicBlock *BeginMBB,
463  MachineBasicBlock *EndMBB) {
464  assert(MDT->dominates(BeginMBB, EndMBB) &&
465  "Only support paths down the dominator tree!");
468  // We terminate at the beginning. No need to scan it.
469  Visited.insert(BeginMBB);
470  Worklist.push_back(EndMBB);
471  do {
472  auto *MBB = Worklist.pop_back_val();
473  for (auto *PredMBB : MBB->predecessors()) {
474  if (!Visited.insert(PredMBB).second)
475  continue;
476  if (HasEFLAGSClobber(PredMBB->begin(), PredMBB->end()))
477  return true;
478  // Enqueue this block to walk its predecessors.
479  Worklist.push_back(PredMBB);
480  }
481  } while (!Worklist.empty());
482  // No clobber found along a path from the begin to end.
483  return false;
484  };
485  while (TestMBB->isLiveIn(X86::EFLAGS) && !TestMBB->pred_empty() &&
486  !HasEFLAGSClobber(TestMBB->begin(), TestPos)) {
487  // Find the nearest common dominator of the predecessors, as
488  // that will be the best candidate to hoist into.
489  MachineBasicBlock *HoistMBB =
490  std::accumulate(std::next(TestMBB->pred_begin()), TestMBB->pred_end(),
491  *TestMBB->pred_begin(),
492  [&](MachineBasicBlock *LHS, MachineBasicBlock *RHS) {
493  return MDT->findNearestCommonDominator(LHS, RHS);
494  });
495 
496  // Now we need to scan all predecessors that may be reached along paths to
497  // the hoist block. A clobber anywhere in any of these blocks the hoist.
498  // Note that this even handles loops because we require *no* clobbers.
499  if (HasEFLAGSClobberPath(HoistMBB, TestMBB))
500  break;
501 
502  // We also need the terminators to not sneakily clobber flags.
503  if (HasEFLAGSClobber(HoistMBB->getFirstTerminator()->getIterator(),
504  HoistMBB->instr_end()))
505  break;
506 
507  // We found a viable location, hoist our test position to it.
508  TestMBB = HoistMBB;
509  TestPos = TestMBB->getFirstTerminator()->getIterator();
510  // Clear the debug location as it would just be confusing after hoisting.
511  TestLoc = DebugLoc();
512  }
513  LLVM_DEBUG({
514  auto DefIt = llvm::find_if(
515  llvm::reverse(llvm::make_range(TestMBB->instr_begin(), TestPos)),
516  [&](MachineInstr &MI) {
517  return MI.findRegisterDefOperand(X86::EFLAGS);
518  });
519  if (DefIt.base() != TestMBB->instr_begin()) {
520  dbgs() << " Using EFLAGS defined by: ";
521  DefIt->dump();
522  } else {
523  dbgs() << " Using live-in flags for BB:\n";
524  TestMBB->dump();
525  }
526  });
527 
528  // While rewriting uses, we buffer jumps and rewrite them in a second pass
529  // because doing so will perturb the CFG that we are walking to find the
530  // uses in the first place.
532 
533  // Gather the condition flags that have already been preserved in
534  // registers. We do this from scratch each time as we expect there to be
535  // very few of them and we expect to not revisit the same copy definition
536  // many times. If either of those change sufficiently we could build a map
537  // of these up front instead.
538  CondRegArray CondRegs = collectCondsInRegs(*TestMBB, TestPos);
539 
540  // Collect the basic blocks we need to scan. Typically this will just be
541  // a single basic block but we may have to scan multiple blocks if the
542  // EFLAGS copy lives into successors.
545  Blocks.push_back(&MBB);
546 
547  do {
548  MachineBasicBlock &UseMBB = *Blocks.pop_back_val();
549 
550  // Track when if/when we find a kill of the flags in this block.
551  bool FlagsKilled = false;
552 
553  // In most cases, we walk from the beginning to the end of the block. But
554  // when the block is the same block as the copy is from, we will visit it
555  // twice. The first time we start from the copy and go to the end. The
556  // second time we start from the beginning and go to the copy. This lets
557  // us handle copies inside of cycles.
558  // FIXME: This loop is *super* confusing. This is at least in part
559  // a symptom of all of this routine needing to be refactored into
560  // documentable components. Once done, there may be a better way to write
561  // this loop.
562  for (auto MII = (&UseMBB == &MBB && !VisitedBlocks.count(&UseMBB))
563  ? std::next(CopyI->getIterator())
564  : UseMBB.instr_begin(),
565  MIE = UseMBB.instr_end();
566  MII != MIE;) {
567  MachineInstr &MI = *MII++;
568  // If we are in the original copy block and encounter either the copy
569  // def or the copy itself, break so that we don't re-process any part of
570  // the block or process the instructions in the range that was copied
571  // over.
572  if (&MI == CopyI || &MI == &CopyDefI) {
573  assert(&UseMBB == &MBB && VisitedBlocks.count(&MBB) &&
574  "Should only encounter these on the second pass over the "
575  "original block.");
576  break;
577  }
578 
579  MachineOperand *FlagUse = MI.findRegisterUseOperand(X86::EFLAGS);
580  if (!FlagUse) {
581  if (MI.findRegisterDefOperand(X86::EFLAGS)) {
582  // If EFLAGS are defined, it's as-if they were killed. We can stop
583  // scanning here.
584  //
585  // NB!!! Many instructions only modify some flags. LLVM currently
586  // models this as clobbering all flags, but if that ever changes
587  // this will need to be carefully updated to handle that more
588  // complex logic.
589  FlagsKilled = true;
590  break;
591  }
592  continue;
593  }
594 
595  LLVM_DEBUG(dbgs() << " Rewriting use: "; MI.dump());
596 
597  // Check the kill flag before we rewrite as that may change it.
598  if (FlagUse->isKill())
599  FlagsKilled = true;
600 
601  // Once we encounter a branch, the rest of the instructions must also be
602  // branches. We can't rewrite in place here, so we handle them below.
603  //
604  // Note that we don't have to handle tail calls here, even conditional
605  // tail calls, as those are not introduced into the X86 MI until post-RA
606  // branch folding or black placement. As a consequence, we get to deal
607  // with the simpler formulation of conditional branches followed by tail
608  // calls.
610  auto JmpIt = MI.getIterator();
611  do {
612  JmpIs.push_back(&*JmpIt);
613  ++JmpIt;
614  } while (JmpIt != UseMBB.instr_end() &&
615  X86::getCondFromBranch(*JmpIt) !=
617  break;
618  }
619 
620  // Otherwise we can just rewrite in-place.
622  rewriteCMov(*TestMBB, TestPos, TestLoc, MI, *FlagUse, CondRegs);
623  } else if (getCondFromFCMOV(MI.getOpcode()) != X86::COND_INVALID) {
624  rewriteFCMov(*TestMBB, TestPos, TestLoc, MI, *FlagUse, CondRegs);
625  } else if (X86::getCondFromSETCC(MI) != X86::COND_INVALID) {
626  rewriteSetCC(*TestMBB, TestPos, TestLoc, MI, *FlagUse, CondRegs);
627  } else if (MI.getOpcode() == TargetOpcode::COPY) {
628  rewriteCopy(MI, *FlagUse, CopyDefI);
629  } else {
630  // We assume all other instructions that use flags also def them.
631  assert(MI.findRegisterDefOperand(X86::EFLAGS) &&
632  "Expected a def of EFLAGS for this instruction!");
633 
634  // NB!!! Several arithmetic instructions only *partially* update
635  // flags. Theoretically, we could generate MI code sequences that
636  // would rely on this fact and observe different flags independently.
637  // But currently LLVM models all of these instructions as clobbering
638  // all the flags in an undef way. We rely on that to simplify the
639  // logic.
640  FlagsKilled = true;
641 
642  // Generically handle remaining uses as arithmetic instructions.
643  rewriteArithmetic(*TestMBB, TestPos, TestLoc, MI, *FlagUse,
644  CondRegs);
645  }
646 
647  // If this was the last use of the flags, we're done.
648  if (FlagsKilled)
649  break;
650  }
651 
652  // If the flags were killed, we're done with this block.
653  if (FlagsKilled)
654  continue;
655 
656  // Otherwise we need to scan successors for ones where the flags live-in
657  // and queue those up for processing.
658  for (MachineBasicBlock *SuccMBB : UseMBB.successors())
659  if (SuccMBB->isLiveIn(X86::EFLAGS) &&
660  VisitedBlocks.insert(SuccMBB).second) {
661  // We currently don't do any PHI insertion and so we require that the
662  // test basic block dominates all of the use basic blocks. Further, we
663  // can't have a cycle from the test block back to itself as that would
664  // create a cycle requiring a PHI to break it.
665  //
666  // We could in theory do PHI insertion here if it becomes useful by
667  // just taking undef values in along every edge that we don't trace
668  // this EFLAGS copy along. This isn't as bad as fully general PHI
669  // insertion, but still seems like a great deal of complexity.
670  //
671  // Because it is theoretically possible that some earlier MI pass or
672  // other lowering transformation could induce this to happen, we do
673  // a hard check even in non-debug builds here.
674  if (SuccMBB == TestMBB || !MDT->dominates(TestMBB, SuccMBB)) {
675  LLVM_DEBUG({
676  dbgs()
677  << "ERROR: Encountered use that is not dominated by our test "
678  "basic block! Rewriting this would require inserting PHI "
679  "nodes to track the flag state across the CFG.\n\nTest "
680  "block:\n";
681  TestMBB->dump();
682  dbgs() << "Use block:\n";
683  SuccMBB->dump();
684  });
686  "Cannot lower EFLAGS copy when original copy def "
687  "does not dominate all uses.");
688  }
689 
690  Blocks.push_back(SuccMBB);
691 
692  // After this, EFLAGS will be recreated before each use.
693  SuccMBB->removeLiveIn(X86::EFLAGS);
694  }
695  } while (!Blocks.empty());
696 
697  // Now rewrite the jumps that use the flags. These we handle specially
698  // because if there are multiple jumps in a single basic block we'll have
699  // to do surgery on the CFG.
700  MachineBasicBlock *LastJmpMBB = nullptr;
701  for (MachineInstr *JmpI : JmpIs) {
702  // Past the first jump within a basic block we need to split the blocks
703  // apart.
704  if (JmpI->getParent() == LastJmpMBB)
705  splitBlock(*JmpI->getParent(), *JmpI, *TII);
706  else
707  LastJmpMBB = JmpI->getParent();
708 
709  rewriteCondJmp(*TestMBB, TestPos, TestLoc, *JmpI, CondRegs);
710  }
711 
712  // FIXME: Mark the last use of EFLAGS before the copy's def as a kill if
713  // the copy's def operand is itself a kill.
714  }
715 
716 #ifndef NDEBUG
717  for (MachineBasicBlock &MBB : MF)
718  for (MachineInstr &MI : MBB)
719  if (MI.getOpcode() == TargetOpcode::COPY &&
720  (MI.getOperand(0).getReg() == X86::EFLAGS ||
721  MI.getOperand(1).getReg() == X86::EFLAGS)) {
722  LLVM_DEBUG(dbgs() << "ERROR: Found a COPY involving EFLAGS: ";
723  MI.dump());
724  llvm_unreachable("Unlowered EFLAGS copy!");
725  }
726 #endif
727 
728  return true;
729 }
730 
731 /// Collect any conditions that have already been set in registers so that we
732 /// can re-use them rather than adding duplicates.
733 CondRegArray X86FlagsCopyLoweringPass::collectCondsInRegs(
735  CondRegArray CondRegs = {};
736 
737  // Scan backwards across the range of instructions with live EFLAGS.
738  for (MachineInstr &MI :
739  llvm::reverse(llvm::make_range(MBB.begin(), TestPos))) {
741  if (Cond != X86::COND_INVALID && !MI.mayStore() &&
742  MI.getOperand(0).isReg() && MI.getOperand(0).getReg().isVirtual()) {
743  assert(MI.getOperand(0).isDef() &&
744  "A non-storing SETcc should always define a register!");
745  CondRegs[Cond] = MI.getOperand(0).getReg();
746  }
747 
748  // Stop scanning when we see the first definition of the EFLAGS as prior to
749  // this we would potentially capture the wrong flag state.
750  if (MI.findRegisterDefOperand(X86::EFLAGS))
751  break;
752  }
753  return CondRegs;
754 }
755 
756 Register X86FlagsCopyLoweringPass::promoteCondToReg(
758  const DebugLoc &TestLoc, X86::CondCode Cond) {
759  Register Reg = MRI->createVirtualRegister(PromoteRC);
760  auto SetI = BuildMI(TestMBB, TestPos, TestLoc,
761  TII->get(X86::SETCCr), Reg).addImm(Cond);
762  (void)SetI;
763  LLVM_DEBUG(dbgs() << " save cond: "; SetI->dump());
764  ++NumSetCCsInserted;
765  return Reg;
766 }
767 
768 std::pair<unsigned, bool> X86FlagsCopyLoweringPass::getCondOrInverseInReg(
770  const DebugLoc &TestLoc, X86::CondCode Cond, CondRegArray &CondRegs) {
771  unsigned &CondReg = CondRegs[Cond];
772  unsigned &InvCondReg = CondRegs[X86::GetOppositeBranchCondition(Cond)];
773  if (!CondReg && !InvCondReg)
774  CondReg = promoteCondToReg(TestMBB, TestPos, TestLoc, Cond);
775 
776  if (CondReg)
777  return {CondReg, false};
778  else
779  return {InvCondReg, true};
780 }
781 
782 void X86FlagsCopyLoweringPass::insertTest(MachineBasicBlock &MBB,
784  const DebugLoc &Loc, unsigned Reg) {
785  auto TestI =
786  BuildMI(MBB, Pos, Loc, TII->get(X86::TEST8rr)).addReg(Reg).addReg(Reg);
787  (void)TestI;
788  LLVM_DEBUG(dbgs() << " test cond: "; TestI->dump());
789  ++NumTestsInserted;
790 }
791 
792 void X86FlagsCopyLoweringPass::rewriteArithmetic(
794  const DebugLoc &TestLoc, MachineInstr &MI, MachineOperand &FlagUse,
795  CondRegArray &CondRegs) {
796  // Arithmetic is either reading CF or OF. Figure out which condition we need
797  // to preserve in a register.
799 
800  // The addend to use to reset CF or OF when added to the flag value.
801  int Addend = 0;
802 
803  switch (getMnemonicFromOpcode(MI.getOpcode())) {
805  case FlagArithMnemonic::ADCX:
806  case FlagArithMnemonic::RCL:
807  case FlagArithMnemonic::RCR:
809  case FlagArithMnemonic::SETB:
810  Cond = X86::COND_B; // CF == 1
811  // Set up an addend that when one is added will need a carry due to not
812  // having a higher bit available.
813  Addend = 255;
814  break;
815 
816  case FlagArithMnemonic::ADOX:
817  Cond = X86::COND_O; // OF == 1
818  // Set up an addend that when one is added will turn from positive to
819  // negative and thus overflow in the signed domain.
820  Addend = 127;
821  break;
822  }
823 
824  // Now get a register that contains the value of the flag input to the
825  // arithmetic. We require exactly this flag to simplify the arithmetic
826  // required to materialize it back into the flag.
827  unsigned &CondReg = CondRegs[Cond];
828  if (!CondReg)
829  CondReg = promoteCondToReg(TestMBB, TestPos, TestLoc, Cond);
830 
831  MachineBasicBlock &MBB = *MI.getParent();
832 
833  // Insert an instruction that will set the flag back to the desired value.
834  Register TmpReg = MRI->createVirtualRegister(PromoteRC);
835  auto AddI =
836  BuildMI(MBB, MI.getIterator(), MI.getDebugLoc(), TII->get(X86::ADD8ri))
837  .addDef(TmpReg, RegState::Dead)
838  .addReg(CondReg)
839  .addImm(Addend);
840  (void)AddI;
841  LLVM_DEBUG(dbgs() << " add cond: "; AddI->dump());
842  ++NumAddsInserted;
843  FlagUse.setIsKill(true);
844 }
845 
846 void X86FlagsCopyLoweringPass::rewriteCMov(MachineBasicBlock &TestMBB,
848  const DebugLoc &TestLoc,
849  MachineInstr &CMovI,
850  MachineOperand &FlagUse,
851  CondRegArray &CondRegs) {
852  // First get the register containing this specific condition.
854  unsigned CondReg;
855  bool Inverted;
856  std::tie(CondReg, Inverted) =
857  getCondOrInverseInReg(TestMBB, TestPos, TestLoc, Cond, CondRegs);
858 
859  MachineBasicBlock &MBB = *CMovI.getParent();
860 
861  // Insert a direct test of the saved register.
862  insertTest(MBB, CMovI.getIterator(), CMovI.getDebugLoc(), CondReg);
863 
864  // Rewrite the CMov to use the !ZF flag from the test, and then kill its use
865  // of the flags afterward.
866  CMovI.getOperand(CMovI.getDesc().getNumOperands() - 1)
867  .setImm(Inverted ? X86::COND_E : X86::COND_NE);
868  FlagUse.setIsKill(true);
869  LLVM_DEBUG(dbgs() << " fixed cmov: "; CMovI.dump());
870 }
871 
872 void X86FlagsCopyLoweringPass::rewriteFCMov(MachineBasicBlock &TestMBB,
874  const DebugLoc &TestLoc,
875  MachineInstr &CMovI,
876  MachineOperand &FlagUse,
877  CondRegArray &CondRegs) {
878  // First get the register containing this specific condition.
880  unsigned CondReg;
881  bool Inverted;
882  std::tie(CondReg, Inverted) =
883  getCondOrInverseInReg(TestMBB, TestPos, TestLoc, Cond, CondRegs);
884 
885  MachineBasicBlock &MBB = *CMovI.getParent();
886 
887  // Insert a direct test of the saved register.
888  insertTest(MBB, CMovI.getIterator(), CMovI.getDebugLoc(), CondReg);
889 
890  auto getFCMOVOpcode = [](unsigned Opcode, bool Inverted) {
891  switch (Opcode) {
892  default: llvm_unreachable("Unexpected opcode!");
893  case X86::CMOVBE_Fp32: case X86::CMOVNBE_Fp32:
894  case X86::CMOVB_Fp32: case X86::CMOVNB_Fp32:
895  case X86::CMOVE_Fp32: case X86::CMOVNE_Fp32:
896  case X86::CMOVP_Fp32: case X86::CMOVNP_Fp32:
897  return Inverted ? X86::CMOVE_Fp32 : X86::CMOVNE_Fp32;
898  case X86::CMOVBE_Fp64: case X86::CMOVNBE_Fp64:
899  case X86::CMOVB_Fp64: case X86::CMOVNB_Fp64:
900  case X86::CMOVE_Fp64: case X86::CMOVNE_Fp64:
901  case X86::CMOVP_Fp64: case X86::CMOVNP_Fp64:
902  return Inverted ? X86::CMOVE_Fp64 : X86::CMOVNE_Fp64;
903  case X86::CMOVBE_Fp80: case X86::CMOVNBE_Fp80:
904  case X86::CMOVB_Fp80: case X86::CMOVNB_Fp80:
905  case X86::CMOVE_Fp80: case X86::CMOVNE_Fp80:
906  case X86::CMOVP_Fp80: case X86::CMOVNP_Fp80:
907  return Inverted ? X86::CMOVE_Fp80 : X86::CMOVNE_Fp80;
908  }
909  };
910 
911  // Rewrite the CMov to use the !ZF flag from the test.
912  CMovI.setDesc(TII->get(getFCMOVOpcode(CMovI.getOpcode(), Inverted)));
913  FlagUse.setIsKill(true);
914  LLVM_DEBUG(dbgs() << " fixed fcmov: "; CMovI.dump());
915 }
916 
917 void X86FlagsCopyLoweringPass::rewriteCondJmp(
919  const DebugLoc &TestLoc, MachineInstr &JmpI, CondRegArray &CondRegs) {
920  // First get the register containing this specific condition.
922  unsigned CondReg;
923  bool Inverted;
924  std::tie(CondReg, Inverted) =
925  getCondOrInverseInReg(TestMBB, TestPos, TestLoc, Cond, CondRegs);
926 
927  MachineBasicBlock &JmpMBB = *JmpI.getParent();
928 
929  // Insert a direct test of the saved register.
930  insertTest(JmpMBB, JmpI.getIterator(), JmpI.getDebugLoc(), CondReg);
931 
932  // Rewrite the jump to use the !ZF flag from the test, and kill its use of
933  // flags afterward.
934  JmpI.getOperand(1).setImm(Inverted ? X86::COND_E : X86::COND_NE);
935  JmpI.findRegisterUseOperand(X86::EFLAGS)->setIsKill(true);
936  LLVM_DEBUG(dbgs() << " fixed jCC: "; JmpI.dump());
937 }
938 
939 void X86FlagsCopyLoweringPass::rewriteCopy(MachineInstr &MI,
940  MachineOperand &FlagUse,
941  MachineInstr &CopyDefI) {
942  // Just replace this copy with the original copy def.
943  MRI->replaceRegWith(MI.getOperand(0).getReg(),
944  CopyDefI.getOperand(0).getReg());
945  MI.eraseFromParent();
946 }
947 
948 void X86FlagsCopyLoweringPass::rewriteSetCC(MachineBasicBlock &TestMBB,
950  const DebugLoc &TestLoc,
951  MachineInstr &SetCCI,
952  MachineOperand &FlagUse,
953  CondRegArray &CondRegs) {
955  // Note that we can't usefully rewrite this to the inverse without complex
956  // analysis of the users of the setCC. Largely we rely on duplicates which
957  // could have been avoided already being avoided here.
958  unsigned &CondReg = CondRegs[Cond];
959  if (!CondReg)
960  CondReg = promoteCondToReg(TestMBB, TestPos, TestLoc, Cond);
961 
962  // Rewriting a register def is trivial: we just replace the register and
963  // remove the setcc.
964  if (!SetCCI.mayStore()) {
965  assert(SetCCI.getOperand(0).isReg() &&
966  "Cannot have a non-register defined operand to SETcc!");
967  Register OldReg = SetCCI.getOperand(0).getReg();
968  // Drop Kill flags on the old register before replacing. CondReg may have
969  // a longer live range.
970  MRI->clearKillFlags(OldReg);
971  MRI->replaceRegWith(OldReg, CondReg);
972  SetCCI.eraseFromParent();
973  return;
974  }
975 
976  // Otherwise, we need to emit a store.
977  auto MIB = BuildMI(*SetCCI.getParent(), SetCCI.getIterator(),
978  SetCCI.getDebugLoc(), TII->get(X86::MOV8mr));
979  // Copy the address operands.
980  for (int i = 0; i < X86::AddrNumOperands; ++i)
981  MIB.add(SetCCI.getOperand(i));
982 
983  MIB.addReg(CondReg);
984 
985  MIB.setMemRefs(SetCCI.memoperands());
986 
987  SetCCI.eraseFromParent();
988 }
i
i
Definition: README.txt:29
llvm::AVRCC::COND_INVALID
@ COND_INVALID
Definition: AVRInstrInfo.h:40
llvm::MachineInstr::isBranch
bool isBranch(QueryType Type=AnyInBundle) const
Returns true if this is a conditional, unconditional, or indirect branch.
Definition: MachineInstr.h:855
llvm::createX86FlagsCopyLoweringPass
FunctionPass * createX86FlagsCopyLoweringPass()
Return a pass that lowers EFLAGS copy pseudo instructions.
Definition: X86FlagsCopyLowering.cpp:140
llvm::MachineBasicBlock::pred_begin
pred_iterator pred_begin()
Definition: MachineBasicBlock.h:316
MI
IRTranslator LLVM IR MI
Definition: IRTranslator.cpp:105
MachineInstr.h
llvm::MachineInstrBuilder::addImm
const MachineInstrBuilder & addImm(int64_t Val) const
Add a new immediate operand.
Definition: MachineInstrBuilder.h:131
llvm
This is an optimization pass for GlobalISel generic memory operations.
Definition: AllocatorList.h:23
MachineSSAUpdater.h
Reg
unsigned Reg
Definition: MachineSink.cpp:1558
getCondFromFCMOV
static X86::CondCode getCondFromFCMOV(unsigned Opcode)
Definition: X86FlagsCopyLowering.cpp:342
llvm::make_range
iterator_range< T > make_range(T x, T y)
Convenience function for iterating over sub-ranges.
Definition: iterator_range.h:53
llvm::MachineBasicBlock::isLiveIn
bool isLiveIn(MCPhysReg Reg, LaneBitmask LaneMask=LaneBitmask::getAll()) const
Return true if the specified register is in the live in set.
Definition: MachineBasicBlock.cpp:579
getCondFromBranch
static X86::CondCode getCondFromBranch(const MCInst &MI, const MCInstrInfo &MCII)
Definition: X86AsmBackend.cpp:315
llvm::MachineRegisterInfo::createVirtualRegister
Register createVirtualRegister(const TargetRegisterClass *RegClass, StringRef Name="")
createVirtualRegister - Create and return a new virtual register in the function with the specified r...
Definition: MachineRegisterInfo.cpp:158
llvm::RegState::Dead
@ Dead
Unused definition.
Definition: MachineInstrBuilder.h:50
X86Subtarget.h
llvm::MachineRegisterInfo
MachineRegisterInfo - Keep track of information for virtual and physical registers,...
Definition: MachineRegisterInfo.h:52
Pass.h
X86InstrBuilder.h
llvm::MachineOperand::setIsKill
void setIsKill(bool Val=true)
Definition: MachineOperand.h:500
llvm::X86::COND_BE
@ COND_BE
Definition: X86BaseInfo.h:87
llvm::SmallVector< MachineInstr *, 4 >
Statistic.h
llvm::MachineFunction::end
iterator end()
Definition: MachineFunction.h:818
llvm::X86Subtarget
Definition: X86Subtarget.h:52
llvm::X86::COND_P
@ COND_P
Definition: X86BaseInfo.h:91
llvm::MachineFunctionPass
MachineFunctionPass - This class adapts the FunctionPass interface to allow convenient creation of pa...
Definition: MachineFunctionPass.h:30
MachineBasicBlock.h
llvm::MachineOperand::setImm
void setImm(int64_t immVal)
Definition: MachineOperand.h:655
llvm::TargetRegisterInfo
TargetRegisterInfo base class - We assume that the target defines a static array of TargetRegisterDes...
Definition: TargetRegisterInfo.h:233
llvm::MachineInstr::getDesc
const MCInstrDesc & getDesc() const
Returns the target instruction descriptor of this MachineInstr.
Definition: MachineInstr.h:486
DenseMap.h
llvm::reverse
auto reverse(ContainerTy &&C, std::enable_if_t< has_rbegin< ContainerTy >::value > *=nullptr)
Definition: STLExtras.h:359
TargetInstrInfo.h
llvm::X86ISD::SBB
@ SBB
Definition: X86ISelLowering.h:401
llvm::MachineInstr::findRegisterUseOperand
MachineOperand * findRegisterUseOperand(Register Reg, bool isKill=false, const TargetRegisterInfo *TRI=nullptr)
Wrapper for findRegisterUseOperandIdx, it returns a pointer to the MachineOperand rather than an inde...
Definition: MachineInstr.h:1432
llvm::copy
OutputIt copy(R &&Range, OutputIt Out)
Definition: STLExtras.h:1651
llvm::SmallPtrSet
SmallPtrSet - This class implements a set which is optimized for holding SmallSize or less elements.
Definition: SmallPtrSet.h:449
llvm::AArch64ISD::ADC
@ ADC
Definition: AArch64ISelLowering.h:76
llvm::X86::getCondFromBranch
CondCode getCondFromBranch(const MachineInstr &MI)
Definition: X86InstrInfo.cpp:2816
STLExtras.h
llvm::SmallVectorImpl::pop_back_val
LLVM_NODISCARD T pop_back_val()
Definition: SmallVector.h:635
llvm::X86::CondCode
CondCode
Definition: X86BaseInfo.h:80
llvm::X86::COND_INVALID
@ COND_INVALID
Definition: X86BaseInfo.h:107
llvm::MachineBasicBlock::copySuccessor
void copySuccessor(MachineBasicBlock *Orig, succ_iterator I)
Copy a successor (and any probability info) from original block to this block's.
Definition: MachineBasicBlock.cpp:851
DEBUG_TYPE
#define DEBUG_TYPE
Definition: X86FlagsCopyLowering.cpp:66
TRI
unsigned const TargetRegisterInfo * TRI
Definition: MachineSink.cpp:1559
llvm::MachineFunctionPass::getAnalysisUsage
void getAnalysisUsage(AnalysisUsage &AU) const override
getAnalysisUsage - Subclasses that override getAnalysisUsage must call this.
Definition: MachineFunctionPass.cpp:102
LLVM_DEBUG
#define LLVM_DEBUG(X)
Definition: Debug.h:101
llvm::X86::COND_O
@ COND_O
Definition: X86BaseInfo.h:81
MachineRegisterInfo.h
llvm::MachineBasicBlock::dump
void dump() const
Definition: MachineBasicBlock.cpp:294
r1
__Z6slow4bii r1 movgt r1
Definition: README.txt:62
INITIALIZE_PASS_BEGIN
INITIALIZE_PASS_BEGIN(X86FlagsCopyLoweringPass, DEBUG_TYPE, "X86 EFLAGS copy lowering", false, false) INITIALIZE_PASS_END(X86FlagsCopyLoweringPass
llvm::dbgs
raw_ostream & dbgs()
dbgs() - This returns a reference to a raw_ostream for debugging messages.
Definition: Debug.cpp:163
SparseBitVector.h
llvm::X86::getCondFromCMov
CondCode getCondFromCMov(const MachineInstr &MI)
Return condition code of a CMov opcode.
Definition: X86InstrInfo.cpp:2836
llvm::MachineOperand::isKill
bool isKill() const
Definition: MachineOperand.h:390
CommandLine.h
llvm::MachineInstrBuilder::addDef
const MachineInstrBuilder & addDef(Register RegNo, unsigned Flags=0, unsigned SubReg=0) const
Add a virtual register definition operand.
Definition: MachineInstrBuilder.h:116
llvm::MachineFunction::getRegInfo
MachineRegisterInfo & getRegInfo()
getRegInfo - Return information about the registers currently in use.
Definition: MachineFunction.h:644
X86.h
llvm::MachineBasicBlock::succ_end
succ_iterator succ_end()
Definition: MachineBasicBlock.h:334
llvm::MachineInstr::getOperand
const MachineOperand & getOperand(unsigned i) const
Definition: MachineInstr.h:499
llvm::MachineBasicBlock::isSuccessor
bool isSuccessor(const MachineBasicBlock *MBB) const
Return true if the specified MBB is a successor of this block.
Definition: MachineBasicBlock.cpp:912
llvm::X86::getCondFromSETCC
CondCode getCondFromSETCC(const MachineInstr &MI)
Return condition code of a SETCC opcode.
Definition: X86InstrInfo.cpp:2826
llvm::TargetRegisterClass
Definition: TargetRegisterInfo.h:46
llvm::AnalysisUsage
Represent the analysis usage information of a pass.
Definition: PassAnalysisSupport.h:47
llvm::MachineOperand::isMBB
bool isMBB() const
isMBB - Tests if this is a MO_MachineBasicBlock operand.
Definition: MachineOperand.h:329
false
Definition: StackSlotColoring.cpp:142
TII
const HexagonInstrInfo * TII
Definition: HexagonCopyToCombine.cpp:129
LLVM_EXPAND_INSTR_SIZES
#define LLVM_EXPAND_INSTR_SIZES(MNEMONIC, SUFFIX)
llvm::MachineOperand
MachineOperand class - Representation of each machine instruction operand.
Definition: MachineOperand.h:49
llvm::make_scope_exit
LLVM_NODISCARD detail::scope_exit< typename std::decay< Callable >::type > make_scope_exit(Callable &&F)
Definition: ScopeExit.h:58
llvm::X86::COND_A
@ COND_A
Definition: X86BaseInfo.h:88
llvm::report_fatal_error
void report_fatal_error(Error Err, bool gen_crash_diag=true)
Report a serious error, calling any installed error handler.
Definition: Error.cpp:143
llvm::STATISTIC
STATISTIC(NumFunctions, "Total number of functions")
llvm::MachineFunction::begin
iterator begin()
Definition: MachineFunction.h:816
getMnemonicFromOpcode
static FlagArithMnemonic getMnemonicFromOpcode(unsigned Opcode)
Definition: X86FlagsCopyLowering.cpp:168
lowering
X86 EFLAGS copy lowering
Definition: X86FlagsCopyLowering.cpp:138
DebugLoc.h
SmallPtrSet.h
Copies
SI Lower i1 Copies
Definition: SILowerI1Copies.cpp:406
llvm::MachineRegisterInfo::getVRegDef
MachineInstr * getVRegDef(Register Reg) const
getVRegDef - Return the machine instr that defines the specified virtual register or null if none is ...
Definition: MachineRegisterInfo.cpp:398
llvm::MachineRegisterInfo::use_empty
bool use_empty(Register RegNo) const
use_empty - Return true if there are no instructions using the specified register.
Definition: MachineRegisterInfo.h:506
llvm::MachineBasicBlock
Definition: MachineBasicBlock.h:95
splitBlock
static MachineBasicBlock & splitBlock(MachineBasicBlock &MBB, MachineInstr &SplitI, const X86InstrInfo &TII)
Definition: X86FlagsCopyLowering.cpp:242
INITIALIZE_PASS_END
#define INITIALIZE_PASS_END(passName, arg, name, cfg, analysis)
Definition: PassSupport.h:58
llvm::MachineRegisterInfo::clearKillFlags
void clearKillFlags(Register Reg) const
clearKillFlags - Iterate over all the uses of the given register and clear the kill flag from the Mac...
Definition: MachineRegisterInfo.cpp:429
llvm::MachineFunction::getSubtarget
const TargetSubtargetInfo & getSubtarget() const
getSubtarget - Return the subtarget for which this machine code is being compiled.
Definition: MachineFunction.h:634
llvm::MachineInstr::getDebugLoc
const DebugLoc & getDebugLoc() const
Returns the debug location id of this MachineInstr.
Definition: MachineInstr.h:418
TargetSchedule.h
MCSchedule.h
llvm::MachineBasicBlock::pred_end
pred_iterator pred_end()
Definition: MachineBasicBlock.h:318
llvm::X86::COND_AE
@ COND_AE
Definition: X86BaseInfo.h:84
llvm::MachineOperand::isReg
bool isReg() const
isReg - Tests if this is a MO_Register operand.
Definition: MachineOperand.h:321
llvm::MachineInstr
Representation of each machine instruction.
Definition: MachineInstr.h:64
llvm::MachineOperand::CreateMBB
static MachineOperand CreateMBB(MachineBasicBlock *MBB, unsigned TargetFlags=0)
Definition: MachineOperand.h:816
llvm::AVRCC::COND_NE
@ COND_NE
Not equal.
Definition: AVRInstrInfo.h:33
MachineConstantPool.h
ArrayRef.h
MachineFunctionPass.h
llvm::MachineFunction::getName
StringRef getName() const
getName - Return the name of the corresponding LLVM function.
Definition: MachineFunction.cpp:542
assert
assert(ImpDefSCC.getReg()==AMDGPU::SCC &&ImpDefSCC.isDef())
llvm::MachineBasicBlock::succ_begin
succ_iterator succ_begin()
Definition: MachineBasicBlock.h:332
llvm::X86::COND_B
@ COND_B
Definition: X86BaseInfo.h:83
SI
StandardInstrumentations SI(Debug, VerifyEach)
llvm::MachineBasicBlock::getParent
const MachineFunction * getParent() const
Return the MachineFunction containing this basic block.
Definition: MachineBasicBlock.h:225
MachineModuleInfo.h
llvm::MachineInstrBuilder::addReg
const MachineInstrBuilder & addReg(Register RegNo, unsigned flags=0, unsigned SubReg=0) const
Add a new virtual register operand.
Definition: MachineInstrBuilder.h:97
llvm::MachineOperand::getReg
Register getReg() const
getReg - Returns the register number.
Definition: MachineOperand.h:360
llvm::MachineBasicBlock::predecessors
iterator_range< pred_iterator > predecessors()
Definition: MachineBasicBlock.h:349
llvm::MachineBasicBlock::instr_begin
instr_iterator instr_begin()
Definition: MachineBasicBlock.h:252
llvm::MachineBasicBlock::pred_empty
bool pred_empty() const
Definition: MachineBasicBlock.h:331
llvm::SmallPtrSetImpl::count
size_type count(ConstPtrType Ptr) const
count - Return 1 if the specified pointer is in the set, 0 otherwise.
Definition: SmallPtrSet.h:382
llvm::MachineBasicBlock::instr_end
instr_iterator instr_end()
Definition: MachineBasicBlock.h:254
llvm::MachineFunction
Definition: MachineFunction.h:234
llvm::MachineInstr::dump
void dump() const
Definition: MachineInstr.cpp:1541
llvm::X86InstrInfo
Definition: X86InstrInfo.h:130
llvm::MachineBasicBlock::getFirstTerminator
iterator getFirstTerminator()
Returns an iterator to the first terminator instruction of this basic block.
Definition: MachineBasicBlock.cpp:241
llvm::MachineOperand::getMBB
MachineBasicBlock * getMBB() const
Definition: MachineOperand.h:552
llvm::any_of
bool any_of(R &&range, UnaryPredicate P)
Provide wrappers to std::any_of which take ranges instead of having to pass begin/end explicitly.
Definition: STLExtras.h:1607
Cond
SmallVector< MachineOperand, 4 > Cond
Definition: BasicBlockSections.cpp:179
llvm::MachineBasicBlock::successors
iterator_range< succ_iterator > successors()
Definition: MachineBasicBlock.h:355
llvm::StringRef
StringRef - Represent a constant reference to a string, i.e.
Definition: StringRef.h:57
llvm::MachineBasicBlock::splice
void splice(iterator Where, MachineBasicBlock *Other, iterator From)
Take an instruction from MBB 'Other' at the position From, and insert it into this MBB right before '...
Definition: MachineBasicBlock.h:950
llvm::MachineInstr::getOpcode
unsigned getOpcode() const
Returns the opcode of this MachineInstr.
Definition: MachineInstr.h:489
llvm_unreachable
#define llvm_unreachable(msg)
Marks that the current location is not supposed to be reachable.
Definition: ErrorHandling.h:134
llvm::ilist_node_impl::getIterator
self_iterator getIterator()
Definition: ilist_node.h:81
TargetSubtargetInfo.h
llvm::MachineInstr::getParent
const MachineBasicBlock * getParent() const
Definition: MachineInstr.h:286
llvm::Pass::dump
void dump() const
Definition: Pass.cpp:131
MRI
unsigned const MachineRegisterInfo * MRI
Definition: AArch64AdvSIMDScalarPass.cpp:105
llvm::Register
Wrapper class representing virtual and physical registers.
Definition: Register.h:19
llvm::find_if
auto find_if(R &&Range, UnaryPredicate P)
Provide wrappers to std::find_if which take ranges instead of having to pass begin/end explicitly.
Definition: STLExtras.h:1627
llvm::MachineRegisterInfo::replaceRegWith
void replaceRegWith(Register FromReg, Register ToReg)
replaceRegWith - Replace all instances of FromReg with ToReg in the machine function.
Definition: MachineRegisterInfo.cpp:380
llvm::X86::AddrNumOperands
@ AddrNumOperands
AddrNumOperands - Total number of operands in a memory reference.
Definition: X86BaseInfo.h:41
MBB
MachineBasicBlock & MBB
Definition: AArch64SLSHardening.cpp:74
llvm::MachineBasicBlock::replaceSuccessor
void replaceSuccessor(MachineBasicBlock *Old, MachineBasicBlock *New)
Replace successor OLD with NEW and update probability info.
Definition: MachineBasicBlock.cpp:811
llvm::MachineBasicBlock::insert
instr_iterator insert(instr_iterator I, MachineInstr *M)
Insert MI into the instruction list before I, possibly inside a bundle.
Definition: MachineBasicBlock.cpp:1314
llvm::ilist_iterator
Iterator for intrusive lists based on ilist_node.
Definition: ilist_iterator.h:57
llvm::MachineInstr::mayStore
bool mayStore(QueryType Type=AnyInBundle) const
Return true if this instruction could possibly modify memory.
Definition: MachineInstr.h:1018
llvm::MachineBasicBlock::normalizeSuccProbs
void normalizeSuccProbs()
Normalize probabilities of all successors so that the sum of them becomes one.
Definition: MachineBasicBlock.h:654
llvm::ReversePostOrderTraversal
Definition: PostOrderIterator.h:290
llvm::MachineBasicBlock::getFallThrough
MachineBasicBlock * getFallThrough()
Return the fallthrough block if the block can implicitly transfer control to the block after it by fa...
Definition: MachineBasicBlock.cpp:921
PostOrderIterator.h
SmallVector.h
llvm::MachineBasicBlock::begin
iterator begin()
Definition: MachineBasicBlock.h:268
MachineInstrBuilder.h
llvm::BuildMI
MachineInstrBuilder BuildMI(MachineFunction &MF, const DebugLoc &DL, const MCInstrDesc &MCID)
Builder interface. Specify how to create the initial instruction itself.
Definition: MachineInstrBuilder.h:328
llvm::MachineOperand::setMBB
void setMBB(MachineBasicBlock *MBB)
Definition: MachineOperand.h:689
ScopeExit.h
llvm::MachineInstr::memoperands
ArrayRef< MachineMemOperand * > memoperands() const
Access to memory operands of the instruction.
Definition: MachineInstr.h:690
MachineOperand.h
llvm::MachineInstr::setDesc
void setDesc(const MCInstrDesc &tid)
Replace the instruction descriptor (thus opcode) of the current instruction with a new one.
Definition: MachineInstr.h:1742
llvm::FunctionPass
FunctionPass class - This class is used to implement most global optimizations.
Definition: Pass.h:298
MSP430CC::COND_E
@ COND_E
Definition: MSP430.h:23
llvm::AnalysisUsage::addRequired
AnalysisUsage & addRequired()
Definition: PassAnalysisSupport.h:75
LLVM_EXPAND_ADC_SBB_INSTR
#define LLVM_EXPAND_ADC_SBB_INSTR(MNEMONIC)
llvm::DebugLoc
A debug info location.
Definition: DebugLoc.h:33
raw_ostream.h
llvm::MachineDominatorTree
DominatorTree Class - Concrete subclass of DominatorTreeBase that is used to compute a normal dominat...
Definition: MachineDominators.h:46
MachineFunction.h
X86InstrInfo.h
llvm::MachineInstr::eraseFromParent
void eraseFromParent()
Unlink 'this' from the containing basic block and delete it.
Definition: MachineInstr.cpp:680
llvm::MachineInstrBundleIterator< MachineInstr >
llvm::MCInstrDesc::getNumOperands
unsigned getNumOperands() const
Return the number of declared MachineOperands for this MachineInstruction.
Definition: MCInstrDesc.h:228
TargetRegisterInfo.h
Debug.h
llvm::X86::COND_NP
@ COND_NP
Definition: X86BaseInfo.h:92
llvm::MachineBasicBlock::end
iterator end()
Definition: MachineBasicBlock.h:270
MachineDominators.h
SmallSet.h
llvm::SmallPtrSetImpl::insert
std::pair< iterator, bool > insert(PtrType Ptr)
Inserts Ptr if and only if there is no element in the container equal to Ptr.
Definition: SmallPtrSet.h:364
llvm::Intrinsic::ID
unsigned ID
Definition: TargetTransformInfo.h:38
llvm::M68k::GetOppositeBranchCondition
static M68k::CondCode GetOppositeBranchCondition(M68k::CondCode CC)
Definition: M68kInstrInfo.h:58