//====- X86FlagsCopyLowering.cpp - Lowers COPY nodes of EFLAGS ------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
/// \file
///
/// Lowers COPY nodes of EFLAGS by directly extracting and preserving individual
/// flag bits.
///
/// We have to do this by carefully analyzing and rewriting the usage of the
/// copied EFLAGS register because there is no general way to rematerialize the
/// entire EFLAGS register safely and efficiently. Using `popf` both forces
/// dynamic stack adjustment and can create correctness issues due to IF, TF,
/// and other non-status flags being overwritten. Sequences involving SAHF
/// don't work on all x86 processors and are often quite slow compared to
/// directly testing a single status flag preserved in its own GPR.
///
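/// Schematically (a simplified sketch, not verbatim MIR), a pair of copies
/// such as:
///
///     %saved:gr32 = COPY $eflags
///     ...
///     $eflags = COPY %saved
///     JCC_1 %bb.1, 4, implicit $eflags
///
/// is instead lowered by saving only the conditions actually used: a SETcc of
/// the needed condition (here 4, COND_E) is inserted next to the original
/// flags definition, and each use re-tests the saved byte:
///
///     %cond:gr8 = SETCCr 4, implicit $eflags
///     ...
///     TEST8rr %cond, %cond, implicit-def $eflags
///     JCC_1 %bb.1, 5, implicit $eflags
///
/// where the branch now tests for COND_NE (5) of the ZF set by the TEST.
///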
//===----------------------------------------------------------------------===//

#include "X86.h"
#include "X86InstrBuilder.h"
#include "X86InstrInfo.h"
#include "X86Subtarget.h"
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/PostOrderIterator.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/ScopeExit.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/CodeGen/MachineBasicBlock.h"
#include "llvm/CodeGen/MachineDominators.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineFunctionPass.h"
#include "llvm/CodeGen/MachineInstr.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineOperand.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/TargetInstrInfo.h"
#include "llvm/CodeGen/TargetRegisterInfo.h"
#include "llvm/IR/DebugLoc.h"
#include "llvm/MC/MCSchedule.h"
#include "llvm/Pass.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/raw_ostream.h"
#include <algorithm>
#include <array>
#include <cassert>
#include <iterator>
#include <numeric>
#include <utility>

using namespace llvm;

#define PASS_KEY "x86-flags-copy-lowering"
#define DEBUG_TYPE PASS_KEY

STATISTIC(NumCopiesEliminated, "Number of copies of EFLAGS eliminated");
STATISTIC(NumSetCCsInserted, "Number of setCC instructions inserted");
STATISTIC(NumTestsInserted, "Number of test instructions inserted");
STATISTIC(NumAddsInserted, "Number of add instructions inserted");

namespace {

// Convenient array type for storing registers associated with each condition.
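// Indexed by X86::CondCode; an entry of zero means no register holds that
// condition yet (virtual register numbers are never zero).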
using CondRegArray = std::array<unsigned, X86::LAST_VALID_COND + 1>;

class X86FlagsCopyLoweringPass : public MachineFunctionPass {
public:
  X86FlagsCopyLoweringPass() : MachineFunctionPass(ID) { }

  StringRef getPassName() const override { return "X86 EFLAGS copy lowering"; }
  bool runOnMachineFunction(MachineFunction &MF) override;
  void getAnalysisUsage(AnalysisUsage &AU) const override;

  /// Pass identification, replacement for typeid.
  static char ID;

private:
  MachineRegisterInfo *MRI = nullptr;
  const X86Subtarget *Subtarget = nullptr;
  const X86InstrInfo *TII = nullptr;
  const TargetRegisterInfo *TRI = nullptr;
  const TargetRegisterClass *PromoteRC = nullptr;
  MachineDominatorTree *MDT = nullptr;

  CondRegArray collectCondsInRegs(MachineBasicBlock &MBB,
                                  MachineBasicBlock::iterator CopyDefI);

  Register promoteCondToReg(MachineBasicBlock &MBB,
                            MachineBasicBlock::iterator TestPos,
                            const DebugLoc &TestLoc, X86::CondCode Cond);
  std::pair<unsigned, bool> getCondOrInverseInReg(
      MachineBasicBlock &TestMBB, MachineBasicBlock::iterator TestPos,
      const DebugLoc &TestLoc, X86::CondCode Cond, CondRegArray &CondRegs);
  void insertTest(MachineBasicBlock &MBB, MachineBasicBlock::iterator Pos,
                  const DebugLoc &Loc, unsigned Reg);

  void rewriteArithmetic(MachineBasicBlock &TestMBB,
                         MachineBasicBlock::iterator TestPos,
                         const DebugLoc &TestLoc, MachineInstr &MI,
                         MachineOperand &FlagUse, CondRegArray &CondRegs);
  void rewriteCMov(MachineBasicBlock &TestMBB,
                   MachineBasicBlock::iterator TestPos, const DebugLoc &TestLoc,
                   MachineInstr &CMovI, MachineOperand &FlagUse,
                   CondRegArray &CondRegs);
  void rewriteFCMov(MachineBasicBlock &TestMBB,
                    MachineBasicBlock::iterator TestPos,
                    const DebugLoc &TestLoc, MachineInstr &CMovI,
                    MachineOperand &FlagUse, CondRegArray &CondRegs);
  void rewriteCondJmp(MachineBasicBlock &TestMBB,
                      MachineBasicBlock::iterator TestPos,
                      const DebugLoc &TestLoc, MachineInstr &JmpI,
                      CondRegArray &CondRegs);
  void rewriteCopy(MachineInstr &MI, MachineOperand &FlagUse,
                   MachineInstr &CopyDefI);
  void rewriteSetCC(MachineBasicBlock &TestMBB,
                    MachineBasicBlock::iterator TestPos,
                    const DebugLoc &TestLoc, MachineInstr &SetCCI,
                    MachineOperand &FlagUse, CondRegArray &CondRegs);
};

} // end anonymous namespace

INITIALIZE_PASS_BEGIN(X86FlagsCopyLoweringPass, DEBUG_TYPE,
                      "X86 EFLAGS copy lowering", false, false)
INITIALIZE_PASS_END(X86FlagsCopyLoweringPass, DEBUG_TYPE,
                    "X86 EFLAGS copy lowering", false, false)

FunctionPass *llvm::createX86FlagsCopyLoweringPass() {
  return new X86FlagsCopyLoweringPass();
}

char X86FlagsCopyLoweringPass::ID = 0;

void X86FlagsCopyLoweringPass::getAnalysisUsage(AnalysisUsage &AU) const {
  AU.addRequired<MachineDominatorTree>();
  MachineFunctionPass::getAnalysisUsage(AU);
}

namespace {
/// An enumeration of the arithmetic instruction mnemonics which have
/// interesting flag semantics.
///
/// We can map instruction opcodes into these mnemonics to make it easy to
/// dispatch with specific functionality.
enum class FlagArithMnemonic {
  ADC,
  RCL,
  RCR,
  SBB,
  SETB,
};
} // namespace

static FlagArithMnemonic getMnemonicFromOpcode(unsigned Opcode) {
  switch (Opcode) {
  default:
    report_fatal_error("No support for lowering a copy into EFLAGS when used "
                       "by this instruction!");

#define LLVM_EXPAND_INSTR_SIZES(MNEMONIC, SUFFIX)                              \
  case X86::MNEMONIC##8##SUFFIX:                                               \
  case X86::MNEMONIC##16##SUFFIX:                                              \
  case X86::MNEMONIC##32##SUFFIX:                                              \
  case X86::MNEMONIC##64##SUFFIX:

#define LLVM_EXPAND_ADC_SBB_INSTR(MNEMONIC)                                    \
  LLVM_EXPAND_INSTR_SIZES(MNEMONIC, rr)                                        \
  LLVM_EXPAND_INSTR_SIZES(MNEMONIC, rr_REV)                                    \
  LLVM_EXPAND_INSTR_SIZES(MNEMONIC, rm)                                        \
  LLVM_EXPAND_INSTR_SIZES(MNEMONIC, mr)                                        \
  case X86::MNEMONIC##8ri:                                                     \
  case X86::MNEMONIC##16ri8:                                                   \
  case X86::MNEMONIC##32ri8:                                                   \
  case X86::MNEMONIC##64ri8:                                                   \
  case X86::MNEMONIC##16ri:                                                    \
  case X86::MNEMONIC##32ri:                                                    \
  case X86::MNEMONIC##64ri32:                                                  \
  case X86::MNEMONIC##8mi:                                                     \
  case X86::MNEMONIC##16mi8:                                                   \
  case X86::MNEMONIC##32mi8:                                                   \
  case X86::MNEMONIC##64mi8:                                                   \
  case X86::MNEMONIC##16mi:                                                    \
  case X86::MNEMONIC##32mi:                                                    \
  case X86::MNEMONIC##64mi32:                                                  \
  case X86::MNEMONIC##8i8:                                                     \
  case X86::MNEMONIC##16i16:                                                   \
  case X86::MNEMONIC##32i32:                                                   \
  case X86::MNEMONIC##64i32:

  LLVM_EXPAND_ADC_SBB_INSTR(ADC)
    return FlagArithMnemonic::ADC;

  LLVM_EXPAND_ADC_SBB_INSTR(SBB)
    return FlagArithMnemonic::SBB;

#undef LLVM_EXPAND_ADC_SBB_INSTR

  LLVM_EXPAND_INSTR_SIZES(RCL, rCL)
  LLVM_EXPAND_INSTR_SIZES(RCL, r1)
  LLVM_EXPAND_INSTR_SIZES(RCL, ri)
    return FlagArithMnemonic::RCL;

  LLVM_EXPAND_INSTR_SIZES(RCR, rCL)
  LLVM_EXPAND_INSTR_SIZES(RCR, r1)
  LLVM_EXPAND_INSTR_SIZES(RCR, ri)
    return FlagArithMnemonic::RCR;

#undef LLVM_EXPAND_INSTR_SIZES

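  // SETB_C32r and SETB_C64r are pseudos that materialize the carry flag into a
  // full register (they expand to a same-register SBB), so they read CF just
  // like SETB does.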
  case X86::SETB_C32r:
  case X86::SETB_C64r:
    return FlagArithMnemonic::SETB;
  }
}

static MachineBasicBlock &splitBlock(MachineBasicBlock &MBB,
                                     MachineInstr &SplitI,
                                     const X86InstrInfo &TII) {
  MachineFunction &MF = *MBB.getParent();

  assert(SplitI.getParent() == &MBB &&
         "Split instruction must be in the split block!");
  assert(SplitI.isBranch() &&
         "Only designed to split a tail of branch instructions!");
  assert(X86::getCondFromBranch(SplitI) != X86::COND_INVALID &&
         "Must split on an actual jCC instruction!");

  // Dig out the previous instruction to the split point.
  MachineInstr &PrevI = *std::prev(SplitI.getIterator());
  assert(PrevI.isBranch() && "Must split after a branch!");
  assert(X86::getCondFromBranch(PrevI) != X86::COND_INVALID &&
         "Must split after an actual jCC instruction!");
  assert(!std::prev(PrevI.getIterator())->isTerminator() &&
         "Must only have this one terminator prior to the split!");

  // Grab the one successor edge that will stay in `MBB`.
  MachineBasicBlock &UnsplitSucc = *PrevI.getOperand(0).getMBB();

  // Analyze the original block to see if we are actually splitting an edge
  // into two edges. This can happen when we have multiple conditional jumps to
  // the same successor.
  bool IsEdgeSplit =
      std::any_of(SplitI.getIterator(), MBB.instr_end(),
                  [&](MachineInstr &MI) {
                    assert(MI.isTerminator() &&
                           "Should only have spliced terminators!");
                    return llvm::any_of(
                        MI.operands(), [&](MachineOperand &MOp) {
                          return MOp.isMBB() && MOp.getMBB() == &UnsplitSucc;
                        });
                  }) ||
      MBB.getFallThrough() == &UnsplitSucc;
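  // (The fallthrough check covers the tail reaching UnsplitSucc implicitly, in
  // which case the split likewise turns one edge into two.)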

  MachineBasicBlock &NewMBB = *MF.CreateMachineBasicBlock();

  // Insert the new block immediately after the current one. Any existing
  // fallthrough will be sunk into this new block anyway.
  MF.insert(std::next(MachineFunction::iterator(&MBB)), &NewMBB);

  // Splice the tail of instructions into the new block.
  NewMBB.splice(NewMBB.end(), &MBB, SplitI.getIterator(), MBB.end());

  // Copy the necessary successors (and their probability info) into the new
  // block.
  for (auto SI = MBB.succ_begin(), SE = MBB.succ_end(); SI != SE; ++SI)
    if (IsEdgeSplit || *SI != &UnsplitSucc)
      NewMBB.copySuccessor(&MBB, SI);
  // Normalize the probabilities if we didn't end up splitting the edge.
  if (!IsEdgeSplit)
    NewMBB.normalizeSuccProbs();

  // Now replace all of the moved successors in the original block with the new
  // block. This will merge their probabilities.
  for (MachineBasicBlock *Succ : NewMBB.successors())
    if (Succ != &UnsplitSucc)
      MBB.replaceSuccessor(Succ, &NewMBB);

  // We should always end up replacing at least one successor.
  assert(MBB.isSuccessor(&NewMBB) &&
         "Failed to make the new block a successor!");

  // Now update all the PHIs.
  for (MachineBasicBlock *Succ : NewMBB.successors()) {
    for (MachineInstr &MI : *Succ) {
      if (!MI.isPHI())
        break;

      for (int OpIdx = 1, NumOps = MI.getNumOperands(); OpIdx < NumOps;
           OpIdx += 2) {
        MachineOperand &OpV = MI.getOperand(OpIdx);
        MachineOperand &OpMBB = MI.getOperand(OpIdx + 1);
        assert(OpMBB.isMBB() && "Block operand to a PHI is not a block!");
        if (OpMBB.getMBB() != &MBB)
          continue;

        // Replace the operand for unsplit successors.
        if (!IsEdgeSplit || Succ != &UnsplitSucc) {
          OpMBB.setMBB(&NewMBB);

          // We have to continue scanning as there may be multiple entries in
          // the PHI.
          continue;
        }

        // When we have split the edge, append a new successor.
        MI.addOperand(MF, OpV);
        MI.addOperand(MF, MachineOperand::CreateMBB(&NewMBB));
        break;
      }
    }
  }

  return NewMBB;
}

static X86::CondCode getCondFromFCMOV(unsigned Opcode) {
  switch (Opcode) {
  default: return X86::COND_INVALID;
  case X86::CMOVBE_Fp32: case X86::CMOVBE_Fp64: case X86::CMOVBE_Fp80:
    return X86::COND_BE;
  case X86::CMOVB_Fp32: case X86::CMOVB_Fp64: case X86::CMOVB_Fp80:
    return X86::COND_B;
  case X86::CMOVE_Fp32: case X86::CMOVE_Fp64: case X86::CMOVE_Fp80:
    return X86::COND_E;
  case X86::CMOVNBE_Fp32: case X86::CMOVNBE_Fp64: case X86::CMOVNBE_Fp80:
    return X86::COND_A;
  case X86::CMOVNB_Fp32: case X86::CMOVNB_Fp64: case X86::CMOVNB_Fp80:
    return X86::COND_AE;
  case X86::CMOVNE_Fp32: case X86::CMOVNE_Fp64: case X86::CMOVNE_Fp80:
    return X86::COND_NE;
  case X86::CMOVNP_Fp32: case X86::CMOVNP_Fp64: case X86::CMOVNP_Fp80:
    return X86::COND_NP;
  case X86::CMOVP_Fp32: case X86::CMOVP_Fp64: case X86::CMOVP_Fp80:
    return X86::COND_P;
  }
}

bool X86FlagsCopyLoweringPass::runOnMachineFunction(MachineFunction &MF) {
  LLVM_DEBUG(dbgs() << "********** " << getPassName() << " : " << MF.getName()
                    << " **********\n");

  Subtarget = &MF.getSubtarget<X86Subtarget>();
  MRI = &MF.getRegInfo();
  TII = Subtarget->getInstrInfo();
  TRI = Subtarget->getRegisterInfo();
  MDT = &getAnalysis<MachineDominatorTree>();
  PromoteRC = &X86::GR8RegClass;

  if (MF.begin() == MF.end())
    // Nothing to do for a degenerate empty function...
    return false;

  // Collect the copies in RPO so that when there are chains where a copy is in
  // turn copied again we visit the first one first. This ensures we can find
  // viable locations for testing the original EFLAGS that dominate all the
  // uses across complex CFGs.
  SmallVector<MachineInstr *, 4> Copies;
  ReversePostOrderTraversal<MachineFunction *> RPOT(&MF);
  for (MachineBasicBlock *MBB : RPOT)
    for (MachineInstr &MI : *MBB)
      if (MI.getOpcode() == TargetOpcode::COPY &&
          MI.getOperand(0).getReg() == X86::EFLAGS)
        Copies.push_back(&MI);

  for (MachineInstr *CopyI : Copies) {
    MachineBasicBlock &MBB = *CopyI->getParent();

    MachineOperand &VOp = CopyI->getOperand(1);
    assert(VOp.isReg() &&
           "The input to the copy for EFLAGS should always be a register!");
    MachineInstr &CopyDefI = *MRI->getVRegDef(VOp.getReg());
    if (CopyDefI.getOpcode() != TargetOpcode::COPY) {
      // FIXME: The big likely candidates here are PHI nodes. We could in theory
      // handle PHI nodes, but it gets really, really hard. Insanely hard. Hard
      // enough that it is probably better to change every other part of LLVM
      // to avoid creating them. The issue is that once we have PHIs we won't
      // know which original EFLAGS value we need to capture with our setCCs
      // below. The end result will be computing a complete set of setCCs that
      // we *might* want, computing them in every place where we copy *out* of
      // EFLAGS and then doing SSA formation on all of them to insert necessary
      // PHI nodes and consume those here. Then hoping that somehow we DCE the
      // unnecessary ones. This DCE seems very unlikely to be successful and so
      // we will almost certainly end up with a glut of dead setCC
      // instructions. Until we have a motivating test case and fail to avoid
      // it by changing other parts of LLVM's lowering, we refuse to handle
      // this complex case here.
      LLVM_DEBUG(
          dbgs() << "ERROR: Encountered unexpected def of an eflags copy: ";
          CopyDefI.dump());
      report_fatal_error(
          "Cannot lower EFLAGS copy unless it is defined in turn by a copy!");
    }

    auto Cleanup = make_scope_exit([&] {
      // All uses of the EFLAGS copy are now rewritten, kill the copy into
      // eflags and if dead the copy from.
      CopyI->eraseFromParent();
      if (MRI->use_empty(CopyDefI.getOperand(0).getReg()))
        CopyDefI.eraseFromParent();
      ++NumCopiesEliminated;
    });

    MachineOperand &DOp = CopyI->getOperand(0);
    assert(DOp.isDef() && "Expected register def!");
    assert(DOp.getReg() == X86::EFLAGS && "Unexpected copy def register!");
    if (DOp.isDead())
      continue;

    MachineBasicBlock *TestMBB = CopyDefI.getParent();
    auto TestPos = CopyDefI.getIterator();
    DebugLoc TestLoc = CopyDefI.getDebugLoc();

    LLVM_DEBUG(dbgs() << "Rewriting copy: "; CopyI->dump());

    // Walk up across live-in EFLAGS to find where they were actually def'ed.
    //
    // This copy's def may just be part of a region of blocks covered by
    // a single def of EFLAGS and we want to find the top of that region where
    // possible.
    //
    // This is essentially a search for a *candidate* reaching definition
    // location. We don't need to ever find the actual reaching definition here,
    // but we want to walk up the dominator tree to find the highest point which
    // would be viable for such a definition.
    auto HasEFLAGSClobber = [&](MachineBasicBlock::iterator Begin,
                                MachineBasicBlock::iterator End) {
      // Scan backwards as we expect these to be relatively short and often find
      // a clobber near the end.
      return llvm::any_of(
          llvm::reverse(llvm::make_range(Begin, End)), [&](MachineInstr &MI) {
            // Flag any instruction (other than the copy we are
            // currently rewriting) that defs EFLAGS.
            return &MI != CopyI && MI.findRegisterDefOperand(X86::EFLAGS);
          });
    };
    auto HasEFLAGSClobberPath = [&](MachineBasicBlock *BeginMBB,
                                    MachineBasicBlock *EndMBB) {
      assert(MDT->dominates(BeginMBB, EndMBB) &&
             "Only support paths down the dominator tree!");
      SmallPtrSet<MachineBasicBlock *, 4> Visited;
      SmallVector<MachineBasicBlock *, 4> Worklist;
      // We terminate at the beginning. No need to scan it.
      Visited.insert(BeginMBB);
      Worklist.push_back(EndMBB);
      do {
        auto *MBB = Worklist.pop_back_val();
        for (auto *PredMBB : MBB->predecessors()) {
          if (!Visited.insert(PredMBB).second)
            continue;
          if (HasEFLAGSClobber(PredMBB->begin(), PredMBB->end()))
            return true;
          // Enqueue this block to walk its predecessors.
          Worklist.push_back(PredMBB);
        }
      } while (!Worklist.empty());
      // No clobber found along a path from the begin to end.
      return false;
    };
    while (TestMBB->isLiveIn(X86::EFLAGS) && !TestMBB->pred_empty() &&
           !HasEFLAGSClobber(TestMBB->begin(), TestPos)) {
      // Find the nearest common dominator of the predecessors, as
      // that will be the best candidate to hoist into.
      MachineBasicBlock *HoistMBB =
          std::accumulate(std::next(TestMBB->pred_begin()), TestMBB->pred_end(),
                          *TestMBB->pred_begin(),
                          [&](MachineBasicBlock *LHS, MachineBasicBlock *RHS) {
                            return MDT->findNearestCommonDominator(LHS, RHS);
                          });
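      // (std::accumulate folds findNearestCommonDominator across all of
      // TestMBB's predecessors, seeded with the first predecessor.)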

      // Now we need to scan all predecessors that may be reached along paths
      // to the hoist block. A clobber anywhere in any of these blocks blocks
      // the hoist. Note that this even handles loops because we require *no*
      // clobbers.
      if (HasEFLAGSClobberPath(HoistMBB, TestMBB))
        break;

      // We also need the terminators to not sneakily clobber flags.
      if (HasEFLAGSClobber(HoistMBB->getFirstTerminator()->getIterator(),
                           HoistMBB->instr_end()))
        break;

      // We found a viable location, hoist our test position to it.
      TestMBB = HoistMBB;
      TestPos = TestMBB->getFirstTerminator()->getIterator();
      // Clear the debug location as it would just be confusing after hoisting.
      TestLoc = DebugLoc();
    }
    LLVM_DEBUG({
      auto DefIt = llvm::find_if(
          llvm::reverse(llvm::make_range(TestMBB->instr_begin(), TestPos)),
          [&](MachineInstr &MI) {
            return MI.findRegisterDefOperand(X86::EFLAGS);
          });
      if (DefIt.base() != TestMBB->instr_begin()) {
        dbgs() << " Using EFLAGS defined by: ";
        DefIt->dump();
      } else {
        dbgs() << " Using live-in flags for BB:\n";
        TestMBB->dump();
      }
    });

    // While rewriting uses, we buffer jumps and rewrite them in a second pass
    // because doing so will perturb the CFG that we are walking to find the
    // uses in the first place.
    SmallVector<MachineInstr *, 4> JmpIs;

    // Gather the condition flags that have already been preserved in
    // registers. We do this from scratch each time as we expect there to be
    // very few of them and we expect to not revisit the same copy definition
    // many times. If either of those changes sufficiently we could build a map
    // of these up front instead.
    CondRegArray CondRegs = collectCondsInRegs(*TestMBB, TestPos);

    // Collect the basic blocks we need to scan. Typically this will just be
    // a single basic block but we may have to scan multiple blocks if the
    // EFLAGS copy lives into successors.
    SmallPtrSet<MachineBasicBlock *, 2> VisitedBlocks;
    SmallVector<MachineBasicBlock *, 2> Blocks;
    Blocks.push_back(&MBB);

    do {
      MachineBasicBlock &UseMBB = *Blocks.pop_back_val();

      // Track if/when we find a kill of the flags in this block.
      bool FlagsKilled = false;

      // In most cases, we walk from the beginning to the end of the block. But
      // when the block is the same block as the copy is from, we will visit it
      // twice. The first time we start from the copy and go to the end. The
      // second time we start from the beginning and go to the copy. This lets
      // us handle copies inside of cycles.
      // FIXME: This loop is *super* confusing. This is at least in part
      // a symptom of all of this routine needing to be refactored into
      // documentable components. Once done, there may be a better way to write
      // this loop.
      for (auto MII = (&UseMBB == &MBB && !VisitedBlocks.count(&UseMBB))
                          ? std::next(CopyI->getIterator())
                          : UseMBB.instr_begin(),
                MIE = UseMBB.instr_end();
           MII != MIE;) {
        MachineInstr &MI = *MII++;
        // If we are in the original copy block and encounter either the copy
        // def or the copy itself, break so that we don't re-process any part of
        // the block or process the instructions in the range that was copied
        // over.
        if (&MI == CopyI || &MI == &CopyDefI) {
          assert(&UseMBB == &MBB && VisitedBlocks.count(&MBB) &&
                 "Should only encounter these on the second pass over the "
                 "original block.");
          break;
        }

        MachineOperand *FlagUse = MI.findRegisterUseOperand(X86::EFLAGS);
        if (!FlagUse) {
          if (MI.findRegisterDefOperand(X86::EFLAGS)) {
            // If EFLAGS are defined, it's as-if they were killed. We can stop
            // scanning here.
            //
            // NB!!! Many instructions only modify some flags. LLVM currently
            // models this as clobbering all flags, but if that ever changes
            // this will need to be carefully updated to handle that more
            // complex logic.
            FlagsKilled = true;
            break;
          }
          continue;
        }

        LLVM_DEBUG(dbgs() << " Rewriting use: "; MI.dump());

        // Check the kill flag before we rewrite as that may change it.
        if (FlagUse->isKill())
          FlagsKilled = true;

        // Once we encounter a branch, the rest of the instructions must also be
        // branches. We can't rewrite in place here, so we handle them below.
        //
        // Note that we don't have to handle tail calls here, even conditional
        // tail calls, as those are not introduced into the X86 MI until post-RA
        // branch folding or block placement. As a consequence, we get to deal
        // with the simpler formulation of conditional branches followed by tail
        // calls.
        if (X86::getCondFromBranch(MI) != X86::COND_INVALID) {
          auto JmpIt = MI.getIterator();
          do {
            JmpIs.push_back(&*JmpIt);
            ++JmpIt;
          } while (JmpIt != UseMBB.instr_end() &&
                   X86::getCondFromBranch(*JmpIt) != X86::COND_INVALID);
          break;
        }

        // Otherwise we can just rewrite in-place.
        if (X86::getCondFromCMov(MI) != X86::COND_INVALID) {
          rewriteCMov(*TestMBB, TestPos, TestLoc, MI, *FlagUse, CondRegs);
        } else if (getCondFromFCMOV(MI.getOpcode()) != X86::COND_INVALID) {
          rewriteFCMov(*TestMBB, TestPos, TestLoc, MI, *FlagUse, CondRegs);
        } else if (X86::getCondFromSETCC(MI) != X86::COND_INVALID) {
          rewriteSetCC(*TestMBB, TestPos, TestLoc, MI, *FlagUse, CondRegs);
        } else if (MI.getOpcode() == TargetOpcode::COPY) {
          rewriteCopy(MI, *FlagUse, CopyDefI);
        } else {
          // We assume all other instructions that use flags also def them.
          assert(MI.findRegisterDefOperand(X86::EFLAGS) &&
                 "Expected a def of EFLAGS for this instruction!");

          // NB!!! Several arithmetic instructions only *partially* update
          // flags. Theoretically, we could generate MI code sequences that
          // would rely on this fact and observe different flags independently.
          // But currently LLVM models all of these instructions as clobbering
          // all the flags in an undef way. We rely on that to simplify the
          // logic.
          FlagsKilled = true;

          // Generically handle remaining uses as arithmetic instructions.
          rewriteArithmetic(*TestMBB, TestPos, TestLoc, MI, *FlagUse,
                            CondRegs);
        }

        // If this was the last use of the flags, we're done.
        if (FlagsKilled)
          break;
      }

      // If the flags were killed, we're done with this block.
      if (FlagsKilled)
        continue;

      // Otherwise we need to scan successors for ones where the flags live-in
      // and queue those up for processing.
      for (MachineBasicBlock *SuccMBB : UseMBB.successors())
        if (SuccMBB->isLiveIn(X86::EFLAGS) &&
            VisitedBlocks.insert(SuccMBB).second) {
          // We currently don't do any PHI insertion and so we require that the
          // test basic block dominates all of the use basic blocks. Further, we
          // can't have a cycle from the test block back to itself as that would
          // create a cycle requiring a PHI to break it.
          //
          // We could in theory do PHI insertion here if it becomes useful by
          // just taking undef values in along every edge that we don't trace
          // this EFLAGS copy along. This isn't as bad as fully general PHI
          // insertion, but still seems like a great deal of complexity.
          //
          // Because it is theoretically possible that some earlier MI pass or
          // other lowering transformation could induce this to happen, we do
          // a hard check even in non-debug builds here.
          if (SuccMBB == TestMBB || !MDT->dominates(TestMBB, SuccMBB)) {
            LLVM_DEBUG({
              dbgs()
                  << "ERROR: Encountered use that is not dominated by our test "
                     "basic block! Rewriting this would require inserting PHI "
                     "nodes to track the flag state across the CFG.\n\nTest "
                     "block:\n";
              TestMBB->dump();
              dbgs() << "Use block:\n";
              SuccMBB->dump();
            });
            report_fatal_error(
                "Cannot lower EFLAGS copy when original copy def "
                "does not dominate all uses.");
          }

          Blocks.push_back(SuccMBB);

          // After this, EFLAGS will be recreated before each use.
          SuccMBB->removeLiveIn(X86::EFLAGS);
        }
    } while (!Blocks.empty());

    // Now rewrite the jumps that use the flags. These we handle specially
    // because if there are multiple jumps in a single basic block we'll have
    // to do surgery on the CFG.
    MachineBasicBlock *LastJmpMBB = nullptr;
    for (MachineInstr *JmpI : JmpIs) {
      // Past the first jump within a basic block we need to split the blocks
      // apart.
      if (JmpI->getParent() == LastJmpMBB)
        splitBlock(*JmpI->getParent(), *JmpI, *TII);
      else
        LastJmpMBB = JmpI->getParent();

      rewriteCondJmp(*TestMBB, TestPos, TestLoc, *JmpI, CondRegs);
    }

    // FIXME: Mark the last use of EFLAGS before the copy's def as a kill if
    // the copy's def operand is itself a kill.
  }

#ifndef NDEBUG
  for (MachineBasicBlock &MBB : MF)
    for (MachineInstr &MI : MBB)
      if (MI.getOpcode() == TargetOpcode::COPY &&
          (MI.getOperand(0).getReg() == X86::EFLAGS ||
           MI.getOperand(1).getReg() == X86::EFLAGS)) {
        LLVM_DEBUG(dbgs() << "ERROR: Found a COPY involving EFLAGS: ";
                   MI.dump());
        llvm_unreachable("Unlowered EFLAGS copy!");
      }
#endif

  return true;
}

/// Collect any conditions that have already been set in registers so that we
/// can re-use them rather than adding duplicates.
CondRegArray X86FlagsCopyLoweringPass::collectCondsInRegs(
    MachineBasicBlock &MBB, MachineBasicBlock::iterator TestPos) {
  CondRegArray CondRegs = {};

  // Scan backwards across the range of instructions with live EFLAGS.
  for (MachineInstr &MI :
       llvm::reverse(llvm::make_range(MBB.instr_begin(), TestPos))) {
    X86::CondCode Cond = X86::getCondFromSETCC(MI);
    if (Cond != X86::COND_INVALID && !MI.mayStore() &&
        MI.getOperand(0).isReg() && MI.getOperand(0).getReg().isVirtual()) {
      assert(MI.getOperand(0).isDef() &&
             "A non-storing SETcc should always define a register!");
      CondRegs[Cond] = MI.getOperand(0).getReg();
    }

    // Stop scanning when we see the first definition of the EFLAGS as prior to
    // this we would potentially capture the wrong flag state.
    if (MI.findRegisterDefOperand(X86::EFLAGS))
      break;
  }
  return CondRegs;
}

Register X86FlagsCopyLoweringPass::promoteCondToReg(
    MachineBasicBlock &TestMBB, MachineBasicBlock::iterator TestPos,
    const DebugLoc &TestLoc, X86::CondCode Cond) {
  Register Reg = MRI->createVirtualRegister(PromoteRC);
  auto SetI = BuildMI(TestMBB, TestPos, TestLoc, TII->get(X86::SETCCr), Reg)
                  .addImm(Cond);
  (void)SetI;
  LLVM_DEBUG(dbgs() << " save cond: "; SetI->dump());
  ++NumSetCCsInserted;
  return Reg;
}

std::pair<unsigned, bool> X86FlagsCopyLoweringPass::getCondOrInverseInReg(
    MachineBasicBlock &TestMBB, MachineBasicBlock::iterator TestPos,
    const DebugLoc &TestLoc, X86::CondCode Cond, CondRegArray &CondRegs) {
  unsigned &CondReg = CondRegs[Cond];
  unsigned &InvCondReg = CondRegs[X86::GetOppositeBranchCondition(Cond)];
  if (!CondReg && !InvCondReg)
    CondReg = promoteCondToReg(TestMBB, TestPos, TestLoc, Cond);

  if (CondReg)
    return {CondReg, false};
  else
    return {InvCondReg, true};
}

void X86FlagsCopyLoweringPass::insertTest(MachineBasicBlock &MBB,
                                          MachineBasicBlock::iterator Pos,
                                          const DebugLoc &Loc, unsigned Reg) {
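  // TEST8rr sets ZF precisely when Reg is zero. The saved condition byte is
  // one when the original condition held, so callers re-materialize the
  // original condition as COND_NE (and its inverse as COND_E) after this test.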
  auto TestI =
      BuildMI(MBB, Pos, Loc, TII->get(X86::TEST8rr)).addReg(Reg).addReg(Reg);
  (void)TestI;
  LLVM_DEBUG(dbgs() << " test cond: "; TestI->dump());
  ++NumTestsInserted;
}

void X86FlagsCopyLoweringPass::rewriteArithmetic(
    MachineBasicBlock &TestMBB, MachineBasicBlock::iterator TestPos,
    const DebugLoc &TestLoc, MachineInstr &MI, MachineOperand &FlagUse,
    CondRegArray &CondRegs) {
  // Arithmetic is either reading CF or OF. Figure out which condition we need
  // to preserve in a register.
  X86::CondCode Cond = X86::COND_INVALID;

  // The addend to use to reset CF or OF when added to the flag value.
  int Addend = 0;

  switch (getMnemonicFromOpcode(MI.getOpcode())) {
  case FlagArithMnemonic::ADC:
  case FlagArithMnemonic::RCL:
  case FlagArithMnemonic::RCR:
  case FlagArithMnemonic::SBB:
  case FlagArithMnemonic::SETB:
    Cond = X86::COND_B; // CF == 1
    // Set up an addend that when one is added will need a carry due to not
    // having a higher bit available.
    Addend = 255;
    break;
  }
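
  // With the 0/1 condition byte in CondReg, adding 255 carries out of bit 7
  // exactly when the byte is 1, recreating CF immediately before the rewritten
  // arithmetic instruction consumes it.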

  // Now get a register that contains the value of the flag input to the
  // arithmetic. We require exactly this flag to simplify the arithmetic
  // required to materialize it back into the flag.
  unsigned &CondReg = CondRegs[Cond];
  if (!CondReg)
    CondReg = promoteCondToReg(TestMBB, TestPos, TestLoc, Cond);

  MachineBasicBlock &MBB = *MI.getParent();

  // Insert an instruction that will set the flag back to the desired value.
  Register TmpReg = MRI->createVirtualRegister(PromoteRC);
  auto AddI =
      BuildMI(MBB, MI.getIterator(), MI.getDebugLoc(), TII->get(X86::ADD8ri))
          .addDef(TmpReg, RegState::Dead)
          .addReg(CondReg)
          .addImm(Addend);
  (void)AddI;
  LLVM_DEBUG(dbgs() << " add cond: "; AddI->dump());
  ++NumAddsInserted;
  FlagUse.setIsKill(true);
}

void X86FlagsCopyLoweringPass::rewriteCMov(MachineBasicBlock &TestMBB,
                                           MachineBasicBlock::iterator TestPos,
                                           const DebugLoc &TestLoc,
                                           MachineInstr &CMovI,
                                           MachineOperand &FlagUse,
                                           CondRegArray &CondRegs) {
  // First get the register containing this specific condition.
  X86::CondCode Cond = X86::getCondFromCMov(CMovI);
  unsigned CondReg;
  bool Inverted;
  std::tie(CondReg, Inverted) =
      getCondOrInverseInReg(TestMBB, TestPos, TestLoc, Cond, CondRegs);

  MachineBasicBlock &MBB = *CMovI.getParent();

  // Insert a direct test of the saved register.
  insertTest(MBB, CMovI.getIterator(), CMovI.getDebugLoc(), CondReg);

  // Rewrite the CMov to use the !ZF flag from the test, and then kill its use
  // of the flags afterward.
  CMovI.getOperand(CMovI.getDesc().getNumOperands() - 1)
      .setImm(Inverted ? X86::COND_E : X86::COND_NE);
  FlagUse.setIsKill(true);
  LLVM_DEBUG(dbgs() << " fixed cmov: "; CMovI.dump());
}

void X86FlagsCopyLoweringPass::rewriteFCMov(MachineBasicBlock &TestMBB,
                                            MachineBasicBlock::iterator TestPos,
                                            const DebugLoc &TestLoc,
                                            MachineInstr &CMovI,
                                            MachineOperand &FlagUse,
                                            CondRegArray &CondRegs) {
  // First get the register containing this specific condition.
  X86::CondCode Cond = getCondFromFCMOV(CMovI.getOpcode());
  unsigned CondReg;
  bool Inverted;
  std::tie(CondReg, Inverted) =
      getCondOrInverseInReg(TestMBB, TestPos, TestLoc, Cond, CondRegs);

  MachineBasicBlock &MBB = *CMovI.getParent();

  // Insert a direct test of the saved register.
  insertTest(MBB, CMovI.getIterator(), CMovI.getDebugLoc(), CondReg);

  auto getFCMOVOpcode = [](unsigned Opcode, bool Inverted) {
    switch (Opcode) {
    default: llvm_unreachable("Unexpected opcode!");
    case X86::CMOVBE_Fp32: case X86::CMOVNBE_Fp32:
    case X86::CMOVB_Fp32: case X86::CMOVNB_Fp32:
    case X86::CMOVE_Fp32: case X86::CMOVNE_Fp32:
    case X86::CMOVP_Fp32: case X86::CMOVNP_Fp32:
      return Inverted ? X86::CMOVE_Fp32 : X86::CMOVNE_Fp32;
    case X86::CMOVBE_Fp64: case X86::CMOVNBE_Fp64:
    case X86::CMOVB_Fp64: case X86::CMOVNB_Fp64:
    case X86::CMOVE_Fp64: case X86::CMOVNE_Fp64:
    case X86::CMOVP_Fp64: case X86::CMOVNP_Fp64:
      return Inverted ? X86::CMOVE_Fp64 : X86::CMOVNE_Fp64;
    case X86::CMOVBE_Fp80: case X86::CMOVNBE_Fp80:
    case X86::CMOVB_Fp80: case X86::CMOVNB_Fp80:
    case X86::CMOVE_Fp80: case X86::CMOVNE_Fp80:
    case X86::CMOVP_Fp80: case X86::CMOVNP_Fp80:
      return Inverted ? X86::CMOVE_Fp80 : X86::CMOVNE_Fp80;
    }
  };

  // Rewrite the CMov to use the !ZF flag from the test.
  CMovI.setDesc(TII->get(getFCMOVOpcode(CMovI.getOpcode(), Inverted)));
  FlagUse.setIsKill(true);
  LLVM_DEBUG(dbgs() << " fixed fcmov: "; CMovI.dump());
}

void X86FlagsCopyLoweringPass::rewriteCondJmp(
    MachineBasicBlock &TestMBB, MachineBasicBlock::iterator TestPos,
    const DebugLoc &TestLoc, MachineInstr &JmpI, CondRegArray &CondRegs) {
  // First get the register containing this specific condition.
  X86::CondCode Cond = X86::getCondFromBranch(JmpI);
  unsigned CondReg;
  bool Inverted;
  std::tie(CondReg, Inverted) =
      getCondOrInverseInReg(TestMBB, TestPos, TestLoc, Cond, CondRegs);

  MachineBasicBlock &JmpMBB = *JmpI.getParent();

  // Insert a direct test of the saved register.
  insertTest(JmpMBB, JmpI.getIterator(), JmpI.getDebugLoc(), CondReg);

  // Rewrite the jump to use the !ZF flag from the test, and kill its use of
  // flags afterward.
  JmpI.getOperand(1).setImm(Inverted ? X86::COND_E : X86::COND_NE);
  JmpI.findRegisterUseOperand(X86::EFLAGS)->setIsKill(true);
  LLVM_DEBUG(dbgs() << " fixed jCC: "; JmpI.dump());
}

void X86FlagsCopyLoweringPass::rewriteCopy(MachineInstr &MI,
                                           MachineOperand &FlagUse,
                                           MachineInstr &CopyDefI) {
  // Just replace this copy with the original copy def.
  MRI->replaceRegWith(MI.getOperand(0).getReg(),
                      CopyDefI.getOperand(0).getReg());
  MI.eraseFromParent();
}

void X86FlagsCopyLoweringPass::rewriteSetCC(MachineBasicBlock &TestMBB,
                                            MachineBasicBlock::iterator TestPos,
                                            const DebugLoc &TestLoc,
                                            MachineInstr &SetCCI,
                                            MachineOperand &FlagUse,
                                            CondRegArray &CondRegs) {
  X86::CondCode Cond = X86::getCondFromSETCC(SetCCI);
  // Note that we can't usefully rewrite this to the inverse without complex
  // analysis of the users of the setCC. Largely we rely on duplicates which
  // could have been avoided already being avoided here.
  unsigned &CondReg = CondRegs[Cond];
  if (!CondReg)
    CondReg = promoteCondToReg(TestMBB, TestPos, TestLoc, Cond);

  // Rewriting a register def is trivial: we just replace the register and
  // remove the setcc.
  if (!SetCCI.mayStore()) {
    assert(SetCCI.getOperand(0).isReg() &&
           "Cannot have a non-register defined operand to SETcc!");
    Register OldReg = SetCCI.getOperand(0).getReg();
    // Drop Kill flags on the old register before replacing. CondReg may have
    // a longer live range.
    MRI->clearKillFlags(OldReg);
    MRI->replaceRegWith(OldReg, CondReg);
    SetCCI.eraseFromParent();
    return;
  }

  // Otherwise, we need to emit a store.
  auto MIB = BuildMI(*SetCCI.getParent(), SetCCI.getIterator(),
                     SetCCI.getDebugLoc(), TII->get(X86::MOV8mr));
  // Copy the address operands.
  for (int i = 0; i < X86::AddrNumOperands; ++i)
    MIB.add(SetCCI.getOperand(i));

  MIB.addReg(CondReg);

  MIB.setMemRefs(SetCCI.memoperands());

  SetCCI.eraseFromParent();
}