1 //===- SelectionDAGISel.cpp - Implement the SelectionDAGISel class --------===//
2 //
3 // The LLVM Compiler Infrastructure
4 //
5 // This file is distributed under the University of Illinois Open Source
6 // License. See LICENSE.TXT for details.
7 //
8 //===----------------------------------------------------------------------===//
9 //
10 // This implements the SelectionDAGISel class.
11 //
12 //===----------------------------------------------------------------------===//
13 
14 #include "llvm/CodeGen/SelectionDAGISel.h"
15 #include "ScheduleDAGSDNodes.h"
16 #include "SelectionDAGBuilder.h"
17 #include "llvm/ADT/APInt.h"
18 #include "llvm/ADT/DenseMap.h"
19 #include "llvm/ADT/None.h"
21 #include "llvm/ADT/STLExtras.h"
22 #include "llvm/ADT/SmallPtrSet.h"
23 #include "llvm/ADT/SmallSet.h"
24 #include "llvm/ADT/SmallVector.h"
25 #include "llvm/ADT/Statistic.h"
26 #include "llvm/ADT/StringRef.h"
29 #include "llvm/Analysis/CFG.h"
32 #include "llvm/CodeGen/FastISel.h"
56 #include "llvm/IR/BasicBlock.h"
57 #include "llvm/IR/Constants.h"
58 #include "llvm/IR/DataLayout.h"
60 #include "llvm/IR/DebugLoc.h"
61 #include "llvm/IR/DiagnosticInfo.h"
62 #include "llvm/IR/Dominators.h"
63 #include "llvm/IR/Function.h"
64 #include "llvm/IR/InlineAsm.h"
65 #include "llvm/IR/InstrTypes.h"
66 #include "llvm/IR/Instruction.h"
67 #include "llvm/IR/Instructions.h"
68 #include "llvm/IR/IntrinsicInst.h"
69 #include "llvm/IR/Intrinsics.h"
70 #include "llvm/IR/Metadata.h"
71 #include "llvm/IR/Type.h"
72 #include "llvm/IR/User.h"
73 #include "llvm/IR/Value.h"
74 #include "llvm/MC/MCInstrDesc.h"
75 #include "llvm/MC/MCRegisterInfo.h"
76 #include "llvm/Pass.h"
78 #include "llvm/Support/Casting.h"
79 #include "llvm/Support/CodeGen.h"
81 #include "llvm/Support/Compiler.h"
82 #include "llvm/Support/Debug.h"
84 #include "llvm/Support/KnownBits.h"
85 #include "llvm/Support/Timer.h"
91 #include <algorithm>
92 #include <cassert>
93 #include <cstdint>
94 #include <iterator>
95 #include <limits>
96 #include <memory>
97 #include <string>
98 #include <utility>
99 #include <vector>
100 
101 using namespace llvm;
102 
103 #define DEBUG_TYPE "isel"
104 
105 STATISTIC(NumFastIselFailures, "Number of instructions fast isel failed on");
106 STATISTIC(NumFastIselSuccess, "Number of instructions fast isel selected");
107 STATISTIC(NumFastIselBlocks, "Number of blocks selected entirely by fast isel");
108 STATISTIC(NumDAGBlocks, "Number of blocks selected using DAG");
109 STATISTIC(NumDAGIselRetries,"Number of times dag isel has to try another path");
110 STATISTIC(NumEntryBlocks, "Number of entry blocks encountered");
111 STATISTIC(NumFastIselFailLowerArguments,
112  "Number of entry blocks where fast isel failed to lower arguments");
113 
114 static cl::opt<int> EnableFastISelAbort(
115  "fast-isel-abort", cl::Hidden,
116  cl::desc("Enable abort calls when \"fast\" instruction selection "
117  "fails to lower an instruction: 0 disable the abort, 1 will "
118  "abort but for args, calls and terminators, 2 will also "
119  "abort for argument lowering, and 3 will never fallback "
120  "to SelectionDAG."));
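// Usage sketch (illustrative, not part of this file): invoking e.g.
// "llc -O0 -fast-isel -fast-isel-abort=1 foo.ll" turns a FastISel miss on an
// ordinary instruction into a fatal error instead of a silent SelectionDAG
// fallback; higher values also abort for calls/terminators and arguments.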
121 
122 static cl::opt<bool> EnableFastISelFallbackReport(
123  "fast-isel-report-on-fallback", cl::Hidden,
124  cl::desc("Emit a diagnostic when \"fast\" instruction selection "
125  "falls back to SelectionDAG."));
126 
127 static cl::opt<bool>
128 UseMBPI("use-mbpi",
129  cl::desc("use Machine Branch Probability Info"),
130  cl::init(true), cl::Hidden);
131 
132 #ifndef NDEBUG
133 static cl::opt<std::string>
134 FilterDAGBasicBlockName("filter-view-dags", cl::Hidden,
135  cl::desc("Only display the basic block whose name "
136  "matches this for all view-*-dags options"));
137 static cl::opt<bool>
138 ViewDAGCombine1("view-dag-combine1-dags", cl::Hidden,
139  cl::desc("Pop up a window to show dags before the first "
140  "dag combine pass"));
141 static cl::opt<bool>
142 ViewLegalizeTypesDAGs("view-legalize-types-dags", cl::Hidden,
143  cl::desc("Pop up a window to show dags before legalize types"));
144 static cl::opt<bool>
145 ViewLegalizeDAGs("view-legalize-dags", cl::Hidden,
146  cl::desc("Pop up a window to show dags before legalize"));
147 static cl::opt<bool>
148 ViewDAGCombine2("view-dag-combine2-dags", cl::Hidden,
149  cl::desc("Pop up a window to show dags before the second "
150  "dag combine pass"));
151 static cl::opt<bool>
152 ViewDAGCombineLT("view-dag-combine-lt-dags", cl::Hidden,
153  cl::desc("Pop up a window to show dags before the post legalize types"
154  " dag combine pass"));
155 static cl::opt<bool>
156 ViewISelDAGs("view-isel-dags", cl::Hidden,
157  cl::desc("Pop up a window to show isel dags as they are selected"));
158 static cl::opt<bool>
159 ViewSchedDAGs("view-sched-dags", cl::Hidden,
160  cl::desc("Pop up a window to show sched dags as they are processed"));
161 static cl::opt<bool>
162 ViewSUnitDAGs("view-sunit-dags", cl::Hidden,
163  cl::desc("Pop up a window to show SUnit dags after they are processed"));
164 #else
165 static const bool ViewDAGCombine1 = false,
166  ViewLegalizeTypesDAGs = false, ViewLegalizeDAGs = false,
167  ViewDAGCombine2 = false,
168  ViewDAGCombineLT = false,
169  ViewISelDAGs = false, ViewSchedDAGs = false,
170  ViewSUnitDAGs = false;
171 #endif
172 
173 //===---------------------------------------------------------------------===//
174 ///
175 /// RegisterScheduler class - Track the registration of instruction schedulers.
176 ///
177 //===---------------------------------------------------------------------===//
178 MachinePassRegistry RegisterScheduler::Registry;
179 
180 //===---------------------------------------------------------------------===//
181 ///
182 /// ISHeuristic command line option for instruction schedulers.
183 ///
184 //===---------------------------------------------------------------------===//
185 static cl::opt<RegisterScheduler::FunctionPassCtor, false,
186                RegisterPassParser<RegisterScheduler>>
187 ISHeuristic("pre-RA-sched",
188  cl::init(&createDefaultScheduler), cl::Hidden,
189  cl::desc("Instruction schedulers available (before register"
190  " allocation):"));
191 
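// Illustrative use: "llc -pre-RA-sched=default" (or omitting the flag) routes
// through createDefaultScheduler below, which defers to the target's stated
// scheduling preference; other registered names pick a specific ScheduleDAG
// implementation.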
192 static RegisterScheduler
193 defaultListDAGScheduler("default", "Best scheduler for the target",
194                         createDefaultScheduler);
195 
196 namespace llvm {
197 
198  //===--------------------------------------------------------------------===//
199  /// \brief This class is used by SelectionDAGISel to temporarily override
200  /// the optimization level on a per-function basis.
201  class OptLevelChanger {
202  SelectionDAGISel &IS;
203  CodeGenOpt::Level SavedOptLevel;
204  bool SavedFastISel;
205 
206  public:
207  OptLevelChanger(SelectionDAGISel &ISel,
208  CodeGenOpt::Level NewOptLevel) : IS(ISel) {
209  SavedOptLevel = IS.OptLevel;
210  if (NewOptLevel == SavedOptLevel)
211  return;
212  IS.OptLevel = NewOptLevel;
213  IS.TM.setOptLevel(NewOptLevel);
214  DEBUG(dbgs() << "\nChanging optimization level for Function "
215  << IS.MF->getFunction().getName() << "\n");
216  DEBUG(dbgs() << "\tBefore: -O" << SavedOptLevel
217  << " ; After: -O" << NewOptLevel << "\n");
218  SavedFastISel = IS.TM.Options.EnableFastISel;
219  if (NewOptLevel == CodeGenOpt::None) {
220  IS.TM.setFastISel(IS.TM.getO0WantsFastISel());
221  DEBUG(dbgs() << "\tFastISel is "
222  << (IS.TM.Options.EnableFastISel ? "enabled" : "disabled")
223  << "\n");
224  }
225  }
226 
227  ~OptLevelChanger() {
228  if (IS.OptLevel == SavedOptLevel)
229  return;
230  DEBUG(dbgs() << "\nRestoring optimization level for Function "
231  << IS.MF->getFunction().getName() << "\n");
232  DEBUG(dbgs() << "\tBefore: -O" << IS.OptLevel
233  << " ; After: -O" << SavedOptLevel << "\n");
234  IS.OptLevel = SavedOptLevel;
235  IS.TM.setOptLevel(SavedOptLevel);
236  IS.TM.setFastISel(SavedFastISel);
237  }
238  };
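 // OptLevelChanger is used as a scoped RAII object in runOnMachineFunction:
 // constructing it switches the code generator to the per-function level
 // (e.g. for optnone), and its destructor restores the saved level and the
 // saved FastISel setting once the function has been selected.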
239 
240  //===--------------------------------------------------------------------===//
241  /// createDefaultScheduler - This creates an instruction scheduler appropriate
242  /// for the target.
243  ScheduleDAGSDNodes* createDefaultScheduler(SelectionDAGISel *IS,
244  CodeGenOpt::Level OptLevel) {
245  const TargetLowering *TLI = IS->TLI;
246  const TargetSubtargetInfo &ST = IS->MF->getSubtarget();
247 
248  // Try first to see if the Target has its own way of selecting a scheduler
249  if (auto *SchedulerCtor = ST.getDAGScheduler(OptLevel)) {
250  return SchedulerCtor(IS, OptLevel);
251  }
252 
253  if (OptLevel == CodeGenOpt::None ||
254      (ST.enableMachineScheduler() && ST.enableMachineSchedDefaultSched()) ||
255      TLI->getSchedulingPreference() == Sched::Source)
256  return createSourceListDAGScheduler(IS, OptLevel);
257  if (TLI->getSchedulingPreference() == Sched::RegPressure)
258  return createBURRListDAGScheduler(IS, OptLevel);
259  if (TLI->getSchedulingPreference() == Sched::Hybrid)
260  return createHybridListDAGScheduler(IS, OptLevel);
261  if (TLI->getSchedulingPreference() == Sched::VLIW)
262  return createVLIWDAGScheduler(IS, OptLevel);
263  assert(TLI->getSchedulingPreference() == Sched::ILP &&
264  "Unknown sched type!");
265  return createILPListDAGScheduler(IS, OptLevel);
266  }
267 
268 } // end namespace llvm
269 
270 // EmitInstrWithCustomInserter - This method should be implemented by targets
271 // that mark instructions with the 'usesCustomInserter' flag. These
272 // instructions are special in various ways, which require special support to
273 // insert. The specified MachineInstr is created but not inserted into any
274 // basic blocks, and this method is called to expand it into a sequence of
275 // instructions, potentially also creating new basic blocks and control flow.
276 // When new basic blocks are inserted and the edges from MBB to its successors
277 // are modified, the method should insert pairs of <OldSucc, NewSucc> into the
278 // DenseMap.
279 MachineBasicBlock *
280 TargetLowering::EmitInstrWithCustomInserter(MachineInstr &MI,
281  MachineBasicBlock *MBB) const {
282 #ifndef NDEBUG
283  dbgs() << "If a target marks an instruction with "
284  "'usesCustomInserter', it must implement "
285  "TargetLowering::EmitInstrWithCustomInserter!";
286 #endif
287  llvm_unreachable(nullptr);
288 }
289 
290 void TargetLowering::AdjustInstrPostInstrSelection(MachineInstr &MI,
291  SDNode *Node) const {
292  assert(!MI.hasPostISelHook() &&
293  "If a target marks an instruction with 'hasPostISelHook', "
294  "it must implement TargetLowering::AdjustInstrPostInstrSelection!");
295 }
296 
297 //===----------------------------------------------------------------------===//
298 // SelectionDAGISel code
299 //===----------------------------------------------------------------------===//
300 
301 SelectionDAGISel::SelectionDAGISel(TargetMachine &tm,
302  CodeGenOpt::Level OL) :
303  MachineFunctionPass(ID), TM(tm),
304  FuncInfo(new FunctionLoweringInfo()),
305  CurDAG(new SelectionDAG(tm, OL)),
306  SDB(new SelectionDAGBuilder(*CurDAG, *FuncInfo, OL)),
307  AA(), GFI(),
308  OptLevel(OL),
309  DAGSize(0) {
316  }
317 
318 SelectionDAGISel::~SelectionDAGISel() {
319  delete SDB;
320  delete CurDAG;
321  delete FuncInfo;
322 }
323 
324 void SelectionDAGISel::getAnalysisUsage(AnalysisUsage &AU) const {
325  if (OptLevel != CodeGenOpt::None)
326  AU.addRequired<AAResultsWrapperPass>();
327  AU.addRequired<GCModuleInfo>();
328  AU.addRequired<StackProtector>();
329  AU.addPreserved<GCModuleInfo>();
330  AU.addRequired<TargetLibraryInfoWrapperPass>();
331  AU.addRequired<TargetTransformInfoWrapperPass>();
332  if (UseMBPI && OptLevel != CodeGenOpt::None)
333  AU.addRequired<BranchProbabilityInfoWrapperPass>();
334  MachineFunctionPass::getAnalysisUsage(AU);
335 }
336 
337 /// SplitCriticalSideEffectEdges - Look for critical edges with a PHI value that
338 /// may trap on it. In this case we have to split the edge so that the path
339 /// through the predecessor block that doesn't go to the phi block doesn't
340 /// execute the possibly trapping instruction. If available, we pass domtree
341 /// and loop info to be updated when we split critical edges. This is because
342 /// SelectionDAGISel preserves these analyses.
343 /// This is required for correctness, so it must be done at -O0.
344 ///
345 static void SplitCriticalSideEffectEdges(Function &Fn, DominatorTree *DT,
346  LoopInfo *LI) {
347  // Loop for blocks with phi nodes.
348  for (BasicBlock &BB : Fn) {
349  PHINode *PN = dyn_cast<PHINode>(BB.begin());
350  if (!PN) continue;
351 
352  ReprocessBlock:
353  // For each block with a PHI node, check to see if any of the input values
354  // are potentially trapping constant expressions. Constant expressions are
355  // the only potentially trapping value that can occur as the argument to a
356  // PHI.
357  for (BasicBlock::iterator I = BB.begin(); (PN = dyn_cast<PHINode>(I)); ++I)
358  for (unsigned i = 0, e = PN->getNumIncomingValues(); i != e; ++i) {
359  ConstantExpr *CE = dyn_cast<ConstantExpr>(PN->getIncomingValue(i));
360  if (!CE || !CE->canTrap()) continue;
361 
362  // The only case we have to worry about is when the edge is critical.
363  // Since this block has a PHI Node, we assume it has multiple input
364  // edges: check to see if the pred has multiple successors.
365  BasicBlock *Pred = PN->getIncomingBlock(i);
366  if (Pred->getTerminator()->getNumSuccessors() == 1)
367  continue;
368 
369  // Okay, we have to split this edge.
370  SplitCriticalEdge(
371  Pred->getTerminator(), GetSuccessorNumber(Pred, &BB),
372  CriticalEdgeSplittingOptions(DT, LI).setMergeIdenticalEdges());
373  goto ReprocessBlock;
374  }
375  }
376 }
377 
378 bool SelectionDAGISel::runOnMachineFunction(MachineFunction &mf) {
379  // If we already selected that function, we do not need to run SDISel.
380  if (mf.getProperties().hasProperty(
381  MachineFunctionProperties::Property::Selected))
382  return false;
383  // Do some sanity-checking on the command-line options.
384  assert((!EnableFastISelAbort || TM.Options.EnableFastISel) &&
385  "-fast-isel-abort > 0 requires -fast-isel");
386 
387  const Function &Fn = mf.getFunction();
388  MF = &mf;
389 
390  // Reset the target options before resetting the optimization
391  // level below.
392  // FIXME: This is a horrible hack and should be processed via
393  // codegen looking at the optimization level explicitly when
394  // it wants to look at it.
395  TM.resetTargetOptions(Fn);
396  // Reset OptLevel to None for optnone functions.
397  CodeGenOpt::Level NewOptLevel = OptLevel;
398  if (OptLevel != CodeGenOpt::None && skipFunction(Fn))
399  NewOptLevel = CodeGenOpt::None;
400  OptLevelChanger OLC(*this, NewOptLevel);
401 
402  TII = MF->getSubtarget().getInstrInfo();
403  TLI = MF->getSubtarget().getTargetLowering();
404  RegInfo = &MF->getRegInfo();
405  LibInfo = &getAnalysis<TargetLibraryInfoWrapperPass>().getTLI();
406  GFI = Fn.hasGC() ? &getAnalysis<GCModuleInfo>().getFunctionInfo(Fn) : nullptr;
407  ORE = make_unique<OptimizationRemarkEmitter>(&Fn);
408  auto *DTWP = getAnalysisIfAvailable<DominatorTreeWrapperPass>();
409  DominatorTree *DT = DTWP ? &DTWP->getDomTree() : nullptr;
410  auto *LIWP = getAnalysisIfAvailable<LoopInfoWrapperPass>();
411  LoopInfo *LI = LIWP ? &LIWP->getLoopInfo() : nullptr;
412 
413  DEBUG(dbgs() << "\n\n\n=== " << Fn.getName() << "\n");
414 
415  SplitCriticalSideEffectEdges(const_cast<Function &>(Fn), DT, LI);
416 
417  CurDAG->init(*MF, *ORE, this, LibInfo);
418  FuncInfo->set(Fn, *MF, CurDAG);
419 
420  // Now get the optional analyzes if we want to.
421  // This is based on the possibly changed OptLevel (after optnone is taken
422  // into account). That's unfortunate but OK because it just means we won't
423  // ask for passes that have been required anyway.
424 
425  if (UseMBPI && OptLevel != CodeGenOpt::None)
426  FuncInfo->BPI = &getAnalysis<BranchProbabilityInfoWrapperPass>().getBPI();
427  else
428  FuncInfo->BPI = nullptr;
429 
430  if (OptLevel != CodeGenOpt::None)
431  AA = &getAnalysis<AAResultsWrapperPass>().getAAResults();
432  else
433  AA = nullptr;
434 
435  SDB->init(GFI, AA, LibInfo);
436 
437  MF->setHasInlineAsm(false);
438 
439  FuncInfo->SplitCSR = false;
440 
441  // We split CSR if the target supports it for the given function
442  // and the function has only return exits.
443  if (TLI->supportSplitCSR(MF)) {
444  FuncInfo->SplitCSR = true;
445 
446  // Collect all the return blocks.
447  for (const BasicBlock &BB : Fn) {
448  if (!succ_empty(&BB))
449  continue;
450 
451  const TerminatorInst *Term = BB.getTerminator();
452  if (isa<UnreachableInst>(Term) || isa<ReturnInst>(Term))
453  continue;
454 
455  // Bail out if the exit block is not Return nor Unreachable.
456  FuncInfo->SplitCSR = false;
457  break;
458  }
459  }
460 
461  MachineBasicBlock *EntryMBB = &MF->front();
462  if (FuncInfo->SplitCSR)
463  // This performs initialization so lowering for SplitCSR will be correct.
464  TLI->initializeSplitCSR(EntryMBB);
465 
466  SelectAllBasicBlocks(Fn);
468  DiagnosticInfoISelFallback DiagFallback(Fn);
469  Fn.getContext().diagnose(DiagFallback);
470  }
471 
472  // If the first basic block in the function has live ins that need to be
473  // copied into vregs, emit the copies into the top of the block before
474  // emitting the code for the block.
475  const TargetRegisterInfo &TRI = *MF->getSubtarget().getRegisterInfo();
476  RegInfo->EmitLiveInCopies(EntryMBB, TRI, *TII);
477 
478  // Insert copies in the entry block and the return blocks.
479  if (FuncInfo->SplitCSR) {
480  SmallVector<MachineBasicBlock*, 4> Returns;
481  // Collect all the return blocks.
482  for (MachineBasicBlock &MBB : mf) {
483  if (!MBB.succ_empty())
484  continue;
485 
486  MachineBasicBlock::iterator Term = MBB.getFirstTerminator();
487  if (Term != MBB.end() && Term->isReturn()) {
488  Returns.push_back(&MBB);
489  continue;
490  }
491  }
492  TLI->insertCopiesSplitCSR(EntryMBB, Returns);
493  }
494 
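 // Remember which physical registers are live into the function and which
 // virtual register each was copied to, so the argument DBG_VALUEs below can
 // also be mirrored onto the vreg copies.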
495  DenseMap<unsigned, unsigned> LiveInMap;
496  if (!FuncInfo->ArgDbgValues.empty())
497  for (std::pair<unsigned, unsigned> LI : RegInfo->liveins())
498  if (LI.second)
499  LiveInMap.insert(LI);
500 
501  // Insert DBG_VALUE instructions for function arguments to the entry block.
502  for (unsigned i = 0, e = FuncInfo->ArgDbgValues.size(); i != e; ++i) {
503  MachineInstr *MI = FuncInfo->ArgDbgValues[e-i-1];
504  bool hasFI = MI->getOperand(0).isFI();
505  unsigned Reg =
506  hasFI ? TRI.getFrameRegister(*MF) : MI->getOperand(0).getReg();
507  if (TargetRegisterInfo::isPhysicalRegister(Reg))
508  EntryMBB->insert(EntryMBB->begin(), MI);
509  else {
510  MachineInstr *Def = RegInfo->getVRegDef(Reg);
511  if (Def) {
512  MachineBasicBlock::iterator InsertPos = Def;
513  // FIXME: VR def may not be in entry block.
514  Def->getParent()->insert(std::next(InsertPos), MI);
515  } else
516  DEBUG(dbgs() << "Dropping debug info for dead vreg"
517  << TargetRegisterInfo::virtReg2Index(Reg) << "\n");
518  }
519 
520  // If Reg is live-in then update debug info to track its copy in a vreg.
521  DenseMap<unsigned, unsigned>::iterator LDI = LiveInMap.find(Reg);
522  if (LDI != LiveInMap.end()) {
523  assert(!hasFI && "There's no handling of frame pointer updating here yet "
524  "- add if needed");
525  MachineInstr *Def = RegInfo->getVRegDef(LDI->second);
526  MachineBasicBlock::iterator InsertPos = Def;
527  const MDNode *Variable = MI->getDebugVariable();
528  const MDNode *Expr = MI->getDebugExpression();
529  DebugLoc DL = MI->getDebugLoc();
530  bool IsIndirect = MI->isIndirectDebugValue();
531  if (IsIndirect)
532  assert(MI->getOperand(1).getImm() == 0 &&
533  "DBG_VALUE with nonzero offset");
534  assert(cast<DILocalVariable>(Variable)->isValidLocationForIntrinsic(DL) &&
535  "Expected inlined-at fields to agree");
536  // Def is never a terminator here, so it is ok to increment InsertPos.
537  BuildMI(*EntryMBB, ++InsertPos, DL, TII->get(TargetOpcode::DBG_VALUE),
538  IsIndirect, LDI->second, Variable, Expr);
539 
540  // If this vreg is directly copied into an exported register then
541  // that COPY instructions also need DBG_VALUE, if it is the only
542  // user of LDI->second.
543  MachineInstr *CopyUseMI = nullptr;
544  for (MachineRegisterInfo::use_instr_iterator
545  UI = RegInfo->use_instr_begin(LDI->second),
546  E = RegInfo->use_instr_end(); UI != E; ) {
547  MachineInstr *UseMI = &*(UI++);
548  if (UseMI->isDebugValue()) continue;
549  if (UseMI->isCopy() && !CopyUseMI && UseMI->getParent() == EntryMBB) {
550  CopyUseMI = UseMI; continue;
551  }
552  // Otherwise this is another use or second copy use.
553  CopyUseMI = nullptr; break;
554  }
555  if (CopyUseMI) {
556  // Use MI's debug location, which describes where Variable was
557  // declared, rather than whatever is attached to CopyUseMI.
558  MachineInstr *NewMI =
559  BuildMI(*MF, DL, TII->get(TargetOpcode::DBG_VALUE), IsIndirect,
560  CopyUseMI->getOperand(0).getReg(), Variable, Expr);
561  MachineBasicBlock::iterator Pos = CopyUseMI;
562  EntryMBB->insertAfter(Pos, NewMI);
563  }
564  }
565  }
566 
567  // Determine if there are any calls in this machine function.
568  MachineFrameInfo &MFI = MF->getFrameInfo();
569  for (const auto &MBB : *MF) {
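 // Once we know the function both makes calls and contains inline asm there
 // is nothing more to learn, so stop scanning.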
570  if (MFI.hasCalls() && MF->hasInlineAsm())
571  break;
572 
573  for (const auto &MI : MBB) {
574  const MCInstrDesc &MCID = TII->get(MI.getOpcode());
575  if ((MCID.isCall() && !MCID.isReturn()) ||
576  MI.isStackAligningInlineAsm()) {
577  MFI.setHasCalls(true);
578  }
579  if (MI.isInlineAsm()) {
580  MF->setHasInlineAsm(true);
581  }
582  }
583  }
584 
585  // Determine if there is a call to setjmp in the machine function.
586  MF->setExposesReturnsTwice(Fn.callsFunctionThatReturnsTwice());
587 
588  // Replace forward-declared registers with the registers containing
589  // the desired value.
590  MachineRegisterInfo &MRI = MF->getRegInfo();
591  for (DenseMap<unsigned, unsigned>::iterator
592  I = FuncInfo->RegFixups.begin(), E = FuncInfo->RegFixups.end();
593  I != E; ++I) {
594  unsigned From = I->first;
595  unsigned To = I->second;
596  // If To is also scheduled to be replaced, find what its ultimate
597  // replacement is.
598  while (true) {
599  DenseMap<unsigned, unsigned>::iterator J = FuncInfo->RegFixups.find(To);
600  if (J == E) break;
601  To = J->second;
602  }
603  // Make sure the new register has a sufficiently constrained register class.
604  if (TargetRegisterInfo::isVirtualRegister(From) &&
605  TargetRegisterInfo::isVirtualRegister(To))
606  MRI.constrainRegClass(To, MRI.getRegClass(From));
607  // Replace it.
608 
609 
610  // Replacing one register with another won't touch the kill flags.
611  // We need to conservatively clear the kill flags as a kill on the old
612  // register might dominate existing uses of the new register.
613  if (!MRI.use_empty(To))
614  MRI.clearKillFlags(From);
615  MRI.replaceRegWith(From, To);
616  }
617 
618  TLI->finalizeLowering(*MF);
619 
620  // Release function-specific state. SDB and CurDAG are already cleared
621  // at this point.
622  FuncInfo->clear();
623 
624  DEBUG(dbgs() << "*** MachineFunction at end of ISel ***\n");
625  DEBUG(MF->print(dbgs()));
626 
627  return true;
628 }
629 
630 static void reportFastISelFailure(MachineFunction &MF,
631  OptimizationRemarkEmitter &ORE,
632  OptimizationRemarkMissed &R,
633  bool ShouldAbort) {
634  // Print the function name explicitly if we don't have a debug location (which
635  // makes the diagnostic less useful) or if we're going to emit a raw error.
636  if (!R.getLocation().isValid() || ShouldAbort)
637  R << (" (in function: " + MF.getName() + ")").str();
638 
639  if (ShouldAbort)
640  report_fatal_error(Twine(R.getMsg()));
641 
642  ORE.emit(R);
643 }
644 
645 void SelectionDAGISel::SelectBasicBlock(BasicBlock::const_iterator Begin,
646  BasicBlock::const_iterator End,
647  bool &HadTailCall) {
648  // Allow creating illegal types during DAG building for the basic block.
649  CurDAG->NewNodesMustHaveLegalTypes = false;
650 
651  // Lower the instructions. If a call is emitted as a tail call, cease emitting
652  // nodes for this block.
653  for (BasicBlock::const_iterator I = Begin; I != End && !SDB->HasTailCall; ++I) {
654  if (!ElidedArgCopyInstrs.count(&*I))
655  SDB->visit(*I);
656  }
657 
658  // Make sure the root of the DAG is up-to-date.
659  CurDAG->setRoot(SDB->getControlRoot());
660  HadTailCall = SDB->HasTailCall;
661  SDB->clear();
662 
663  // Final step, emit the lowered DAG as machine code.
664  CodeGenAndEmitDAG();
665 }
666 
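/// Walk the DAG from its root and, for every value copied into a virtual
/// register, record its known bits and number of sign bits in FuncInfo so
/// that later blocks can take advantage of them.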
667 void SelectionDAGISel::ComputeLiveOutVRegInfo() {
668  SmallPtrSet<SDNode*, 16> VisitedNodes;
669  SmallVector<SDNode*, 128> Worklist;
670 
671  Worklist.push_back(CurDAG->getRoot().getNode());
672 
673  KnownBits Known;
674 
675  do {
676  SDNode *N = Worklist.pop_back_val();
677 
678  // If we've already seen this node, ignore it.
679  if (!VisitedNodes.insert(N).second)
680  continue;
681 
682  // Otherwise, add all chain operands to the worklist.
683  for (const SDValue &Op : N->op_values())
684  if (Op.getValueType() == MVT::Other)
685  Worklist.push_back(Op.getNode());
686 
687  // If this is a CopyToReg with a vreg dest, process it.
688  if (N->getOpcode() != ISD::CopyToReg)
689  continue;
690 
691  unsigned DestReg = cast<RegisterSDNode>(N->getOperand(1))->getReg();
692  if (!TargetRegisterInfo::isVirtualRegister(DestReg))
693  continue;
694 
695  // Ignore non-scalar or non-integer values.
696  SDValue Src = N->getOperand(2);
697  EVT SrcVT = Src.getValueType();
698  if (!SrcVT.isInteger() || SrcVT.isVector())
699  continue;
700 
701  unsigned NumSignBits = CurDAG->ComputeNumSignBits(Src);
702  CurDAG->computeKnownBits(Src, Known);
703  FuncInfo->AddLiveOutRegInfo(DestReg, NumSignBits, Known);
704  } while (!Worklist.empty());
705 }
706 
707 void SelectionDAGISel::CodeGenAndEmitDAG() {
708  StringRef GroupName = "sdag";
709  StringRef GroupDescription = "Instruction Selection and Scheduling";
710  std::string BlockName;
711  int BlockNumber = -1;
712  (void)BlockNumber;
713  bool MatchFilterBB = false; (void)MatchFilterBB;
714 
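 // The per-block pipeline that follows is: DAG combine 1, type legalization,
 // an optional combine after type legalization, vector legalization (plus a
 // second round of type legalization and combining), DAG legalization,
 // DAG combine 2, instruction selection, scheduling, and finally emission of
 // the scheduled machine instructions into the MachineBasicBlock.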
715  // Pre-type legalization allows creation of any node types.
716  CurDAG->NewNodesMustHaveLegalTypes = false;
717 
718 #ifndef NDEBUG
719  MatchFilterBB = (FilterDAGBasicBlockName.empty() ||
720  FilterDAGBasicBlockName ==
721  FuncInfo->MBB->getBasicBlock()->getName().str());
722 #endif
723 #ifdef NDEBUG
724  if (ViewDAGCombine1 || ViewLegalizeTypesDAGs || ViewLegalizeDAGs ||
725  ViewDAGCombine2 || ViewDAGCombineLT || ViewISelDAGs || ViewSchedDAGs ||
726  ViewSUnitDAGs)
727 #endif
728  {
729  BlockNumber = FuncInfo->MBB->getNumber();
730  BlockName =
731  (MF->getName() + ":" + FuncInfo->MBB->getBasicBlock()->getName()).str();
732  }
733  DEBUG(dbgs() << "Initial selection DAG: " << printMBBReference(*FuncInfo->MBB)
734  << " '" << BlockName << "'\n";
735  CurDAG->dump());
736 
737  if (ViewDAGCombine1 && MatchFilterBB)
738  CurDAG->viewGraph("dag-combine1 input for " + BlockName);
739 
740  // Run the DAG combiner in pre-legalize mode.
741  {
742  NamedRegionTimer T("combine1", "DAG Combining 1", GroupName,
743  GroupDescription, TimePassesIsEnabled);
744  CurDAG->Combine(BeforeLegalizeTypes, AA, OptLevel);
745  }
746 
747  DEBUG(dbgs() << "Optimized lowered selection DAG: "
748  << printMBBReference(*FuncInfo->MBB) << " '" << BlockName
749  << "'\n";
750  CurDAG->dump());
751 
752  // Second step, hack on the DAG until it only uses operations and types that
753  // the target supports.
754  if (ViewLegalizeTypesDAGs && MatchFilterBB)
755  CurDAG->viewGraph("legalize-types input for " + BlockName);
756 
757  bool Changed;
758  {
759  NamedRegionTimer T("legalize_types", "Type Legalization", GroupName,
760  GroupDescription, TimePassesIsEnabled);
761  Changed = CurDAG->LegalizeTypes();
762  }
763 
764  DEBUG(dbgs() << "Type-legalized selection DAG: "
765  << printMBBReference(*FuncInfo->MBB) << " '" << BlockName
766  << "'\n";
767  CurDAG->dump());
768 
769  // Only allow creation of legal node types.
770  CurDAG->NewNodesMustHaveLegalTypes = true;
771 
772  if (Changed) {
773  if (ViewDAGCombineLT && MatchFilterBB)
774  CurDAG->viewGraph("dag-combine-lt input for " + BlockName);
775 
776  // Run the DAG combiner in post-type-legalize mode.
777  {
778  NamedRegionTimer T("combine_lt", "DAG Combining after legalize types",
779  GroupName, GroupDescription, TimePassesIsEnabled);
780  CurDAG->Combine(AfterLegalizeTypes, AA, OptLevel);
781  }
782 
783  DEBUG(dbgs() << "Optimized type-legalized selection DAG: "
784  << printMBBReference(*FuncInfo->MBB) << " '" << BlockName
785  << "'\n";
786  CurDAG->dump());
787  }
788 
789  {
790  NamedRegionTimer T("legalize_vec", "Vector Legalization", GroupName,
791  GroupDescription, TimePassesIsEnabled);
792  Changed = CurDAG->LegalizeVectors();
793  }
794 
795  if (Changed) {
796  DEBUG(dbgs() << "Vector-legalized selection DAG: "
797  << printMBBReference(*FuncInfo->MBB) << " '" << BlockName
798  << "'\n";
799  CurDAG->dump());
800 
801  {
802  NamedRegionTimer T("legalize_types2", "Type Legalization 2", GroupName,
803  GroupDescription, TimePassesIsEnabled);
804  CurDAG->LegalizeTypes();
805  }
806 
807  DEBUG(dbgs() << "Vector/type-legalized selection DAG: "
808  << printMBBReference(*FuncInfo->MBB) << " '" << BlockName
809  << "'\n";
810  CurDAG->dump());
811 
812  if (ViewDAGCombineLT && MatchFilterBB)
813  CurDAG->viewGraph("dag-combine-lv input for " + BlockName);
814 
815  // Run the DAG combiner in post-type-legalize mode.
816  {
817  NamedRegionTimer T("combine_lv", "DAG Combining after legalize vectors",
818  GroupName, GroupDescription, TimePassesIsEnabled);
819  CurDAG->Combine(AfterLegalizeVectorOps, AA, OptLevel);
820  }
821 
822  DEBUG(dbgs() << "Optimized vector-legalized selection DAG: "
823  << printMBBReference(*FuncInfo->MBB) << " '" << BlockName
824  << "'\n";
825  CurDAG->dump());
826  }
827 
828  if (ViewLegalizeDAGs && MatchFilterBB)
829  CurDAG->viewGraph("legalize input for " + BlockName);
830 
831  {
832  NamedRegionTimer T("legalize", "DAG Legalization", GroupName,
833  GroupDescription, TimePassesIsEnabled);
834  CurDAG->Legalize();
835  }
836 
837  DEBUG(dbgs() << "Legalized selection DAG: "
838  << printMBBReference(*FuncInfo->MBB) << " '" << BlockName
839  << "'\n";
840  CurDAG->dump());
841 
842  if (ViewDAGCombine2 && MatchFilterBB)
843  CurDAG->viewGraph("dag-combine2 input for " + BlockName);
844 
845  // Run the DAG combiner in post-legalize mode.
846  {
847  NamedRegionTimer T("combine2", "DAG Combining 2", GroupName,
848  GroupDescription, TimePassesIsEnabled);
849  CurDAG->Combine(AfterLegalizeDAG, AA, OptLevel);
850  }
851 
852  DEBUG(dbgs() << "Optimized legalized selection DAG: "
853  << printMBBReference(*FuncInfo->MBB) << " '" << BlockName
854  << "'\n";
855  CurDAG->dump());
856 
857  if (OptLevel != CodeGenOpt::None)
858  ComputeLiveOutVRegInfo();
859 
860  if (ViewISelDAGs && MatchFilterBB)
861  CurDAG->viewGraph("isel input for " + BlockName);
862 
863  // Third, instruction select all of the operations to machine code, adding the
864  // code to the MachineBasicBlock.
865  {
866  NamedRegionTimer T("isel", "Instruction Selection", GroupName,
867  GroupDescription, TimePassesIsEnabled);
868  DoInstructionSelection();
869  }
870 
871  DEBUG(dbgs() << "Selected selection DAG: "
872  << printMBBReference(*FuncInfo->MBB) << " '" << BlockName
873  << "'\n";
874  CurDAG->dump());
875 
876  if (ViewSchedDAGs && MatchFilterBB)
877  CurDAG->viewGraph("scheduler input for " + BlockName);
878 
879  // Schedule machine code.
880  ScheduleDAGSDNodes *Scheduler = CreateScheduler();
881  {
882  NamedRegionTimer T("sched", "Instruction Scheduling", GroupName,
883  GroupDescription, TimePassesIsEnabled);
884  Scheduler->Run(CurDAG, FuncInfo->MBB);
885  }
886 
887  if (ViewSUnitDAGs && MatchFilterBB)
888  Scheduler->viewGraph();
889 
890  // Emit machine code to BB. This can change 'BB' to the last block being
891  // inserted into.
892  MachineBasicBlock *FirstMBB = FuncInfo->MBB, *LastMBB;
893  {
894  NamedRegionTimer T("emit", "Instruction Creation", GroupName,
895  GroupDescription, TimePassesIsEnabled);
896 
897  // FuncInfo->InsertPt is passed by reference and set to the end of the
898  // scheduled instructions.
899  LastMBB = FuncInfo->MBB = Scheduler->EmitSchedule(FuncInfo->InsertPt);
900  }
901 
902  // If the block was split, make sure we update any references that are used to
903  // update PHI nodes later on.
904  if (FirstMBB != LastMBB)
905  SDB->UpdateSplitBlock(FirstMBB, LastMBB);
906 
907  // Free the scheduler state.
908  {
909  NamedRegionTimer T("cleanup", "Instruction Scheduling Cleanup", GroupName,
910  GroupDescription, TimePassesIsEnabled);
911  delete Scheduler;
912  }
913 
914  // Free the SelectionDAG state, now that we're finished with it.
915  CurDAG->clear();
916 }
917 
918 namespace {
919 
920 /// ISelUpdater - helper class to handle updates of the instruction selection
921 /// graph.
922 class ISelUpdater : public SelectionDAG::DAGUpdateListener {
923  SelectionDAG::allnodes_iterator &ISelPosition;
924 
925 public:
926  ISelUpdater(SelectionDAG &DAG, SelectionDAG::allnodes_iterator &isp)
927  : SelectionDAG::DAGUpdateListener(DAG), ISelPosition(isp) {}
928 
929  /// NodeDeleted - Handle nodes deleted from the graph. If the node being
930  /// deleted is the current ISelPosition node, update ISelPosition.
931  ///
932  void NodeDeleted(SDNode *N, SDNode *E) override {
933  if (ISelPosition == SelectionDAG::allnodes_iterator(N))
934  ++ISelPosition;
935  }
936 };
937 
938 } // end anonymous namespace
939 
940 void SelectionDAGISel::DoInstructionSelection() {
941  DEBUG(dbgs() << "===== Instruction selection begins: "
942  << printMBBReference(*FuncInfo->MBB) << " '"
943  << FuncInfo->MBB->getName() << "'\n");
944 
946 
947  // Select target instructions for the DAG.
948  {
949  // Number all nodes with a topological order and set DAGSize.
950  DAGSize = CurDAG->AssignTopologicalOrder();
951 
952  // Create a dummy node (which is not added to allnodes), that adds
953  // a reference to the root node, preventing it from being deleted,
954  // and tracking any changes of the root.
955  HandleSDNode Dummy(CurDAG->getRoot());
956  SelectionDAG::allnodes_iterator ISelPosition (CurDAG->getRoot().getNode());
957  ++ISelPosition;
958 
959  // Make sure that ISelPosition gets properly updated when nodes are deleted
960  // in calls made from this function.
961  ISelUpdater ISU(*CurDAG, ISelPosition);
962 
963  // The AllNodes list is now topological-sorted. Visit the
964  // nodes by starting at the end of the list (the root of the
965  // graph) and proceeding back toward the beginning (the entry
966  // node).
967  while (ISelPosition != CurDAG->allnodes_begin()) {
968  SDNode *Node = &*--ISelPosition;
969  // Skip dead nodes. DAGCombiner is expected to eliminate all dead nodes,
970  // but there are currently some corner cases that it misses. Also, this
971  // makes it theoretically possible to disable the DAGCombiner.
972  if (Node->use_empty())
973  continue;
974 
975  // When we are using non-default rounding modes or FP exception behavior
976  // FP operations are represented by StrictFP pseudo-operations. They
977  // need to be simplified here so that the target-specific instruction
978  // selectors know how to handle them.
979  //
980  // If the current node is a strict FP pseudo-op, the isStrictFPOp()
981  // function will provide the corresponding normal FP opcode to which the
982  // node should be mutated.
983  //
984  // FIXME: The backends need a way to handle FP constraints.
985  if (Node->isStrictFPOpcode())
986  Node = CurDAG->mutateStrictFPToFP(Node);
987 
988  DEBUG(dbgs() << "\nISEL: Starting selection on root node: ";
989  Node->dump(CurDAG));
990 
991  Select(Node);
992  }
993 
994  CurDAG->setRoot(Dummy.getValue());
995  }
996 
997  DEBUG(dbgs() << "\n===== Instruction selection ends:\n");
998 
999  PostprocessISelDAG();
1000 }
1001 
1002 static bool hasExceptionPointerOrCodeUser(const CatchPadInst *CPI) {
1003  for (const User *U : CPI->users()) {
1004  if (const IntrinsicInst *EHPtrCall = dyn_cast<IntrinsicInst>(U)) {
1005  Intrinsic::ID IID = EHPtrCall->getIntrinsicID();
1006  if (IID == Intrinsic::eh_exceptionpointer ||
1007  IID == Intrinsic::eh_exceptioncode)
1008  return true;
1009  }
1010  }
1011  return false;
1012 }
1013 
1014 /// PrepareEHLandingPad - Emit an EH_LABEL, set up live-in registers, and
1015 /// do other setup for EH landing-pad blocks.
1016 bool SelectionDAGISel::PrepareEHLandingPad() {
1017  MachineBasicBlock *MBB = FuncInfo->MBB;
1018  const Constant *PersonalityFn = FuncInfo->Fn->getPersonalityFn();
1019  const BasicBlock *LLVMBB = MBB->getBasicBlock();
1020  const TargetRegisterClass *PtrRC =
1021  TLI->getRegClassFor(TLI->getPointerTy(CurDAG->getDataLayout()));
1022 
1023  // Catchpads have one live-in register, which typically holds the exception
1024  // pointer or code.
1025  if (const auto *CPI = dyn_cast<CatchPadInst>(LLVMBB->getFirstNonPHI())) {
1026  if (hasExceptionPointerOrCodeUser(CPI)) {
1027  // Get or create the virtual register to hold the pointer or code. Mark
1028  // the live in physreg and copy into the vreg.
1029  MCPhysReg EHPhysReg = TLI->getExceptionPointerRegister(PersonalityFn);
1030  assert(EHPhysReg && "target lacks exception pointer register");
1031  MBB->addLiveIn(EHPhysReg);
1032  unsigned VReg = FuncInfo->getCatchPadExceptionPointerVReg(CPI, PtrRC);
1033  BuildMI(*MBB, FuncInfo->InsertPt, SDB->getCurDebugLoc(),
1034  TII->get(TargetOpcode::COPY), VReg)
1035  .addReg(EHPhysReg, RegState::Kill);
1036  }
1037  return true;
1038  }
1039 
1040  if (!LLVMBB->isLandingPad())
1041  return true;
1042 
1043  // Add a label to mark the beginning of the landing pad. Deletion of the
1044  // landing pad can thus be detected via the MachineModuleInfo.
1045  MCSymbol *Label = MF->addLandingPad(MBB);
1046 
1047  // Assign the call site to the landing pad's begin label.
1048  MF->setCallSiteLandingPad(Label, SDB->LPadToCallSiteMap[MBB]);
1049 
1050  const MCInstrDesc &II = TII->get(TargetOpcode::EH_LABEL);
1051  BuildMI(*MBB, FuncInfo->InsertPt, SDB->getCurDebugLoc(), II)
1052  .addSym(Label);
1053 
1054  // Mark exception register as live in.
1055  if (unsigned Reg = TLI->getExceptionPointerRegister(PersonalityFn))
1056  FuncInfo->ExceptionPointerVirtReg = MBB->addLiveIn(Reg, PtrRC);
1057 
1058  // Mark exception selector register as live in.
1059  if (unsigned Reg = TLI->getExceptionSelectorRegister(PersonalityFn))
1060  FuncInfo->ExceptionSelectorVirtReg = MBB->addLiveIn(Reg, PtrRC);
1061 
1062  return true;
1063 }
1064 
1065 /// isFoldedOrDeadInstruction - Return true if the specified instruction is
1066 /// side-effect free and is either dead or folded into a generated instruction.
1067 /// Return false if it needs to be emitted.
1068 static bool isFoldedOrDeadInstruction(const Instruction *I,
1069  FunctionLoweringInfo *FuncInfo) {
1070  return !I->mayWriteToMemory() && // Side-effecting instructions aren't folded.
1071  !isa<TerminatorInst>(I) && // Terminators aren't folded.
1072  !isa<DbgInfoIntrinsic>(I) && // Debug instructions aren't folded.
1073  !I->isEHPad() && // EH pad instructions aren't folded.
1074  !FuncInfo->isExportedInst(I); // Exported instrs must be computed.
1075 }
1076 
1077 /// Set up SwiftErrorVals by going through the function. If the function has
1078 /// swifterror argument, it will be the first entry.
1079 static void setupSwiftErrorVals(const Function &Fn, const TargetLowering *TLI,
1080  FunctionLoweringInfo *FuncInfo) {
1081  if (!TLI->supportSwiftError())
1082  return;
1083 
1084  FuncInfo->SwiftErrorVals.clear();
1085  FuncInfo->SwiftErrorVRegDefMap.clear();
1086  FuncInfo->SwiftErrorVRegUpwardsUse.clear();
1087  FuncInfo->SwiftErrorVRegDefUses.clear();
1088  FuncInfo->SwiftErrorArg = nullptr;
1089 
1090  // Check if function has a swifterror argument.
1091  bool HaveSeenSwiftErrorArg = false;
1092  for (Function::const_arg_iterator AI = Fn.arg_begin(), AE = Fn.arg_end();
1093  AI != AE; ++AI)
1094  if (AI->hasSwiftErrorAttr()) {
1095  assert(!HaveSeenSwiftErrorArg &&
1096  "Must have only one swifterror parameter");
1097  (void)HaveSeenSwiftErrorArg; // silence warning.
1098  HaveSeenSwiftErrorArg = true;
1099  FuncInfo->SwiftErrorArg = &*AI;
1100  FuncInfo->SwiftErrorVals.push_back(&*AI);
1101  }
1102 
1103  for (const auto &LLVMBB : Fn)
1104  for (const auto &Inst : LLVMBB) {
1105  if (const AllocaInst *Alloca = dyn_cast<AllocaInst>(&Inst))
1106  if (Alloca->isSwiftError())
1107  FuncInfo->SwiftErrorVals.push_back(Alloca);
1108  }
1109 }
1110 
1111 static void createSwiftErrorEntriesInEntryBlock(FunctionLoweringInfo *FuncInfo,
1112  FastISel *FastIS,
1113  const TargetLowering *TLI,
1114  const TargetInstrInfo *TII,
1115  SelectionDAGBuilder *SDB) {
1116  if (!TLI->supportSwiftError())
1117  return;
1118 
1119  // We only need to do this when we have swifterror parameter or swifterror
1120  // alloc.
1121  if (FuncInfo->SwiftErrorVals.empty())
1122  return;
1123 
1124  assert(FuncInfo->MBB == &*FuncInfo->MF->begin() &&
1125  "expected to insert into entry block");
1126  auto &DL = FuncInfo->MF->getDataLayout();
1127  auto const *RC = TLI->getRegClassFor(TLI->getPointerTy(DL));
1128  for (const auto *SwiftErrorVal : FuncInfo->SwiftErrorVals) {
1129  // We will always generate a copy from the argument. It is always used at
1130  // least by the 'return' of the swifterror.
1131  if (FuncInfo->SwiftErrorArg && FuncInfo->SwiftErrorArg == SwiftErrorVal)
1132  continue;
1133  unsigned VReg = FuncInfo->MF->getRegInfo().createVirtualRegister(RC);
1134  // Assign Undef to Vreg. We construct MI directly to make sure it works
1135  // with FastISel.
1136  BuildMI(*FuncInfo->MBB, FuncInfo->MBB->getFirstNonPHI(),
1137  SDB->getCurDebugLoc(), TII->get(TargetOpcode::IMPLICIT_DEF),
1138  VReg);
1139 
1140  // Keep FastIS informed about the value we just inserted.
1141  if (FastIS)
1142  FastIS->setLastLocalValue(&*std::prev(FuncInfo->InsertPt));
1143 
1144  FuncInfo->setCurrentSwiftErrorVReg(FuncInfo->MBB, SwiftErrorVal, VReg);
1145  }
1146 }
1147 
1148 /// Collect llvm.dbg.declare information. This is done after argument lowering
1149 /// in case the declarations refer to arguments.
1150 static void processDbgDeclares(FunctionLoweringInfo *FuncInfo) {
1151  MachineFunction *MF = FuncInfo->MF;
1152  const DataLayout &DL = MF->getDataLayout();
1153  for (const BasicBlock &BB : *FuncInfo->Fn) {
1154  for (const Instruction &I : BB) {
1155  const DbgDeclareInst *DI = dyn_cast<DbgDeclareInst>(&I);
1156  if (!DI)
1157  continue;
1158 
1159  assert(DI->getVariable() && "Missing variable");
1160  assert(DI->getDebugLoc() && "Missing location");
1161  const Value *Address = DI->getAddress();
1162  if (!Address)
1163  continue;
1164 
1165  // Look through casts and constant offset GEPs. These mostly come from
1166  // inalloca.
1167  APInt Offset(DL.getTypeSizeInBits(Address->getType()), 0);
1168  Address = Address->stripAndAccumulateInBoundsConstantOffsets(DL, Offset);
1169 
1170  // Check if the variable is a static alloca or a byval or inalloca
1171  // argument passed in memory. If it is not, then we will ignore this
1172  // intrinsic and handle this during isel like dbg.value.
1173  int FI = std::numeric_limits<int>::max();
1174  if (const auto *AI = dyn_cast<AllocaInst>(Address)) {
1175  auto SI = FuncInfo->StaticAllocaMap.find(AI);
1176  if (SI != FuncInfo->StaticAllocaMap.end())
1177  FI = SI->second;
1178  } else if (const auto *Arg = dyn_cast<Argument>(Address))
1179  FI = FuncInfo->getArgumentFrameIndex(Arg);
1180 
1181  if (FI == std::numeric_limits<int>::max())
1182  continue;
1183 
1184  DIExpression *Expr = DI->getExpression();
1185  if (Offset.getBoolValue())
1186  Expr = DIExpression::prepend(Expr, DIExpression::NoDeref,
1187  Offset.getZExtValue());
1188  MF->setVariableDbgInfo(DI->getVariable(), Expr, FI, DI->getDebugLoc());
1189  }
1190  }
1191 }
1192 
1193 /// Propagate swifterror values through the machine function CFG.
1194 static void propagateSwiftErrorVRegs(FunctionLoweringInfo *FuncInfo) {
1195  auto *TLI = FuncInfo->TLI;
1196  if (!TLI->supportSwiftError())
1197  return;
1198 
1199  // We only need to do this when we have swifterror parameter or swifterror
1200  // alloc.
1201  if (FuncInfo->SwiftErrorVals.empty())
1202  return;
1203 
1204  // For each machine basic block in reverse post order.
1205  ReversePostOrderTraversal<MachineFunction *> RPOT(FuncInfo->MF);
1206  for (MachineBasicBlock *MBB : RPOT) {
1207  // For each swifterror value in the function.
1208  for(const auto *SwiftErrorVal : FuncInfo->SwiftErrorVals) {
1209  auto Key = std::make_pair(MBB, SwiftErrorVal);
1210  auto UUseIt = FuncInfo->SwiftErrorVRegUpwardsUse.find(Key);
1211  auto VRegDefIt = FuncInfo->SwiftErrorVRegDefMap.find(Key);
1212  bool UpwardsUse = UUseIt != FuncInfo->SwiftErrorVRegUpwardsUse.end();
1213  unsigned UUseVReg = UpwardsUse ? UUseIt->second : 0;
1214  bool DownwardDef = VRegDefIt != FuncInfo->SwiftErrorVRegDefMap.end();
1215  assert(!(UpwardsUse && !DownwardDef) &&
1216  "We can't have an upwards use but no downwards def");
1217 
1218  // If there is no upwards exposed use and an entry for the swifterror in
1219  // the def map for this value we don't need to do anything: We already
1220  // have a downward def for this basic block.
1221  if (!UpwardsUse && DownwardDef)
1222  continue;
1223 
1224  // Otherwise we either have an upwards exposed use vreg that we need to
1225  // materialize or need to forward the downward def from predecessors.
1226 
1227  // Check whether we have a single vreg def from all predecessors.
1228  // Otherwise we need a phi.
1229  SmallVector<std::pair<const MachineBasicBlock *, unsigned>, 4> VRegs;
1230  SmallSet<const MachineBasicBlock *, 8> Visited;
1231  for (auto *Pred : MBB->predecessors()) {
1232  if (!Visited.insert(Pred).second)
1233  continue;
1234  VRegs.push_back(std::make_pair(
1235  Pred, FuncInfo->getOrCreateSwiftErrorVReg(Pred, SwiftErrorVal)));
1236  if (Pred != MBB)
1237  continue;
1238  // We have a self-edge.
1239  // If there was no upwards use in this basic block there is now one: the
1240  // phi needs to use itself.
1241  if (!UpwardsUse) {
1242  UpwardsUse = true;
1243  UUseIt = FuncInfo->SwiftErrorVRegUpwardsUse.find(Key);
1244  assert(UUseIt != FuncInfo->SwiftErrorVRegUpwardsUse.end());
1245  UUseVReg = UUseIt->second;
1246  }
1247  }
1248 
1249  // We need a phi node if we have more than one predecessor with different
1250  // downward defs.
1251  bool needPHI =
1252  VRegs.size() >= 1 &&
1253  std::find_if(
1254  VRegs.begin(), VRegs.end(),
1255  [&](const std::pair<const MachineBasicBlock *, unsigned> &V)
1256  -> bool { return V.second != VRegs[0].second; }) !=
1257  VRegs.end();
1258 
1259  // If there is no upwards exposed used and we don't need a phi just
1260  // forward the swifterror vreg from the predecessor(s).
1261  if (!UpwardsUse && !needPHI) {
1262  assert(!VRegs.empty() &&
1263  "No predecessors? The entry block should bail out earlier");
1264  // Just forward the swifterror vreg from the predecessor(s).
1265  FuncInfo->setCurrentSwiftErrorVReg(MBB, SwiftErrorVal, VRegs[0].second);
1266  continue;
1267  }
1268 
1269  auto DLoc = isa<Instruction>(SwiftErrorVal)
1270  ? dyn_cast<Instruction>(SwiftErrorVal)->getDebugLoc()
1271  : DebugLoc();
1272  const auto *TII = FuncInfo->MF->getSubtarget().getInstrInfo();
1273 
1274  // If we don't need a phi create a copy to the upward exposed vreg.
1275  if (!needPHI) {
1276  assert(UpwardsUse);
1277  assert(!VRegs.empty() &&
1278  "No predecessors? Is the Calling Convention correct?");
1279  unsigned DestReg = UUseVReg;
1280  BuildMI(*MBB, MBB->getFirstNonPHI(), DLoc, TII->get(TargetOpcode::COPY),
1281  DestReg)
1282  .addReg(VRegs[0].second);
1283  continue;
1284  }
1285 
1286  // We need a phi: if there is an upwards exposed use we already have a
1287  // destination virtual register number otherwise we generate a new one.
1288  auto &DL = FuncInfo->MF->getDataLayout();
1289  auto const *RC = TLI->getRegClassFor(TLI->getPointerTy(DL));
1290  unsigned PHIVReg =
1291  UpwardsUse ? UUseVReg
1292  : FuncInfo->MF->getRegInfo().createVirtualRegister(RC);
1293  MachineInstrBuilder SwiftErrorPHI =
1294  BuildMI(*MBB, MBB->getFirstNonPHI(), DLoc,
1295  TII->get(TargetOpcode::PHI), PHIVReg);
1296  for (auto BBRegPair : VRegs) {
1297  SwiftErrorPHI.addReg(BBRegPair.second).addMBB(BBRegPair.first);
1298  }
1299 
1300  // We did not have a definition in this block before: store the phi's vreg
1301  // as this block downward exposed def.
1302  if (!UpwardsUse)
1303  FuncInfo->setCurrentSwiftErrorVReg(MBB, SwiftErrorVal, PHIVReg);
1304  }
1305  }
1306 }
1307 
1308 static void preassignSwiftErrorRegs(const TargetLowering *TLI,
1309  FunctionLoweringInfo *FuncInfo,
1310  BasicBlock::const_iterator Begin,
1311  BasicBlock::const_iterator End) {
1312  if (!TLI->supportSwiftError() || FuncInfo->SwiftErrorVals.empty())
1313  return;
1314 
1315  // Iterate over instructions and assign vregs to swifterror defs and uses.
1316  for (auto It = Begin; It != End; ++It) {
1317  ImmutableCallSite CS(&*It);
1318  if (CS) {
1319  // A call-site with a swifterror argument is both use and def.
1320  const Value *SwiftErrorAddr = nullptr;
1321  for (auto &Arg : CS.args()) {
1322  if (!Arg->isSwiftError())
1323  continue;
1324  // Use of swifterror.
1325  assert(!SwiftErrorAddr && "Cannot have multiple swifterror arguments");
1326  SwiftErrorAddr = &*Arg;
1327  assert(SwiftErrorAddr->isSwiftError() &&
1328  "Must have a swifterror value argument");
1329  unsigned VReg; bool CreatedReg;
1330  std::tie(VReg, CreatedReg) = FuncInfo->getOrCreateSwiftErrorVRegUseAt(
1331  &*It, FuncInfo->MBB, SwiftErrorAddr);
1332  assert(CreatedReg);
1333  }
1334  if (!SwiftErrorAddr)
1335  continue;
1336 
1337  // Def of swifterror.
1338  unsigned VReg; bool CreatedReg;
1339  std::tie(VReg, CreatedReg) =
1340  FuncInfo->getOrCreateSwiftErrorVRegDefAt(&*It);
1341  assert(CreatedReg);
1342  FuncInfo->setCurrentSwiftErrorVReg(FuncInfo->MBB, SwiftErrorAddr, VReg);
1343 
1344  // A load is a use.
1345  } else if (const LoadInst *LI = dyn_cast<const LoadInst>(&*It)) {
1346  const Value *V = LI->getOperand(0);
1347  if (!V->isSwiftError())
1348  continue;
1349 
1350  unsigned VReg; bool CreatedReg;
1351  std::tie(VReg, CreatedReg) =
1352  FuncInfo->getOrCreateSwiftErrorVRegUseAt(LI, FuncInfo->MBB, V);
1353  assert(CreatedReg);
1354 
1355  // A store is a def.
1356  } else if (const StoreInst *SI = dyn_cast<const StoreInst>(&*It)) {
1357  const Value *SwiftErrorAddr = SI->getOperand(1);
1358  if (!SwiftErrorAddr->isSwiftError())
1359  continue;
1360 
1361  // Def of swifterror.
1362  unsigned VReg; bool CreatedReg;
1363  std::tie(VReg, CreatedReg) =
1364  FuncInfo->getOrCreateSwiftErrorVRegDefAt(&*It);
1365  assert(CreatedReg);
1366  FuncInfo->setCurrentSwiftErrorVReg(FuncInfo->MBB, SwiftErrorAddr, VReg);
1367 
1368  // A return in a swifterror returning function is a use.
1369  } else if (const ReturnInst *R = dyn_cast<const ReturnInst>(&*It)) {
1370  const Function *F = R->getParent()->getParent();
1371  if(!F->getAttributes().hasAttrSomewhere(Attribute::SwiftError))
1372  continue;
1373 
1374  unsigned VReg; bool CreatedReg;
1375  std::tie(VReg, CreatedReg) = FuncInfo->getOrCreateSwiftErrorVRegUseAt(
1376  R, FuncInfo->MBB, FuncInfo->SwiftErrorArg);
1377  assert(CreatedReg);
1378  }
1379  }
1380 }
1381 
1382 void SelectionDAGISel::SelectAllBasicBlocks(const Function &Fn) {
1383  FastISelFailed = false;
1384  // Initialize the Fast-ISel state, if needed.
1385  FastISel *FastIS = nullptr;
1386  if (TM.Options.EnableFastISel) {
1387  DEBUG(dbgs() << "Enabling fast-isel\n");
1388  FastIS = TLI->createFastISel(*FuncInfo, LibInfo);
1389  }
1390 
1391  setupSwiftErrorVals(Fn, TLI, FuncInfo);
1392 
1393  ReversePostOrderTraversal<const Function*> RPOT(&Fn);
1394 
1395  // Lower arguments up front. An RPO iteration always visits the entry block
1396  // first.
1397  assert(*RPOT.begin() == &Fn.getEntryBlock());
1398  ++NumEntryBlocks;
1399 
1400  // Set up FuncInfo for ISel. Entry blocks never have PHIs.
1401  FuncInfo->MBB = FuncInfo->MBBMap[&Fn.getEntryBlock()];
1402  FuncInfo->InsertPt = FuncInfo->MBB->begin();
1403 
1404  if (!FastIS) {
1405  LowerArguments(Fn);
1406  } else {
1407  // See if fast isel can lower the arguments.
1408  FastIS->startNewBlock();
1409  if (!FastIS->lowerArguments()) {
1410  FastISelFailed = true;
1411  // Fast isel failed to lower these arguments
1412  ++NumFastIselFailLowerArguments;
1413 
1414  OptimizationRemarkMissed R("sdagisel", "FastISelFailure",
1415  Fn.getSubprogram(),
1416  &Fn.getEntryBlock());
1417  R << "FastISel didn't lower all arguments: "
1418  << ore::NV("Prototype", Fn.getType());
1419  reportFastISelFailure(*MF, *ORE, R, EnableFastISelAbort > 1);
1420 
1421  // Use SelectionDAG argument lowering
1422  LowerArguments(Fn);
1423  CurDAG->setRoot(SDB->getControlRoot());
1424  SDB->clear();
1425  CodeGenAndEmitDAG();
1426  }
1427 
1428  // If we inserted any instructions at the beginning, make a note of
1429  // where they are, so we can be sure to emit subsequent instructions
1430  // after them.
1431  if (FuncInfo->InsertPt != FuncInfo->MBB->begin())
1432  FastIS->setLastLocalValue(&*std::prev(FuncInfo->InsertPt));
1433  else
1434  FastIS->setLastLocalValue(nullptr);
1435  }
1436  createSwiftErrorEntriesInEntryBlock(FuncInfo, FastIS, TLI, TII, SDB);
1437 
1438  processDbgDeclares(FuncInfo);
1439 
1440  // Iterate over all basic blocks in the function.
1441  for (const BasicBlock *LLVMBB : RPOT) {
1442  if (OptLevel != CodeGenOpt::None) {
1443  bool AllPredsVisited = true;
1444  for (const_pred_iterator PI = pred_begin(LLVMBB), PE = pred_end(LLVMBB);
1445  PI != PE; ++PI) {
1446  if (!FuncInfo->VisitedBBs.count(*PI)) {
1447  AllPredsVisited = false;
1448  break;
1449  }
1450  }
1451 
1452  if (AllPredsVisited) {
1453  for (const PHINode &PN : LLVMBB->phis())
1454  FuncInfo->ComputePHILiveOutRegInfo(&PN);
1455  } else {
1456  for (const PHINode &PN : LLVMBB->phis())
1457  FuncInfo->InvalidatePHILiveOutRegInfo(&PN);
1458  }
1459 
1460  FuncInfo->VisitedBBs.insert(LLVMBB);
1461  }
1462 
1463  BasicBlock::const_iterator const Begin =
1464  LLVMBB->getFirstNonPHI()->getIterator();
1465  BasicBlock::const_iterator const End = LLVMBB->end();
1466  BasicBlock::const_iterator BI = End;
1467 
1468  FuncInfo->MBB = FuncInfo->MBBMap[LLVMBB];
1469  if (!FuncInfo->MBB)
1470  continue; // Some blocks like catchpads have no code or MBB.
1471 
1472  // Insert new instructions after any phi or argument setup code.
1473  FuncInfo->InsertPt = FuncInfo->MBB->end();
1474 
1475  // Setup an EH landing-pad block.
1476  FuncInfo->ExceptionPointerVirtReg = 0;
1477  FuncInfo->ExceptionSelectorVirtReg = 0;
1478  if (LLVMBB->isEHPad())
1479  if (!PrepareEHLandingPad())
1480  continue;
1481 
1482  // Before doing SelectionDAG ISel, see if FastISel has been requested.
1483  if (FastIS) {
1484  if (LLVMBB != &Fn.getEntryBlock())
1485  FastIS->startNewBlock();
1486 
1487  unsigned NumFastIselRemaining = std::distance(Begin, End);
1488 
1489  // Pre-assign swifterror vregs.
1490  preassignSwiftErrorRegs(TLI, FuncInfo, Begin, End);
1491 
1492  // Do FastISel on as many instructions as possible.
1493  for (; BI != Begin; --BI) {
1494  const Instruction *Inst = &*std::prev(BI);
1495 
1496  // If we no longer require this instruction, skip it.
1497  if (isFoldedOrDeadInstruction(Inst, FuncInfo) ||
1498  ElidedArgCopyInstrs.count(Inst)) {
1499  --NumFastIselRemaining;
1500  continue;
1501  }
1502 
1503  // Bottom-up: reset the insert pos at the top, after any local-value
1504  // instructions.
1505  FastIS->recomputeInsertPt();
1506 
1507  // Try to select the instruction with FastISel.
1508  if (FastIS->selectInstruction(Inst)) {
1509  --NumFastIselRemaining;
1510  ++NumFastIselSuccess;
1511  // If fast isel succeeded, skip over all the folded instructions, and
1512  // then see if there is a load right before the selected instructions.
1513  // Try to fold the load if so.
1514  const Instruction *BeforeInst = Inst;
1515  while (BeforeInst != &*Begin) {
1516  BeforeInst = &*std::prev(BasicBlock::const_iterator(BeforeInst));
1517  if (!isFoldedOrDeadInstruction(BeforeInst, FuncInfo))
1518  break;
1519  }
1520  if (BeforeInst != Inst && isa<LoadInst>(BeforeInst) &&
1521  BeforeInst->hasOneUse() &&
1522  FastIS->tryToFoldLoad(cast<LoadInst>(BeforeInst), Inst)) {
1523  // If we succeeded, don't re-select the load.
1524  BI = std::next(BasicBlock::const_iterator(BeforeInst));
1525  --NumFastIselRemaining;
1526  ++NumFastIselSuccess;
1527  }
1528  continue;
1529  }
1530 
1531  FastISelFailed = true;
1532 
1533  // Then handle certain instructions as single-LLVM-Instruction blocks.
1534  // We cannot separate out GCrelocates to their own blocks since we need
1535  // to keep track of gc-relocates for a particular gc-statepoint. This is
1536  // done by SelectionDAGBuilder::LowerAsSTATEPOINT, called before
1537  // visitGCRelocate.
1538  if (isa<CallInst>(Inst) && !isStatepoint(Inst) && !isGCRelocate(Inst)) {
1539  OptimizationRemarkMissed R("sdagisel", "FastISelFailure",
1540  Inst->getDebugLoc(), LLVMBB);
1541 
1542  R << "FastISel missed call";
1543 
1544  if (R.isEnabled() || EnableFastISelAbort) {
1545  std::string InstStrStorage;
1546  raw_string_ostream InstStr(InstStrStorage);
1547  InstStr << *Inst;
1548 
1549  R << ": " << InstStr.str();
1550  }
1551 
1552  reportFastISelFailure(*MF, *ORE, R, EnableFastISelAbort > 2);
1553 
1554  if (!Inst->getType()->isVoidTy() && !Inst->getType()->isTokenTy() &&
1555  !Inst->use_empty()) {
1556  unsigned &R = FuncInfo->ValueMap[Inst];
1557  if (!R)
1558  R = FuncInfo->CreateRegs(Inst->getType());
1559  }
1560 
1561  bool HadTailCall = false;
1562  MachineBasicBlock::iterator SavedInsertPt = FuncInfo->InsertPt;
1563  SelectBasicBlock(Inst->getIterator(), BI, HadTailCall);
1564 
1565  // If the call was emitted as a tail call, we're done with the block.
1566  // We also need to delete any previously emitted instructions.
1567  if (HadTailCall) {
1568  FastIS->removeDeadCode(SavedInsertPt, FuncInfo->MBB->end());
1569  --BI;
1570  break;
1571  }
1572 
1573  // Recompute NumFastIselRemaining as Selection DAG instruction
1574  // selection may have handled the call, input args, etc.
1575  unsigned RemainingNow = std::distance(Begin, BI);
1576  NumFastIselFailures += NumFastIselRemaining - RemainingNow;
1577  NumFastIselRemaining = RemainingNow;
1578  continue;
1579  }
1580 
1581  OptimizationRemarkMissed R("sdagisel", "FastISelFailure",
1582  Inst->getDebugLoc(), LLVMBB);
1583 
1584  bool ShouldAbort = EnableFastISelAbort;
1585  if (isa<TerminatorInst>(Inst)) {
1586  // Use a different message for terminator misses.
1587  R << "FastISel missed terminator";
1588  // Don't abort for terminator unless the level is really high
1589  ShouldAbort = (EnableFastISelAbort > 2);
1590  } else {
1591  R << "FastISel missed";
1592  }
1593 
1594  if (R.isEnabled() || EnableFastISelAbort) {
1595  std::string InstStrStorage;
1596  raw_string_ostream InstStr(InstStrStorage);
1597  InstStr << *Inst;
1598  R << ": " << InstStr.str();
1599  }
1600 
1601  reportFastISelFailure(*MF, *ORE, R, ShouldAbort);
1602 
1603  NumFastIselFailures += NumFastIselRemaining;
1604  break;
1605  }
1606 
1607  FastIS->recomputeInsertPt();
1608  }
1609 
1610  if (getAnalysis<StackProtector>().shouldEmitSDCheck(*LLVMBB)) {
1611  bool FunctionBasedInstrumentation =
1612  TLI->getSSPStackGuardCheck(*Fn.getParent());
1613  SDB->SPDescriptor.initialize(LLVMBB, FuncInfo->MBBMap[LLVMBB],
1614  FunctionBasedInstrumentation);
1615  }
1616 
1617  if (Begin != BI)
1618  ++NumDAGBlocks;
1619  else
1620  ++NumFastIselBlocks;
1621 
1622  if (Begin != BI) {
1623  // Run SelectionDAG instruction selection on the remainder of the block
1624  // not handled by FastISel. If FastISel is not run, this is the entire
1625  // block.
1626  bool HadTailCall;
1627  SelectBasicBlock(Begin, BI, HadTailCall);
1628 
1629  // But if FastISel was run, we already selected some of the block.
1630  // If we emitted a tail-call, we need to delete any previously emitted
1631  // instruction that follows it.
1632  if (HadTailCall && FuncInfo->InsertPt != FuncInfo->MBB->end())
1633  FastIS->removeDeadCode(FuncInfo->InsertPt, FuncInfo->MBB->end());
1634  }
1635 
1636  FinishBasicBlock();
1637  FuncInfo->PHINodesToUpdate.clear();
1638  ElidedArgCopyInstrs.clear();
1639  }
1640 
1641  propagateSwiftErrorVRegs(FuncInfo);
1642 
1643  delete FastIS;
1644  SDB->clearDanglingDebugInfo();
1645  SDB->SPDescriptor.resetPerFunctionState();
1646 }
1647 
1648 /// Given that the input MI is before a partial terminator sequence TSeq, return
1649 /// true if MI + TSeq is also a partial terminator sequence.
1650 ///
1651 /// A Terminator sequence is a sequence of MachineInstrs which at this point in
1652 /// lowering copy vregs into physical registers, which are then passed into
1653 /// terminator instructions so we can satisfy ABI constraints. A partial
1654 /// terminator sequence is an improper subset of a terminator sequence (i.e. it
1655 /// may be the whole terminator sequence).
1656 static bool MIIsInTerminatorSequence(const MachineInstr &MI) {
1657  // If we do not have a copy or an implicit def, we return true if and only if
1658  // MI is a debug value.
1659  if (!MI.isCopy() && !MI.isImplicitDef())
1660  // Sometimes DBG_VALUE MI sneak in between the copies from the vregs to the
1661  // physical registers if there is debug info associated with the terminator
1662  // of our mbb. We want to include said debug info in our terminator
1663  // sequence, so we return true in that case.
1664  return MI.isDebugValue();
1665 
1666  // We have left the terminator sequence if we are not doing one of the
1667  // following:
1668  //
1669  // 1. Copying a vreg into a physical register.
1670  // 2. Copying a vreg into a vreg.
1671  // 3. Defining a register via an implicit def.
1672 
1673  // OPI should always be a register definition...
1674  MachineInstr::const_mop_iterator OPI = MI.operands_begin();
1675  if (!OPI->isReg() || !OPI->isDef())
1676  return false;
1677 
1678  // Defining any register via an implicit def is always ok.
1679  if (MI.isImplicitDef())
1680  return true;
1681 
1682  // Grab the copy source...
1683  MachineInstr::const_mop_iterator OPI2 = OPI;
1684  ++OPI2;
1685  assert(OPI2 != MI.operands_end()
1686  && "Should have a copy implying we should have 2 arguments.");
1687 
1688  // Make sure that the copy dest is not a vreg when the copy source is a
1689  // physical register.
1690  if (!OPI2->isReg() ||
1693  return false;
1694 
1695  return true;
1696 }
1697 
1698 /// Find the split point at which to splice the end of BB into its success stack
1699 /// protector check machine basic block.
1700 ///
1701 /// On many platforms, due to ABI constraints, terminators, even before register
1702 /// allocation, use physical registers. This creates an issue for us since
1703 /// physical registers at this point can not travel across basic
1704 /// blocks. Luckily, selectiondag always moves physical registers into vregs
1705 /// when they enter functions and moves them through a sequence of copies back
1706 /// into the physical registers right before the terminator creating a
1707 /// ``Terminator Sequence''. This function is searching for the beginning of the
1708 /// terminator sequence so that we can ensure that we splice off not just the
1709 /// terminator, but additionally the copies that move the vregs into the
1710 /// physical registers.
1711 static MachineBasicBlock::iterator
1712 FindSplitPointForStackProtector(MachineBasicBlock *BB) {
1713  MachineBasicBlock::iterator SplitPoint = BB->getFirstTerminator();
1714  //
1715  if (SplitPoint == BB->begin())
1716  return SplitPoint;
1717 
1718  MachineBasicBlock::iterator Start = BB->begin();
1719  MachineBasicBlock::iterator Previous = SplitPoint;
1720  --Previous;
1721 
1722  while (MIIsInTerminatorSequence(*Previous)) {
1723  SplitPoint = Previous;
1724  if (Previous == Start)
1725  break;
1726  --Previous;
1727  }
1728 
1729  return SplitPoint;
1730 }
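// Illustrative example (hypothetical registers, not from the LLVM tree): given
// a parent block that ends in
//
//   ...                            <- split point returned (start of the
//   %physreg0 = COPY %vreg5           terminator sequence)
//   %physreg1 = COPY %vreg6
//   RET ...
//
// the function starts at the first terminator and walks backwards over the
// copies, so the stack-protector lowering in FinishBasicBlock can splice the
// whole tail (copies plus terminator) into the success block rather than just
// the terminator.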
1731 
1732 void
1733 SelectionDAGISel::FinishBasicBlock() {
1734  DEBUG(dbgs() << "Total amount of phi nodes to update: "
1735  << FuncInfo->PHINodesToUpdate.size() << "\n";
1736  for (unsigned i = 0, e = FuncInfo->PHINodesToUpdate.size(); i != e; ++i)
1737  dbgs() << "Node " << i << " : ("
1738  << FuncInfo->PHINodesToUpdate[i].first
1739  << ", " << FuncInfo->PHINodesToUpdate[i].second << ")\n");
1740 
1741  // Next, now that we know what the last MBB the LLVM BB expanded is, update
1742  // PHI nodes in successors.
1743  for (unsigned i = 0, e = FuncInfo->PHINodesToUpdate.size(); i != e; ++i) {
1744  MachineInstrBuilder PHI(*MF, FuncInfo->PHINodesToUpdate[i].first);
1745  assert(PHI->isPHI() &&
1746  "This is not a machine PHI node that we are updating!");
1747  if (!FuncInfo->MBB->isSuccessor(PHI->getParent()))
1748  continue;
1749  PHI.addReg(FuncInfo->PHINodesToUpdate[i].second).addMBB(FuncInfo->MBB);
1750  }
1751 
1752  // Handle stack protector.
1753  if (SDB->SPDescriptor.shouldEmitFunctionBasedCheckStackProtector()) {
1754  // The target provides a guard check function. There is no need to
1755  // generate error handling code or to split current basic block.
1756  MachineBasicBlock *ParentMBB = SDB->SPDescriptor.getParentMBB();
1757 
1758  // Add load and check to the basicblock.
1759  FuncInfo->MBB = ParentMBB;
1760  FuncInfo->InsertPt =
1761  FindSplitPointForStackProtector(ParentMBB);
1762  SDB->visitSPDescriptorParent(SDB->SPDescriptor, ParentMBB);
1763  CurDAG->setRoot(SDB->getRoot());
1764  SDB->clear();
1765  CodeGenAndEmitDAG();
1766 
1767  // Clear the Per-BB State.
1768  SDB->SPDescriptor.resetPerBBState();
1769  } else if (SDB->SPDescriptor.shouldEmitStackProtector()) {
1770  MachineBasicBlock *ParentMBB = SDB->SPDescriptor.getParentMBB();
1771  MachineBasicBlock *SuccessMBB = SDB->SPDescriptor.getSuccessMBB();
1772 
1773  // Find the split point to split the parent mbb. At the same time copy all
1774  // physical registers used in the tail of parent mbb into virtual registers
1775  // before the split point and back into physical registers after the split
1776  // point. This prevents us needing to deal with Live-ins and many other
1777  // register allocation issues caused by us splitting the parent mbb. The
1778  // register allocator will clean up said virtual copies later on.
1779  MachineBasicBlock::iterator SplitPoint =
1780  FindSplitPointForStackProtector(ParentMBB);
1781 
1782  // Splice the terminator of ParentMBB into SuccessMBB.
1783  SuccessMBB->splice(SuccessMBB->end(), ParentMBB,
1784  SplitPoint,
1785  ParentMBB->end());
1786 
1787  // Add compare/jump on neq/jump to the parent BB.
1788  FuncInfo->MBB = ParentMBB;
1789  FuncInfo->InsertPt = ParentMBB->end();
1790  SDB->visitSPDescriptorParent(SDB->SPDescriptor, ParentMBB);
1791  CurDAG->setRoot(SDB->getRoot());
1792  SDB->clear();
1793  CodeGenAndEmitDAG();
1794 
1795  // CodeGen Failure MBB if we have not codegened it yet.
1796  MachineBasicBlock *FailureMBB = SDB->SPDescriptor.getFailureMBB();
1797  if (FailureMBB->empty()) {
1798  FuncInfo->MBB = FailureMBB;
1799  FuncInfo->InsertPt = FailureMBB->end();
1800  SDB->visitSPDescriptorFailure(SDB->SPDescriptor);
1801  CurDAG->setRoot(SDB->getRoot());
1802  SDB->clear();
1803  CodeGenAndEmitDAG();
1804  }
1805 
1806  // Clear the Per-BB State.
1807  SDB->SPDescriptor.resetPerBBState();
1808  }
1809 
1810  // Lower each BitTestBlock.
1811  for (auto &BTB : SDB->BitTestCases) {
1812  // Lower header first, if it wasn't already lowered
1813  if (!BTB.Emitted) {
1814  // Set the current basic block to the mbb we wish to insert the code into
1815  FuncInfo->MBB = BTB.Parent;
1816  FuncInfo->InsertPt = FuncInfo->MBB->end();
1817  // Emit the code
1818  SDB->visitBitTestHeader(BTB, FuncInfo->MBB);
1819  CurDAG->setRoot(SDB->getRoot());
1820  SDB->clear();
1821  CodeGenAndEmitDAG();
1822  }
1823 
1824  BranchProbability UnhandledProb = BTB.Prob;
1825  for (unsigned j = 0, ej = BTB.Cases.size(); j != ej; ++j) {
1826  UnhandledProb -= BTB.Cases[j].ExtraProb;
1827  // Set the current basic block to the mbb we wish to insert the code into
1828  FuncInfo->MBB = BTB.Cases[j].ThisBB;
1829  FuncInfo->InsertPt = FuncInfo->MBB->end();
1830  // Emit the code
1831 
1832  // If all cases cover a contiguous range, it is not necessary to jump to
1833  // the default block after the last bit test fails. This is because the
1834  // range check during bit test header creation has guaranteed that every
1835  // case here doesn't go outside the range. In this case, there is no need
1836  // to perform the last bit test, as it will always be true. Instead, make
1837  // the second-to-last bit-test fall through to the target of the last bit
1838  // test, and delete the last bit test.
1839 
1840  MachineBasicBlock *NextMBB;
1841  if (BTB.ContiguousRange && j + 2 == ej) {
1842  // Second-to-last bit-test with contiguous range: fall through to the
1843  // target of the final bit test.
1844  NextMBB = BTB.Cases[j + 1].TargetBB;
1845  } else if (j + 1 == ej) {
1846  // For the last bit test, fall through to Default.
1847  NextMBB = BTB.Default;
1848  } else {
1849  // Otherwise, fall through to the next bit test.
1850  NextMBB = BTB.Cases[j + 1].ThisBB;
1851  }
1852 
1853  SDB->visitBitTestCase(BTB, NextMBB, UnhandledProb, BTB.Reg, BTB.Cases[j],
1854  FuncInfo->MBB);
1855 
1856  CurDAG->setRoot(SDB->getRoot());
1857  SDB->clear();
1858  CodeGenAndEmitDAG();
1859 
1860  if (BTB.ContiguousRange && j + 2 == ej) {
1861  // Since we're not going to use the final bit test, remove it.
1862  BTB.Cases.pop_back();
1863  break;
1864  }
1865  }
1866 
1867  // Update PHI Nodes
1868  for (unsigned pi = 0, pe = FuncInfo->PHINodesToUpdate.size();
1869  pi != pe; ++pi) {
1870  MachineInstrBuilder PHI(*MF, FuncInfo->PHINodesToUpdate[pi].first);
1871  MachineBasicBlock *PHIBB = PHI->getParent();
1872  assert(PHI->isPHI() &&
1873  "This is not a machine PHI node that we are updating!");
1874  // This is "default" BB. We have two jumps to it. From "header" BB and
1875  // from last "case" BB, unless the latter was skipped.
1876  if (PHIBB == BTB.Default) {
1877  PHI.addReg(FuncInfo->PHINodesToUpdate[pi].second).addMBB(BTB.Parent);
1878  if (!BTB.ContiguousRange) {
1879  PHI.addReg(FuncInfo->PHINodesToUpdate[pi].second)
1880  .addMBB(BTB.Cases.back().ThisBB);
1881  }
1882  }
1883  // One of "cases" BB.
1884  for (unsigned j = 0, ej = BTB.Cases.size();
1885  j != ej; ++j) {
1886  MachineBasicBlock* cBB = BTB.Cases[j].ThisBB;
1887  if (cBB->isSuccessor(PHIBB))
1888  PHI.addReg(FuncInfo->PHINodesToUpdate[pi].second).addMBB(cBB);
1889  }
1890  }
1891  }
1892  SDB->BitTestCases.clear();
1893 
1894  // If the JumpTable record is filled in, then we need to emit a jump table.
1895  // Updating the PHI nodes is tricky in this case, since we need to determine
1896  // whether the PHI is a successor of the range check MBB or the jump table MBB
1897  for (unsigned i = 0, e = SDB->JTCases.size(); i != e; ++i) {
1898  // Lower header first, if it wasn't already lowered
1899  if (!SDB->JTCases[i].first.Emitted) {
1900  // Set the current basic block to the mbb we wish to insert the code into
1901  FuncInfo->MBB = SDB->JTCases[i].first.HeaderBB;
1902  FuncInfo->InsertPt = FuncInfo->MBB->end();
1903  // Emit the code
1904  SDB->visitJumpTableHeader(SDB->JTCases[i].second, SDB->JTCases[i].first,
1905  FuncInfo->MBB);
1906  CurDAG->setRoot(SDB->getRoot());
1907  SDB->clear();
1908  CodeGenAndEmitDAG();
1909  }
1910 
1911  // Set the current basic block to the mbb we wish to insert the code into
1912  FuncInfo->MBB = SDB->JTCases[i].second.MBB;
1913  FuncInfo->InsertPt = FuncInfo->MBB->end();
1914  // Emit the code
1915  SDB->visitJumpTable(SDB->JTCases[i].second);
1916  CurDAG->setRoot(SDB->getRoot());
1917  SDB->clear();
1918  CodeGenAndEmitDAG();
1919 
1920  // Update PHI Nodes
1921  for (unsigned pi = 0, pe = FuncInfo->PHINodesToUpdate.size();
1922  pi != pe; ++pi) {
1923  MachineInstrBuilder PHI(*MF, FuncInfo->PHINodesToUpdate[pi].first);
1924  MachineBasicBlock *PHIBB = PHI->getParent();
1925  assert(PHI->isPHI() &&
1926  "This is not a machine PHI node that we are updating!");
1927  // "default" BB. We can go there only from header BB.
1928  if (PHIBB == SDB->JTCases[i].second.Default)
1929  PHI.addReg(FuncInfo->PHINodesToUpdate[pi].second)
1930  .addMBB(SDB->JTCases[i].first.HeaderBB);
1931  // JT BB. Just iterate over successors here
1932  if (FuncInfo->MBB->isSuccessor(PHIBB))
1933  PHI.addReg(FuncInfo->PHINodesToUpdate[pi].second).addMBB(FuncInfo->MBB);
1934  }
1935  }
1936  SDB->JTCases.clear();
1937 
1938  // If we generated any switch lowering information, build and codegen any
1939  // additional DAGs necessary.
1940  for (unsigned i = 0, e = SDB->SwitchCases.size(); i != e; ++i) {
1941  // Set the current basic block to the mbb we wish to insert the code into
1942  FuncInfo->MBB = SDB->SwitchCases[i].ThisBB;
1943  FuncInfo->InsertPt = FuncInfo->MBB->end();
1944 
1945  // Determine the unique successors.
1946  SmallVector<MachineBasicBlock *, 2> Succs;
1947  Succs.push_back(SDB->SwitchCases[i].TrueBB);
1948  if (SDB->SwitchCases[i].TrueBB != SDB->SwitchCases[i].FalseBB)
1949  Succs.push_back(SDB->SwitchCases[i].FalseBB);
1950 
1951  // Emit the code. Note that this could result in FuncInfo->MBB being split.
1952  SDB->visitSwitchCase(SDB->SwitchCases[i], FuncInfo->MBB);
1953  CurDAG->setRoot(SDB->getRoot());
1954  SDB->clear();
1955  CodeGenAndEmitDAG();
1956 
1957  // Remember the last block, now that any splitting is done, for use in
1958  // populating PHI nodes in successors.
1959  MachineBasicBlock *ThisBB = FuncInfo->MBB;
1960 
1961  // Handle any PHI nodes in successors of this chunk, as if we were coming
1962  // from the original BB before switch expansion. Note that PHI nodes can
1963  // occur multiple times in PHINodesToUpdate. We have to be very careful to
1964  // handle them the right number of times.
1965  for (unsigned i = 0, e = Succs.size(); i != e; ++i) {
1966  FuncInfo->MBB = Succs[i];
1967  FuncInfo->InsertPt = FuncInfo->MBB->end();
1968  // FuncInfo->MBB may have been removed from the CFG if a branch was
1969  // constant folded.
1970  if (ThisBB->isSuccessor(FuncInfo->MBB)) {
1971  for (MachineBasicBlock::iterator
1972  MBBI = FuncInfo->MBB->begin(), MBBE = FuncInfo->MBB->end();
1973  MBBI != MBBE && MBBI->isPHI(); ++MBBI) {
1974  MachineInstrBuilder PHI(*MF, MBBI);
1975  // This value for this PHI node is recorded in PHINodesToUpdate.
1976  for (unsigned pn = 0; ; ++pn) {
1977  assert(pn != FuncInfo->PHINodesToUpdate.size() &&
1978  "Didn't find PHI entry!");
1979  if (FuncInfo->PHINodesToUpdate[pn].first == PHI) {
1980  PHI.addReg(FuncInfo->PHINodesToUpdate[pn].second).addMBB(ThisBB);
1981  break;
1982  }
1983  }
1984  }
1985  }
1986  }
1987  }
1988  SDB->SwitchCases.clear();
1989 }
1990 
1991 /// Create the scheduler. If a specific scheduler was specified
1992 /// via the SchedulerRegistry, use it, otherwise select the
1993 /// one preferred by the target.
1994 ///
1995 ScheduleDAGSDNodes *SelectionDAGISel::CreateScheduler() {
1996  return ISHeuristic(this, OptLevel);
1997 }
1998 
1999 //===----------------------------------------------------------------------===//
2000 // Helper functions used by the generated instruction selector.
2001 //===----------------------------------------------------------------------===//
2002 // Calls to these methods are generated by tblgen.
2003 
2004 /// CheckAndMask - The isel is trying to match something like (and X, 255). If
2005 /// the dag combiner simplified the 255, we still want to match. RHS is the
2006 /// actual value in the DAG on the RHS of an AND, and DesiredMaskS is the value
2007 /// specified in the .td file (e.g. 255).
2008 bool SelectionDAGISel::CheckAndMask(SDValue LHS, ConstantSDNode *RHS,
2009  int64_t DesiredMaskS) const {
2010  const APInt &ActualMask = RHS->getAPIntValue();
2011  const APInt &DesiredMask = APInt(LHS.getValueSizeInBits(), DesiredMaskS);
2012 
2013  // If the actual mask exactly matches, success!
2014  if (ActualMask == DesiredMask)
2015  return true;
2016 
2017  // If the actual AND mask is allowing unallowed bits, this doesn't match.
2018  if (ActualMask.intersects(~DesiredMask))
2019  return false;
2020 
2021  // Otherwise, the DAG Combiner may have proven that the value coming in is
2022  // either already zero or is not demanded. Check for known zero input bits.
2023  APInt NeededMask = DesiredMask & ~ActualMask;
2024  if (CurDAG->MaskedValueIsZero(LHS, NeededMask))
2025  return true;
2026 
2027  // TODO: check to see if missing bits are just not demanded.
2028 
2029  // Otherwise, this pattern doesn't match.
2030  return false;
2031 }
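// Illustrative worked example (hypothetical values, not from the LLVM tree):
// suppose a pattern in the .td file asks for (and X, 255), but the DAG
// combiner has already shrunk the constant because the upper bits of X are
// known zero, leaving (and X, 15) in the DAG. Then ActualMask = 15,
// DesiredMask = 255, and NeededMask = DesiredMask & ~ActualMask = 240. If
// MaskedValueIsZero(X, 240) holds, the bits the pattern wanted cleared are
// already zero, so (and X, 15) computes the same value as (and X, 255) and
// the pattern still matches.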
2032 
2033 /// CheckOrMask - The isel is trying to match something like (or X, 255). If
2034 /// the dag combiner simplified the 255, we still want to match. RHS is the
2035 /// actual value in the DAG on the RHS of an OR, and DesiredMaskS is the value
2036 /// specified in the .td file (e.g. 255).
2037 bool SelectionDAGISel::CheckOrMask(SDValue LHS, ConstantSDNode *RHS,
2038  int64_t DesiredMaskS) const {
2039  const APInt &ActualMask = RHS->getAPIntValue();
2040  const APInt &DesiredMask = APInt(LHS.getValueSizeInBits(), DesiredMaskS);
2041 
2042  // If the actual mask exactly matches, success!
2043  if (ActualMask == DesiredMask)
2044  return true;
2045 
2046  // If the actual OR mask is allowing unallowed bits, this doesn't match.
2047  if (ActualMask.intersects(~DesiredMask))
2048  return false;
2049 
2050  // Otherwise, the DAG Combiner may have proven that the value coming in is
2051  // either already zero or is not demanded. Check for known zero input bits.
2052  APInt NeededMask = DesiredMask & ~ActualMask;
2053 
2054  KnownBits Known;
2055  CurDAG->computeKnownBits(LHS, Known);
2056 
2057  // If all the missing bits in the or are already known to be set, match!
2058  if (NeededMask.isSubsetOf(Known.One))
2059  return true;
2060 
2061  // TODO: check to see if missing bits are just not demanded.
2062 
2063  // Otherwise, this pattern doesn't match.
2064  return false;
2065 }
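// Illustrative worked example (hypothetical values, not from the LLVM tree):
// the OR analogue of the example above. If a pattern wants (or X, 255) but the
// DAG holds (or X, 240), then NeededMask = 255 & ~240 = 15, and the match
// still succeeds when computeKnownBits proves that the low four bits of X are
// already one.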
2066 
2067 /// SelectInlineAsmMemoryOperands - Calls to this are automatically generated
2068 /// by tblgen. Others should not call it.
2069 void SelectionDAGISel::SelectInlineAsmMemoryOperands(std::vector<SDValue> &Ops,
2070  const SDLoc &DL) {
2071  std::vector<SDValue> InOps;
2072  std::swap(InOps, Ops);
2073 
2074  Ops.push_back(InOps[InlineAsm::Op_InputChain]); // 0
2075  Ops.push_back(InOps[InlineAsm::Op_AsmString]); // 1
2076  Ops.push_back(InOps[InlineAsm::Op_MDNode]); // 2, !srcloc
2077  Ops.push_back(InOps[InlineAsm::Op_ExtraInfo]); // 3 (SideEffect, AlignStack)
2078 
2079  unsigned i = InlineAsm::Op_FirstOperand, e = InOps.size();
2080  if (InOps[e-1].getValueType() == MVT::Glue)
2081  --e; // Don't process a glue operand if it is here.
2082 
2083  while (i != e) {
2084  unsigned Flags = cast<ConstantSDNode>(InOps[i])->getZExtValue();
2085  if (!InlineAsm::isMemKind(Flags)) {
2086  // Just skip over this operand, copying the operands verbatim.
2087  Ops.insert(Ops.end(), InOps.begin()+i,
2088  InOps.begin()+i+InlineAsm::getNumOperandRegisters(Flags) + 1);
2089  i += InlineAsm::getNumOperandRegisters(Flags) + 1;
2090  } else {
2091  assert(InlineAsm::getNumOperandRegisters(Flags) == 1 &&
2092  "Memory operand with multiple values?");
2093 
2094  unsigned TiedToOperand;
2095  if (InlineAsm::isUseOperandTiedToDef(Flags, TiedToOperand)) {
2096  // We need the constraint ID from the operand this is tied to.
2097  unsigned CurOp = InlineAsm::Op_FirstOperand;
2098  Flags = cast<ConstantSDNode>(InOps[CurOp])->getZExtValue();
2099  for (; TiedToOperand; --TiedToOperand) {
2100  CurOp += InlineAsm::getNumOperandRegisters(Flags)+1;
2101  Flags = cast<ConstantSDNode>(InOps[CurOp])->getZExtValue();
2102  }
2103  }
2104 
2105  // Otherwise, this is a memory operand. Ask the target to select it.
2106  std::vector<SDValue> SelOps;
2107  unsigned ConstraintID = InlineAsm::getMemoryConstraintID(Flags);
2108  if (SelectInlineAsmMemoryOperand(InOps[i+1], ConstraintID, SelOps))
2109  report_fatal_error("Could not match memory address. Inline asm"
2110  " failure!");
2111 
2112  // Add this to the output node.
2113  unsigned NewFlags =
2114  InlineAsm::getFlagWord(InlineAsm::Kind_Mem, SelOps.size());
2115  NewFlags = InlineAsm::getFlagWordForMem(NewFlags, ConstraintID);
2116  Ops.push_back(CurDAG->getTargetConstant(NewFlags, DL, MVT::i32));
2117  Ops.insert(Ops.end(), SelOps.begin(), SelOps.end());
2118  i += 2;
2119  }
2120  }
2121 
2122  // Add the glue input back if present.
2123  if (e != InOps.size())
2124  Ops.push_back(InOps.back());
2125 }
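// Illustrative note (an editor's sketch, not from the LLVM tree): for a memory
// constraint, the generic lowering leaves a Kind_Mem flag word followed by a
// single pointer operand. The loop above asks the target to select that
// address and replaces the pair with a fresh flag word,
//
//   InlineAsm::getFlagWord(InlineAsm::Kind_Mem, SelOps.size())
//
// tagged with the constraint ID and sized to however many operands the target
// produced for the address (for example base, scale, index and displacement on
// x86), followed by those selected operands.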
2126 
2127 /// findGlueUse - Return use of MVT::Glue value produced by the specified
2128 /// SDNode.
2129 ///
2130 static SDNode *findGlueUse(SDNode *N) {
2131  unsigned FlagResNo = N->getNumValues()-1;
2132  for (SDNode::use_iterator I = N->use_begin(), E = N->use_end(); I != E; ++I) {
2133  SDUse &Use = I.getUse();
2134  if (Use.getResNo() == FlagResNo)
2135  return Use.getUser();
2136  }
2137  return nullptr;
2138 }
2139 
2140 /// findNonImmUse - Return true if "Use" is a non-immediate use of "Def".
2141 /// This function iteratively traverses up the operand chain, ignoring
2142 /// certain nodes.
2143 static bool findNonImmUse(SDNode *Use, SDNode* Def, SDNode *ImmedUse,
2144  SDNode *Root, SmallPtrSetImpl<SDNode*> &Visited,
2145  bool IgnoreChains) {
2146  // Node IDs are unique, and a node's ID is guaranteed to be greater than the
2147  // IDs of all of its (recursive) operands. If we scan to a point where
2148  // 'use' is smaller than the node we're scanning for, then we know we will
2149  // never find it.
2150  //
2151  // The Use may be -1 (unassigned) if it is a newly allocated node. This can
2152  // happen because we scan down to newly selected nodes in the case of glue
2153  // uses.
2154  std::vector<SDNode *> WorkList;
2155  WorkList.push_back(Use);
2156 
2157  while (!WorkList.empty()) {
2158  Use = WorkList.back();
2159  WorkList.pop_back();
2160  // NodeId topological order of TokenFactors is not guaranteed. Do not skip.
2161  if (Use->getOpcode() != ISD::TokenFactor &&
2162  Use->getNodeId() < Def->getNodeId() && Use->getNodeId() != -1)
2163  continue;
2164 
2165  // Don't revisit a node if we already scanned it and didn't fail; we know we
2166  // won't fail if we scan it again.
2167  if (!Visited.insert(Use).second)
2168  continue;
2169 
2170  for (const SDValue &Op : Use->op_values()) {
2171  // Ignore chain uses, they are validated by HandleMergeInputChains.
2172  if (Op.getValueType() == MVT::Other && IgnoreChains)
2173  continue;
2174 
2175  SDNode *N = Op.getNode();
2176  if (N == Def) {
2177  if (Use == ImmedUse || Use == Root)
2178  continue; // We are not looking for immediate use.
2179  assert(N != Root);
2180  return true;
2181  }
2182 
2183  // Traverse up the operand chain.
2184  WorkList.push_back(N);
2185  }
2186  }
2187  return false;
2188 }
2189 
2190 /// IsProfitableToFold - Returns true if it's profitable to fold the specific
2191 /// operand node N of U during instruction selection that starts at Root.
2192 bool SelectionDAGISel::IsProfitableToFold(SDValue N, SDNode *U,
2193  SDNode *Root) const {
2194  if (OptLevel == CodeGenOpt::None) return false;
2195  return N.hasOneUse();
2196 }
2197 
2198 /// IsLegalToFold - Returns true if the specific operand node N of
2199 /// U can be folded during instruction selection that starts at Root.
2200 bool SelectionDAGISel::IsLegalToFold(SDValue N, SDNode *U, SDNode *Root,
2201  CodeGenOpt::Level OptLevel,
2202  bool IgnoreChains) {
2203  if (OptLevel == CodeGenOpt::None) return false;
2204 
2205  // If Root use can somehow reach N through a path that doesn't contain
2206  // U then folding N would create a cycle. e.g. In the following
2207  // diagram, Root can reach N through X. If N is folded into Root, then
2208  // X is both a predecessor and a successor of U.
2209  //
2210  // [N*] //
2211  // ^ ^ //
2212  // / \ //
2213  // [U*] [X]? //
2214  // ^ ^ //
2215  // \ / //
2216  // \ / //
2217  // [Root*] //
2218  //
2219  // * indicates nodes to be folded together.
2220  //
2221  // If Root produces glue, then it gets (even more) interesting. Since it
2222  // will be "glued" together with its glue use in the scheduler, we need to
2223  // check if it might reach N.
2224  //
2225  // [N*] //
2226  // ^ ^ //
2227  // / \ //
2228  // [U*] [X]? //
2229  // ^ ^ //
2230  // \ \ //
2231  // \ | //
2232  // [Root*] | //
2233  // ^ | //
2234  // f | //
2235  // | / //
2236  // [Y] / //
2237  // ^ / //
2238  // f / //
2239  // | / //
2240  // [GU] //
2241  //
2242  // If GU (glue use) indirectly reaches N (the load), and Root folds N
2243  // (call it Fold), then X is a predecessor of GU and a successor of
2244  // Fold. But since Fold and GU are glued together, this will create
2245  // a cycle in the scheduling graph.
2246 
2247  // If the node has glue, walk down the graph to the "lowest" node in the
2248  // glued set.
2249  EVT VT = Root->getValueType(Root->getNumValues()-1);
2250  while (VT == MVT::Glue) {
2251  SDNode *GU = findGlueUse(Root);
2252  if (!GU)
2253  break;
2254  Root = GU;
2255  VT = Root->getValueType(Root->getNumValues()-1);
2256 
2257  // If our query node has a glue result with a use, we've walked up it. If
2258  // the user (which has already been selected) has a chain or indirectly uses
2259  // the chain, our WalkChainUsers predicate will not consider it. Because of
2260  // this, we cannot ignore chains in this predicate.
2261  IgnoreChains = false;
2262  }
2263 
2264  SmallPtrSet<SDNode*, 16> Visited;
2265  return !findNonImmUse(Root, N.getNode(), U, Root, Visited, IgnoreChains);
2266 }
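// Illustrative example (hypothetical values, not from the LLVM tree): a
// concrete instance of the first diagram above is
//
//   N    = load [p]
//   U    = add N, 4
//   X    = mul N, 2
//   Root = store U -> [X]
//
// Root reaches N both through U (the edge being folded) and through X. If N
// were folded into the node generated for Root, X would need the load result
// produced by that combined node while also feeding its address operand, i.e.
// a cycle, so IsLegalToFold must reject the fold.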
2267 
2268 void SelectionDAGISel::Select_INLINEASM(SDNode *N) {
2269  SDLoc DL(N);
2270 
2271  std::vector<SDValue> Ops(N->op_begin(), N->op_end());
2272  SelectInlineAsmMemoryOperands(Ops, DL);
2273 
2274  const EVT VTs[] = {MVT::Other, MVT::Glue};
2275  SDValue New = CurDAG->getNode(ISD::INLINEASM, DL, VTs, Ops);
2276  New->setNodeId(-1);
2277  ReplaceUses(N, New.getNode());
2278  CurDAG->RemoveDeadNode(N);
2279 }
2280 
2281 void SelectionDAGISel::Select_READ_REGISTER(SDNode *Op) {
2282  SDLoc dl(Op);
2283  MDNodeSDNode *MD = dyn_cast<MDNodeSDNode>(Op->getOperand(1));
2284  const MDString *RegStr = dyn_cast<MDString>(MD->getMD()->getOperand(0));
2285  unsigned Reg =
2286  TLI->getRegisterByName(RegStr->getString().data(), Op->getValueType(0),
2287  *CurDAG);
2288  SDValue New = CurDAG->getCopyFromReg(
2289  Op->getOperand(0), dl, Reg, Op->getValueType(0));
2290  New->setNodeId(-1);
2291  ReplaceUses(Op, New.getNode());
2292  CurDAG->RemoveDeadNode(Op);
2293 }
2294 
2295 void SelectionDAGISel::Select_WRITE_REGISTER(SDNode *Op) {
2296  SDLoc dl(Op);
2297  MDNodeSDNode *MD = dyn_cast<MDNodeSDNode>(Op->getOperand(1));
2298  const MDString *RegStr = dyn_cast<MDString>(MD->getMD()->getOperand(0));
2299  unsigned Reg = TLI->getRegisterByName(RegStr->getString().data(),
2300  Op->getOperand(2).getValueType(),
2301  *CurDAG);
2302  SDValue New = CurDAG->getCopyToReg(
2303  Op->getOperand(0), dl, Reg, Op->getOperand(2));
2304  New->setNodeId(-1);
2305  ReplaceUses(Op, New.getNode());
2306  CurDAG->RemoveDeadNode(Op);
2307 }
2308 
2309 void SelectionDAGISel::Select_UNDEF(SDNode *N) {
2310  CurDAG->SelectNodeTo(N, TargetOpcode::IMPLICIT_DEF, N->getValueType(0));
2311 }
2312 
2313 /// GetVBR - decode a vbr encoding whose top bit is set.
2314 LLVM_ATTRIBUTE_ALWAYS_INLINE static inline uint64_t
2315 GetVBR(uint64_t Val, const unsigned char *MatcherTable, unsigned &Idx) {
2316  assert(Val >= 128 && "Not a VBR");
2317  Val &= 127; // Remove first vbr bit.
2318 
2319  unsigned Shift = 7;
2320  uint64_t NextBits;
2321  do {
2322  NextBits = MatcherTable[Idx++];
2323  Val |= (NextBits&127) << Shift;
2324  Shift += 7;
2325  } while (NextBits & 128);
2326 
2327  return Val;
2328 }
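// Illustrative sketch (an editor's example, not from the LLVM tree): the VBR
// encoding stores seven payload bits per byte, least-significant group first,
// with bit 7 of each byte acting as a continuation flag. For example the value
// 300 is emitted as the two bytes { 0xAC, 0x02 }:
//
//   byte 0: 0xAC -> payload 0x2C, continuation bit set
//   byte 1: 0x02 -> payload 0x02, no continuation
//   Val = 0x2C | (0x02 << 7) = 44 + 256 = 300
//
// A minimal caller, assuming the first (tagged) byte has already been read:
LLVM_ATTRIBUTE_UNUSED static uint64_t DecodeVBRExample() {
  const unsigned char Table[] = {0xAC, 0x02}; // hypothetical matcher bytes
  unsigned Idx = 0;
  uint64_t Val = Table[Idx++];    // 0xAC has its top bit set, so it is a VBR
  return GetVBR(Val, Table, Idx); // yields 300
}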
2329 
2330 /// When a match is complete, this method updates uses of interior chain results
2331 /// to use the new results.
2332 void SelectionDAGISel::UpdateChains(
2333  SDNode *NodeToMatch, SDValue InputChain,
2334  SmallVectorImpl<SDNode *> &ChainNodesMatched, bool isMorphNodeTo) {
2335  SmallVector<SDNode*, 4> NowDeadNodes;
2336 
2337  // Now that all the normal results are replaced, we replace the chain and
2338  // glue results if present.
2339  if (!ChainNodesMatched.empty()) {
2340  assert(InputChain.getNode() &&
2341  "Matched input chains but didn't produce a chain");
2342  // Loop over all of the nodes we matched that produced a chain result.
2343  // Replace all the chain results with the final chain we ended up with.
2344  for (unsigned i = 0, e = ChainNodesMatched.size(); i != e; ++i) {
2345  SDNode *ChainNode = ChainNodesMatched[i];
2346  // If ChainNode is null, it's because we replaced it on a previous
2347  // iteration and we cleared it out of the map. Just skip it.
2348  if (!ChainNode)
2349  continue;
2350 
2351  assert(ChainNode->getOpcode() != ISD::DELETED_NODE &&
2352  "Deleted node left in chain");
2353 
2354  // Don't replace the results of the root node if we're doing a
2355  // MorphNodeTo.
2356  if (ChainNode == NodeToMatch && isMorphNodeTo)
2357  continue;
2358 
2359  SDValue ChainVal = SDValue(ChainNode, ChainNode->getNumValues()-1);
2360  if (ChainVal.getValueType() == MVT::Glue)
2361  ChainVal = ChainVal.getValue(ChainVal->getNumValues()-2);
2362  assert(ChainVal.getValueType() == MVT::Other && "Not a chain?");
2363  SelectionDAG::DAGNodeDeletedListener NDL(
2364  *CurDAG, [&](SDNode *N, SDNode *E) {
2365  std::replace(ChainNodesMatched.begin(), ChainNodesMatched.end(), N,
2366  static_cast<SDNode *>(nullptr));
2367  });
2368  if (ChainNode->getOpcode() != ISD::TokenFactor)
2369  CurDAG->ReplaceAllUsesOfValueWith(ChainVal, InputChain);
2370 
2371  // If the node became dead and we haven't already seen it, delete it.
2372  if (ChainNode != NodeToMatch && ChainNode->use_empty() &&
2373  !std::count(NowDeadNodes.begin(), NowDeadNodes.end(), ChainNode))
2374  NowDeadNodes.push_back(ChainNode);
2375  }
2376  }
2377 
2378  if (!NowDeadNodes.empty())
2379  CurDAG->RemoveDeadNodes(NowDeadNodes);
2380 
2381  DEBUG(dbgs() << "ISEL: Match complete!\n");
2382 }
2383 
2384 enum ChainResult {
2385  CR_Simple,
2386  CR_InducesCycle,
2387  CR_LeadsToInteriorNode
2388 };
2389 
2390 /// WalkChainUsers - Walk down the users of the specified chained node that is
2391 /// part of the pattern we're matching, looking at all of the users we find.
2392 /// This determines whether something is an interior node, whether we have a
2393 /// non-pattern node in between two pattern nodes (which prevents folding because
2394 /// it would induce a cycle) and whether we have a TokenFactor node sandwiched
2395 /// between pattern nodes (in which case the TF becomes part of the pattern).
2396 ///
2397 /// The walk we do here is guaranteed to be small because we quickly get down to
2398 /// already selected nodes "below" us.
2399 static ChainResult
2400 WalkChainUsers(const SDNode *ChainedNode,
2401  SmallVectorImpl<SDNode *> &ChainedNodesInPattern,
2402  DenseMap<const SDNode *, ChainResult> &TokenFactorResult,
2403  SmallVectorImpl<SDNode *> &InteriorChainedNodes) {
2404  ChainResult Result = CR_Simple;
2405 
2406  for (SDNode::use_iterator UI = ChainedNode->use_begin(),
2407  E = ChainedNode->use_end(); UI != E; ++UI) {
2408  // Make sure the use is of the chain, not some other value we produce.
2409  if (UI.getUse().getValueType() != MVT::Other) continue;
2410 
2411  SDNode *User = *UI;
2412 
2413  if (User->getOpcode() == ISD::HANDLENODE) // Root of the graph.
2414  continue;
2415 
2416  // If we see an already-selected machine node, then we've gone beyond the
2417  // pattern that we're selecting down into the already selected chunk of the
2418  // DAG.
2419  unsigned UserOpcode = User->getOpcode();
2420  if (User->isMachineOpcode() ||
2421  UserOpcode == ISD::CopyToReg ||
2422  UserOpcode == ISD::CopyFromReg ||
2423  UserOpcode == ISD::INLINEASM ||
2424  UserOpcode == ISD::EH_LABEL ||
2425  UserOpcode == ISD::LIFETIME_START ||
2426  UserOpcode == ISD::LIFETIME_END) {
2427  // If their node ID got reset to -1 then they've already been selected.
2428  // Treat them like a MachineOpcode.
2429  if (User->getNodeId() == -1)
2430  continue;
2431  }
2432 
2433  // If we have a TokenFactor, we handle it specially.
2434  if (User->getOpcode() != ISD::TokenFactor) {
2435  // If the node isn't a token factor and isn't part of our pattern, then it
2436  // must be a random chained node in between two nodes we're selecting.
2437  // This happens when we have something like:
2438  // x = load ptr
2439  // call
2440  // y = x+4
2441  // store y -> ptr
2442  // Because we structurally match the load/store as a read/modify/write,
2443  // but the call is chained between them. We cannot fold in this case
2444  // because it would induce a cycle in the graph.
2445  if (!std::count(ChainedNodesInPattern.begin(),
2446  ChainedNodesInPattern.end(), User))
2447  return CR_InducesCycle;
2448 
2449  // Otherwise we found a node that is part of our pattern. For example in:
2450  // x = load ptr
2451  // y = x+4
2452  // store y -> ptr
2453  // This would happen when we're scanning down from the load and see the
2454  // store as a user. Record that there is a use of ChainedNode that is
2455  // part of the pattern and keep scanning uses.
2456  Result = CR_LeadsToInteriorNode;
2457  InteriorChainedNodes.push_back(User);
2458  continue;
2459  }
2460 
2461  // If we found a TokenFactor, there are two cases to consider: first if the
2462  // TokenFactor is just hanging "below" the pattern we're matching (i.e. no
2463  // uses of the TF are in our pattern) we just want to ignore it. Second,
2464  // the TokenFactor can be sandwiched in between two chained nodes, like so:
2465  // [Load chain]
2466  // ^
2467  // |
2468  // [Load]
2469  // ^ ^
2470  // | \ DAG's like cheese
2471  // / \ do you?
2472  // / |
2473  // [TokenFactor] [Op]
2474  // ^ ^
2475  // | |
2476  // \ /
2477  // \ /
2478  // [Store]
2479  //
2480  // In this case, the TokenFactor becomes part of our match and we rewrite it
2481  // as a new TokenFactor.
2482  //
2483  // To distinguish these two cases, do a recursive walk down the uses.
2484  auto MemoizeResult = TokenFactorResult.find(User);
2485  bool Visited = MemoizeResult != TokenFactorResult.end();
2486  // Recursively walk chain users only if the result is not memoized.
2487  if (!Visited) {
2488  auto Res = WalkChainUsers(User, ChainedNodesInPattern, TokenFactorResult,
2489  InteriorChainedNodes);
2490  MemoizeResult = TokenFactorResult.insert(std::make_pair(User, Res)).first;
2491  }
2492  switch (MemoizeResult->second) {
2493  case CR_Simple:
2494  // If the uses of the TokenFactor are just already-selected nodes, ignore
2495  // it, it is "below" our pattern.
2496  continue;
2497  case CR_InducesCycle:
2498  // If the uses of the TokenFactor lead to nodes that are not part of our
2499  // pattern that are not selected, folding would turn this into a cycle,
2500  // bail out now.
2501  return CR_InducesCycle;
2502  case CR_LeadsToInteriorNode:
2503  break; // Otherwise, keep processing.
2504  }
2505 
2506  // Okay, we know we're in the interesting interior case. The TokenFactor
2507  // is now going to be considered part of the pattern so that we rewrite its
2508  // uses (it may have uses that are not part of the pattern) with the
2509  // ultimate chain result of the generated code. We will also add its chain
2510  // inputs as inputs to the ultimate TokenFactor we create.
2511  Result = CR_LeadsToInteriorNode;
2512  if (!Visited) {
2513  ChainedNodesInPattern.push_back(User);
2514  InteriorChainedNodes.push_back(User);
2515  }
2516  }
2517 
2518  return Result;
2519 }
2520 
2521 /// HandleMergeInputChains - This implements the OPC_EmitMergeInputChains
2522 /// operation for when the pattern matched at least one node with a chain. The
2523 /// input vector contains a list of all of the chained nodes that we match. We
2524 /// must determine if this is a valid thing to cover (i.e. matching it won't
2525 /// induce cycles in the DAG) and if so, creating a TokenFactor node that will
2526 /// be used as the input node chain for the generated nodes.
2527 static SDValue
2528 HandleMergeInputChains(SmallVectorImpl<SDNode *> &ChainNodesMatched,
2529  SelectionDAG *CurDAG) {
2530  // Used for memoization. Without it WalkChainUsers could take exponential
2531  // time to run.
2532  DenseMap<const SDNode *, ChainResult> TokenFactorResult;
2533  // Walk all of the chained nodes we've matched, recursively scanning down the
2534  // users of the chain result. This adds any TokenFactor nodes that are caught
2535  // in between chained nodes to the chained and interior nodes list.
2536  SmallVector<SDNode*, 3> InteriorChainedNodes;
2537  for (unsigned i = 0, e = ChainNodesMatched.size(); i != e; ++i) {
2538  if (WalkChainUsers(ChainNodesMatched[i], ChainNodesMatched,
2539  TokenFactorResult,
2540  InteriorChainedNodes) == CR_InducesCycle)
2541  return SDValue(); // Would induce a cycle.
2542  }
2543 
2544  // Okay, we have walked all the matched nodes and collected TokenFactor nodes
2545  // that we are interested in. Form our input TokenFactor node.
2546  SmallVector<SDValue, 3> InputChains;
2547  for (unsigned i = 0, e = ChainNodesMatched.size(); i != e; ++i) {
2548  // Add the input chain of this node to the InputChains list (which will be
2549  // the operands of the generated TokenFactor) if it's not an interior node.
2550  SDNode *N = ChainNodesMatched[i];
2551  if (N->getOpcode() != ISD::TokenFactor) {
2552  if (std::count(InteriorChainedNodes.begin(),InteriorChainedNodes.end(),N))
2553  continue;
2554 
2555  // Otherwise, add the input chain.
2556  SDValue InChain = ChainNodesMatched[i]->getOperand(0);
2557  assert(InChain.getValueType() == MVT::Other && "Not a chain");
2558  InputChains.push_back(InChain);
2559  continue;
2560  }
2561 
2562  // If we have a token factor, we want to add all inputs of the token factor
2563  // that are not part of the pattern we're matching.
2564  for (const SDValue &Op : N->op_values()) {
2565  if (!std::count(ChainNodesMatched.begin(), ChainNodesMatched.end(),
2566  Op.getNode()))
2567  InputChains.push_back(Op);
2568  }
2569  }
2570 
2571  if (InputChains.size() == 1)
2572  return InputChains[0];
2573  return CurDAG->getNode(ISD::TokenFactor, SDLoc(ChainNodesMatched[0]),
2574  MVT::Other, InputChains);
2575 }
2576 
2577 /// MorphNode - Handle morphing a node in place for the selector.
2578 SDNode *SelectionDAGISel::
2579 MorphNode(SDNode *Node, unsigned TargetOpc, SDVTList VTList,
2580  ArrayRef<SDValue> Ops, unsigned EmitNodeInfo) {
2581  // It is possible we're using MorphNodeTo to replace a node with no
2582  // normal results with one that has a normal result (or we could be
2583  // adding a chain) and the input could have glue and chains as well.
2584  // In this case we need to shift the operands down.
2585  // FIXME: This is a horrible hack and broken in obscure cases, no worse
2586  // than the old isel though.
2587  int OldGlueResultNo = -1, OldChainResultNo = -1;
2588 
2589  unsigned NTMNumResults = Node->getNumValues();
2590  if (Node->getValueType(NTMNumResults-1) == MVT::Glue) {
2591  OldGlueResultNo = NTMNumResults-1;
2592  if (NTMNumResults != 1 &&
2593  Node->getValueType(NTMNumResults-2) == MVT::Other)
2594  OldChainResultNo = NTMNumResults-2;
2595  } else if (Node->getValueType(NTMNumResults-1) == MVT::Other)
2596  OldChainResultNo = NTMNumResults-1;
2597 
2598  // Call the underlying SelectionDAG routine to do the transmogrification. Note
2599  // that this deletes operands of the old node that become dead.
2600  SDNode *Res = CurDAG->MorphNodeTo(Node, ~TargetOpc, VTList, Ops);
2601 
2602  // MorphNodeTo can operate in two ways: if an existing node with the
2603  // specified operands exists, it can just return it. Otherwise, it
2604  // updates the node in place to have the requested operands.
2605  if (Res == Node) {
2606  // If we updated the node in place, reset the node ID. To the isel,
2607  // this should be just like a newly allocated machine node.
2608  Res->setNodeId(-1);
2609  }
2610 
2611  unsigned ResNumResults = Res->getNumValues();
2612  // Move the glue if needed.
2613  if ((EmitNodeInfo & OPFL_GlueOutput) && OldGlueResultNo != -1 &&
2614  (unsigned)OldGlueResultNo != ResNumResults-1)
2615  CurDAG->ReplaceAllUsesOfValueWith(SDValue(Node, OldGlueResultNo),
2616  SDValue(Res, ResNumResults-1));
2617 
2618  if ((EmitNodeInfo & OPFL_GlueOutput) != 0)
2619  --ResNumResults;
2620 
2621  // Move the chain reference if needed.
2622  if ((EmitNodeInfo & OPFL_Chain) && OldChainResultNo != -1 &&
2623  (unsigned)OldChainResultNo != ResNumResults-1)
2624  CurDAG->ReplaceAllUsesOfValueWith(SDValue(Node, OldChainResultNo),
2625  SDValue(Res, ResNumResults-1));
2626 
2627  // Otherwise, no replacement happened because the node already exists. Replace
2628  // Uses of the old node with the new one.
2629  if (Res != Node) {
2630  CurDAG->ReplaceAllUsesWith(Node, Res);
2631  CurDAG->RemoveDeadNode(Node);
2632  }
2633 
2634  return Res;
2635 }
2636 
2637 /// CheckSame - Implements OP_CheckSame.
2638 LLVM_ATTRIBUTE_ALWAYS_INLINE static inline bool
2639 CheckSame(const unsigned char *MatcherTable, unsigned &MatcherIndex,
2640  SDValue N,
2641  const SmallVectorImpl<std::pair<SDValue, SDNode*>> &RecordedNodes) {
2642  // Accept if it is exactly the same as a previously recorded node.
2643  unsigned RecNo = MatcherTable[MatcherIndex++];
2644  assert(RecNo < RecordedNodes.size() && "Invalid CheckSame");
2645  return N == RecordedNodes[RecNo].first;
2646 }
2647 
2648 /// CheckChildSame - Implements OP_CheckChildXSame.
2649 LLVM_ATTRIBUTE_ALWAYS_INLINE static inline bool
2650 CheckChildSame(const unsigned char *MatcherTable, unsigned &MatcherIndex,
2651  SDValue N,
2652  const SmallVectorImpl<std::pair<SDValue, SDNode*>> &RecordedNodes,
2653  unsigned ChildNo) {
2654  if (ChildNo >= N.getNumOperands())
2655  return false; // Match fails if out of range child #.
2656  return ::CheckSame(MatcherTable, MatcherIndex, N.getOperand(ChildNo),
2657  RecordedNodes);
2658 }
2659 
2660 /// CheckPatternPredicate - Implements OP_CheckPatternPredicate.
2661 LLVM_ATTRIBUTE_ALWAYS_INLINE static inline bool
2662 CheckPatternPredicate(const unsigned char *MatcherTable, unsigned &MatcherIndex,
2663  const SelectionDAGISel &SDISel) {
2664  return SDISel.CheckPatternPredicate(MatcherTable[MatcherIndex++]);
2665 }
2666 
2667 /// CheckNodePredicate - Implements OP_CheckNodePredicate.
2668 LLVM_ATTRIBUTE_ALWAYS_INLINE static inline bool
2669 CheckNodePredicate(const unsigned char *MatcherTable, unsigned &MatcherIndex,
2670  const SelectionDAGISel &SDISel, SDNode *N) {
2671  return SDISel.CheckNodePredicate(N, MatcherTable[MatcherIndex++]);
2672 }
2673 
2674 LLVM_ATTRIBUTE_ALWAYS_INLINE static inline bool
2675 CheckOpcode(const unsigned char *MatcherTable, unsigned &MatcherIndex,
2676  SDNode *N) {
2677  uint16_t Opc = MatcherTable[MatcherIndex++];
2678  Opc |= (unsigned short)MatcherTable[MatcherIndex++] << 8;
2679  return N->getOpcode() == Opc;
2680 }
2681 
2682 LLVM_ATTRIBUTE_ALWAYS_INLINE static inline bool
2683 CheckType(const unsigned char *MatcherTable, unsigned &MatcherIndex, SDValue N,
2684  const TargetLowering *TLI, const DataLayout &DL) {
2685  MVT::SimpleValueType VT = (MVT::SimpleValueType)MatcherTable[MatcherIndex++];
2686  if (N.getValueType() == VT) return true;
2687 
2688  // Handle the case when VT is iPTR.
2689  return VT == MVT::iPTR && N.getValueType() == TLI->getPointerTy(DL);
2690 }
2691 
2692 LLVM_ATTRIBUTE_ALWAYS_INLINE static inline bool
2693 CheckChildType(const unsigned char *MatcherTable, unsigned &MatcherIndex,
2694  SDValue N, const TargetLowering *TLI, const DataLayout &DL,
2695  unsigned ChildNo) {
2696  if (ChildNo >= N.getNumOperands())
2697  return false; // Match fails if out of range child #.
2698  return ::CheckType(MatcherTable, MatcherIndex, N.getOperand(ChildNo), TLI,
2699  DL);
2700 }
2701 
2702 LLVM_ATTRIBUTE_ALWAYS_INLINE static inline bool
2703 CheckCondCode(const unsigned char *MatcherTable, unsigned &MatcherIndex,
2704  SDValue N) {
2705  return cast<CondCodeSDNode>(N)->get() ==
2706  (ISD::CondCode)MatcherTable[MatcherIndex++];
2707 }
2708 
2709 LLVM_ATTRIBUTE_ALWAYS_INLINE static inline bool
2710 CheckValueType(const unsigned char *MatcherTable, unsigned &MatcherIndex,
2711  SDValue N, const TargetLowering *TLI, const DataLayout &DL) {
2712  MVT::SimpleValueType VT = (MVT::SimpleValueType)MatcherTable[MatcherIndex++];
2713  if (cast<VTSDNode>(N)->getVT() == VT)
2714  return true;
2715 
2716  // Handle the case when VT is iPTR.
2717  return VT == MVT::iPTR && cast<VTSDNode>(N)->getVT() == TLI->getPointerTy(DL);
2718 }
2719 
2720 LLVM_ATTRIBUTE_ALWAYS_INLINE static inline bool
2721 CheckInteger(const unsigned char *MatcherTable, unsigned &MatcherIndex,
2722  SDValue N) {
2723  int64_t Val = MatcherTable[MatcherIndex++];
2724  if (Val & 128)
2725  Val = GetVBR(Val, MatcherTable, MatcherIndex);
2726 
2727  ConstantSDNode *C = dyn_cast<ConstantSDNode>(N);
2728  return C && C->getSExtValue() == Val;
2729 }
2730 
2731 LLVM_ATTRIBUTE_ALWAYS_INLINE static inline bool
2732 CheckChildInteger(const unsigned char *MatcherTable, unsigned &MatcherIndex,
2733  SDValue N, unsigned ChildNo) {
2734  if (ChildNo >= N.getNumOperands())
2735  return false; // Match fails if out of range child #.
2736  return ::CheckInteger(MatcherTable, MatcherIndex, N.getOperand(ChildNo));
2737 }
2738 
2739 LLVM_ATTRIBUTE_ALWAYS_INLINE static inline bool
2740 CheckAndImm(const unsigned char *MatcherTable, unsigned &MatcherIndex,
2741  SDValue N, const SelectionDAGISel &SDISel) {
2742  int64_t Val = MatcherTable[MatcherIndex++];
2743  if (Val & 128)
2744  Val = GetVBR(Val, MatcherTable, MatcherIndex);
2745 
2746  if (N->getOpcode() != ISD::AND) return false;
2747 
2748  ConstantSDNode *C = dyn_cast<ConstantSDNode>(N->getOperand(1));
2749  return C && SDISel.CheckAndMask(N.getOperand(0), C, Val);
2750 }
2751 
2752 LLVM_ATTRIBUTE_ALWAYS_INLINE static inline bool
2753 CheckOrImm(const unsigned char *MatcherTable, unsigned &MatcherIndex,
2754  SDValue N, const SelectionDAGISel &SDISel) {
2755  int64_t Val = MatcherTable[MatcherIndex++];
2756  if (Val & 128)
2757  Val = GetVBR(Val, MatcherTable, MatcherIndex);
2758 
2759  if (N->getOpcode() != ISD::OR) return false;
2760 
2761  ConstantSDNode *C = dyn_cast<ConstantSDNode>(N->getOperand(1));
2762  return C && SDISel.CheckOrMask(N.getOperand(0), C, Val);
2763 }
2764 
2765 /// IsPredicateKnownToFail - If we know how and can do so without pushing a
2766 /// scope, evaluate the current node. If the current predicate is known to
2767 /// fail, set Result=true and return anything. If the current predicate is
2768 /// known to pass, set Result=false and return the MatcherIndex to continue
2769 /// with. If the current predicate is unknown, set Result=false and return the
2770 /// MatcherIndex to continue with.
2771 static unsigned IsPredicateKnownToFail(const unsigned char *Table,
2772  unsigned Index, SDValue N,
2773  bool &Result,
2774  const SelectionDAGISel &SDISel,
2775  SmallVectorImpl<std::pair<SDValue, SDNode*>> &RecordedNodes) {
2776  switch (Table[Index++]) {
2777  default:
2778  Result = false;
2779  return Index-1; // Could not evaluate this predicate.
2780  case SelectionDAGISel::OPC_CheckSame:
2781  Result = !::CheckSame(Table, Index, N, RecordedNodes);
2782  return Index;
2783  case SelectionDAGISel::OPC_CheckChild0Same:
2784  case SelectionDAGISel::OPC_CheckChild1Same:
2785  case SelectionDAGISel::OPC_CheckChild2Same:
2786  case SelectionDAGISel::OPC_CheckChild3Same:
2787  Result = !::CheckChildSame(Table, Index, N, RecordedNodes,
2788  Table[Index-1] - SelectionDAGISel::OPC_CheckChild0Same);
2789  return Index;
2790  case SelectionDAGISel::OPC_CheckPatternPredicate:
2791  Result = !::CheckPatternPredicate(Table, Index, SDISel);
2792  return Index;
2793  case SelectionDAGISel::OPC_CheckPredicate:
2794  Result = !::CheckNodePredicate(Table, Index, SDISel, N.getNode());
2795  return Index;
2796  case SelectionDAGISel::OPC_CheckOpcode:
2797  Result = !::CheckOpcode(Table, Index, N.getNode());
2798  return Index;
2799  case SelectionDAGISel::OPC_CheckType:
2800  Result = !::CheckType(Table, Index, N, SDISel.TLI,
2801  SDISel.CurDAG->getDataLayout());
2802  return Index;
2803  case SelectionDAGISel::OPC_CheckTypeRes: {
2804  unsigned Res = Table[Index++];
2805  Result = !::CheckType(Table, Index, N.getValue(Res), SDISel.TLI,
2806  SDISel.CurDAG->getDataLayout());
2807  return Index;
2808  }
2809  case SelectionDAGISel::OPC_CheckChild0Type:
2810  case SelectionDAGISel::OPC_CheckChild1Type:
2811  case SelectionDAGISel::OPC_CheckChild2Type:
2812  case SelectionDAGISel::OPC_CheckChild3Type:
2813  case SelectionDAGISel::OPC_CheckChild4Type:
2814  case SelectionDAGISel::OPC_CheckChild5Type:
2815  case SelectionDAGISel::OPC_CheckChild6Type:
2816  case SelectionDAGISel::OPC_CheckChild7Type:
2817  Result = !::CheckChildType(
2818  Table, Index, N, SDISel.TLI, SDISel.CurDAG->getDataLayout(),
2819  Table[Index - 1] - SelectionDAGISel::OPC_CheckChild0Type);
2820  return Index;
2821  case SelectionDAGISel::OPC_CheckCondCode:
2822  Result = !::CheckCondCode(Table, Index, N);
2823  return Index;
2824  case SelectionDAGISel::OPC_CheckValueType:
2825  Result = !::CheckValueType(Table, Index, N, SDISel.TLI,
2826  SDISel.CurDAG->getDataLayout());
2827  return Index;
2828  case SelectionDAGISel::OPC_CheckInteger:
2829  Result = !::CheckInteger(Table, Index, N);
2830  return Index;
2831  case SelectionDAGISel::OPC_CheckChild0Integer:
2832  case SelectionDAGISel::OPC_CheckChild1Integer:
2833  case SelectionDAGISel::OPC_CheckChild2Integer:
2834  case SelectionDAGISel::OPC_CheckChild3Integer:
2835  case SelectionDAGISel::OPC_CheckChild4Integer:
2836  Result = !::CheckChildInteger(Table, Index, N,
2837  Table[Index-1] - SelectionDAGISel::OPC_CheckChild0Integer);
2838  return Index;
2839  case SelectionDAGISel::OPC_CheckAndImm:
2840  Result = !::CheckAndImm(Table, Index, N, SDISel);
2841  return Index;
2842  case SelectionDAGISel::OPC_CheckOrImm:
2843  Result = !::CheckOrImm(Table, Index, N, SDISel);
2844  return Index;
2845  }
2846 }
2847 
2848 namespace {
2849 
2850 struct MatchScope {
2851  /// FailIndex - If this match fails, this is the index to continue with.
2852  unsigned FailIndex;
2853 
2854  /// NodeStack - The node stack when the scope was formed.
2855  SmallVector<SDValue, 4> NodeStack;
2856 
2857  /// NumRecordedNodes - The number of recorded nodes when the scope was formed.
2858  unsigned NumRecordedNodes;
2859 
2860  /// NumMatchedMemRefs - The number of matched memref entries.
2861  unsigned NumMatchedMemRefs;
2862 
2863  /// InputChain/InputGlue - The current chain/glue
2864  SDValue InputChain, InputGlue;
2865 
2866  /// HasChainNodesMatched - True if the ChainNodesMatched list is non-empty.
2867  bool HasChainNodesMatched;
2868 };
2869 
2870 /// \brief A DAG update listener to keep the matching state
2871 /// (i.e. RecordedNodes and MatchScope) up to date if the target is allowed to
2872 /// change the DAG while matching. X86 addressing mode matcher is an example
2873 /// for this.
2874 class MatchStateUpdater : public SelectionDAG::DAGUpdateListener
2875 {
2876  SDNode **NodeToMatch;
2877  SmallVectorImpl<std::pair<SDValue, SDNode *>> &RecordedNodes;
2878  SmallVectorImpl<MatchScope> &MatchScopes;
2879 
2880 public:
2881  MatchStateUpdater(SelectionDAG &DAG, SDNode **NodeToMatch,
2882  SmallVectorImpl<std::pair<SDValue, SDNode *>> &RN,
2883  SmallVectorImpl<MatchScope> &MS)
2884  : SelectionDAG::DAGUpdateListener(DAG), NodeToMatch(NodeToMatch),
2885  RecordedNodes(RN), MatchScopes(MS) {}
2886 
2887  void NodeDeleted(SDNode *N, SDNode *E) override {
2888  // Some early-returns here to avoid the search if we deleted the node or
2889  // if the update comes from MorphNodeTo (MorphNodeTo is the last thing we
2890  // do, so it's unnecessary to update matching state at that point).
2891  // Neither of these can occur currently because we only install this
2892  // update listener while matching a complex pattern.
2893  if (!E || E->isMachineOpcode())
2894  return;
2895  // Check if NodeToMatch was updated.
2896  if (N == *NodeToMatch)
2897  *NodeToMatch = E;
2898  // Performing linear search here does not matter because we almost never
2899  // run this code. You'd have to have a CSE during complex pattern
2900  // matching.
2901  for (auto &I : RecordedNodes)
2902  if (I.first.getNode() == N)
2903  I.first.setNode(E);
2904 
2905  for (auto &I : MatchScopes)
2906  for (auto &J : I.NodeStack)
2907  if (J.getNode() == N)
2908  J.setNode(E);
2909  }
2910 };
2911 
2912 } // end anonymous namespace
2913 
2914 void SelectionDAGISel::SelectCodeCommon(SDNode *NodeToMatch,
2915  const unsigned char *MatcherTable,
2916  unsigned TableSize) {
2917  // FIXME: Should these even be selected? Handle these cases in the caller?
2918  switch (NodeToMatch->getOpcode()) {
2919  default:
2920  break;
2921  case ISD::EntryToken: // These nodes remain the same.
2922  case ISD::BasicBlock:
2923  case ISD::Register:
2924  case ISD::RegisterMask:
2925  case ISD::HANDLENODE:
2926  case ISD::MDNODE_SDNODE:
2927  case ISD::TargetConstant:
2928  case ISD::TargetConstantFP:
2929  case ISD::TargetConstantPool:
2930  case ISD::TargetFrameIndex:
2931  case ISD::TargetExternalSymbol:
2932  case ISD::MCSymbol:
2933  case ISD::TargetBlockAddress:
2934  case ISD::TargetJumpTable:
2935  case ISD::TargetGlobalTLSAddress:
2936  case ISD::TargetGlobalAddress:
2937  case ISD::TokenFactor:
2938  case ISD::CopyFromReg:
2939  case ISD::CopyToReg:
2940  case ISD::EH_LABEL:
2941  case ISD::ANNOTATION_LABEL:
2942  case ISD::LIFETIME_START:
2943  case ISD::LIFETIME_END:
2944  NodeToMatch->setNodeId(-1); // Mark selected.
2945  return;
2946  case ISD::AssertSext:
2947  case ISD::AssertZext:
2948  CurDAG->ReplaceAllUsesOfValueWith(SDValue(NodeToMatch, 0),
2949  NodeToMatch->getOperand(0));
2950  CurDAG->RemoveDeadNode(NodeToMatch);
2951  return;
2952  case ISD::INLINEASM:
2953  Select_INLINEASM(NodeToMatch);
2954  return;
2955  case ISD::READ_REGISTER:
2956  Select_READ_REGISTER(NodeToMatch);
2957  return;
2958  case ISD::WRITE_REGISTER:
2959  Select_WRITE_REGISTER(NodeToMatch);
2960  return;
2961  case ISD::UNDEF:
2962  Select_UNDEF(NodeToMatch);
2963  return;
2964  }
2965 
2966  assert(!NodeToMatch->isMachineOpcode() && "Node already selected!");
2967 
2968  // Set up the node stack with NodeToMatch as the only node on the stack.
2969  SmallVector<SDValue, 8> NodeStack;
2970  SDValue N = SDValue(NodeToMatch, 0);
2971  NodeStack.push_back(N);
2972 
2973  // MatchScopes - Scopes used when matching, if a match failure happens, this
2974  // indicates where to continue checking.
2975  SmallVector<MatchScope, 8> MatchScopes;
2976 
2977  // RecordedNodes - This is the set of nodes that have been recorded by the
2978  // state machine. The second value is the parent of the node, or null if the
2979  // root is recorded.
2980  SmallVector<std::pair<SDValue, SDNode*>, 8> RecordedNodes;
2981 
2982  // MatchedMemRefs - This is the set of MemRef's we've seen in the input
2983  // pattern.
2984  SmallVector<MachineMemOperand*, 2> MatchedMemRefs;
2985 
2986  // These are the current input chain and glue for use when generating nodes.
2987  // Various Emit operations change these. For example, emitting a copytoreg
2988  // uses and updates these.
2989  SDValue InputChain, InputGlue;
2990 
2991  // ChainNodesMatched - If a pattern matches nodes that have input/output
2992  // chains, the OPC_EmitMergeInputChains operation is emitted which indicates
2993  // which ones they are. The result is captured into this list so that we can
2994  // update the chain results when the pattern is complete.
2995  SmallVector<SDNode*, 3> ChainNodesMatched;
2996 
2997  DEBUG(dbgs() << "ISEL: Starting pattern match\n");
2998 
2999  // Determine where to start the interpreter. Normally we start at opcode #0,
3000  // but if the state machine starts with an OPC_SwitchOpcode, then we
3001  // accelerate the first lookup (which is guaranteed to be hot) with the
3002  // OpcodeOffset table.
3003  unsigned MatcherIndex = 0;
3004 
3005  if (!OpcodeOffset.empty()) {
3006  // Already computed the OpcodeOffset table, just index into it.
3007  if (N.getOpcode() < OpcodeOffset.size())
3008  MatcherIndex = OpcodeOffset[N.getOpcode()];
3009  DEBUG(dbgs() << " Initial Opcode index to " << MatcherIndex << "\n");
3010 
3011  } else if (MatcherTable[0] == OPC_SwitchOpcode) {
3012  // Otherwise, the table isn't computed, but the state machine does start
3013  // with an OPC_SwitchOpcode instruction. Populate the table now, since this
3014  // is the first time we're selecting an instruction.
3015  unsigned Idx = 1;
3016  while (true) {
3017  // Get the size of this case.
3018  unsigned CaseSize = MatcherTable[Idx++];
3019  if (CaseSize & 128)
3020  CaseSize = GetVBR(CaseSize, MatcherTable, Idx);
3021  if (CaseSize == 0) break;
3022 
3023  // Get the opcode, add the index to the table.
3024  uint16_t Opc = MatcherTable[Idx++];
3025  Opc |= (unsigned short)MatcherTable[Idx++] << 8;
3026  if (Opc >= OpcodeOffset.size())
3027  OpcodeOffset.resize((Opc+1)*2);
3028  OpcodeOffset[Opc] = Idx;
3029  Idx += CaseSize;
3030  }
3031 
3032  // Okay, do the lookup for the first opcode.
3033  if (N.getOpcode() < OpcodeOffset.size())
3034  MatcherIndex = OpcodeOffset[N.getOpcode()];
3035  }
3036 
3037  while (true) {
3038  assert(MatcherIndex < TableSize && "Invalid index");
3039 #ifndef NDEBUG
3040  unsigned CurrentOpcodeIndex = MatcherIndex;
3041 #endif
3042  BuiltinOpcodes Opcode = (BuiltinOpcodes)MatcherTable[MatcherIndex++];
3043  switch (Opcode) {
3044  case OPC_Scope: {
3045  // Okay, the semantics of this operation are that we should push a scope
3046  // then evaluate the first child. However, pushing a scope only to have
3047  // the first check fail (which then pops it) is inefficient. If we can
3048  // determine immediately that the first check (or first several) will
3049  // immediately fail, don't even bother pushing a scope for them.
3050  unsigned FailIndex;
3051 
3052  while (true) {
3053  unsigned NumToSkip = MatcherTable[MatcherIndex++];
3054  if (NumToSkip & 128)
3055  NumToSkip = GetVBR(NumToSkip, MatcherTable, MatcherIndex);
3056  // Found the end of the scope with no match.
3057  if (NumToSkip == 0) {
3058  FailIndex = 0;
3059  break;
3060  }
3061 
3062  FailIndex = MatcherIndex+NumToSkip;
3063 
3064  unsigned MatcherIndexOfPredicate = MatcherIndex;
3065  (void)MatcherIndexOfPredicate; // silence warning.
3066 
3067  // If we can't evaluate this predicate without pushing a scope (e.g. if
3068  // it is a 'MoveParent') or if the predicate succeeds on this node, we
3069  // push the scope and evaluate the full predicate chain.
3070  bool Result;
3071  MatcherIndex = IsPredicateKnownToFail(MatcherTable, MatcherIndex, N,
3072  Result, *this, RecordedNodes);
3073  if (!Result)
3074  break;
3075 
3076  DEBUG(dbgs() << " Skipped scope entry (due to false predicate) at "
3077  << "index " << MatcherIndexOfPredicate
3078  << ", continuing at " << FailIndex << "\n");
3079  ++NumDAGIselRetries;
3080 
3081  // Otherwise, we know that this case of the Scope is guaranteed to fail,
3082  // move to the next case.
3083  MatcherIndex = FailIndex;
3084  }
3085 
3086  // If the whole scope failed to match, bail.
3087  if (FailIndex == 0) break;
3088 
3089  // Push a MatchScope which indicates where to go if the first child fails
3090  // to match.
3091  MatchScope NewEntry;
3092  NewEntry.FailIndex = FailIndex;
3093  NewEntry.NodeStack.append(NodeStack.begin(), NodeStack.end());
3094  NewEntry.NumRecordedNodes = RecordedNodes.size();
3095  NewEntry.NumMatchedMemRefs = MatchedMemRefs.size();
3096  NewEntry.InputChain = InputChain;
3097  NewEntry.InputGlue = InputGlue;
3098  NewEntry.HasChainNodesMatched = !ChainNodesMatched.empty();
3099  MatchScopes.push_back(NewEntry);
3100  continue;
3101  }
3102  case OPC_RecordNode: {
3103  // Remember this node, it may end up being an operand in the pattern.
3104  SDNode *Parent = nullptr;
3105  if (NodeStack.size() > 1)
3106  Parent = NodeStack[NodeStack.size()-2].getNode();
3107  RecordedNodes.push_back(std::make_pair(N, Parent));
3108  continue;
3109  }
3110 
3111  case OPC_RecordChild0: case OPC_RecordChild1:
3112  case OPC_RecordChild2: case OPC_RecordChild3:
3113  case OPC_RecordChild4: case OPC_RecordChild5:
3114  case OPC_RecordChild6: case OPC_RecordChild7: {
3115  unsigned ChildNo = Opcode-OPC_RecordChild0;
3116  if (ChildNo >= N.getNumOperands())
3117  break; // Match fails if out of range child #.
3118 
3119  RecordedNodes.push_back(std::make_pair(N->getOperand(ChildNo),
3120  N.getNode()));
3121  continue;
3122  }
3123  case OPC_RecordMemRef:
3124  if (auto *MN = dyn_cast<MemSDNode>(N))
3125  MatchedMemRefs.push_back(MN->getMemOperand());
3126  else {
3127  DEBUG(
3128  dbgs() << "Expected MemSDNode ";
3129  N->dump(CurDAG);
3130  dbgs() << '\n'
3131  );
3132  }
3133 
3134  continue;
3135 
3136  case OPC_CaptureGlueInput:
3137  // If the current node has an input glue, capture it in InputGlue.
3138  if (N->getNumOperands() != 0 &&
3139  N->getOperand(N->getNumOperands()-1).getValueType() == MVT::Glue)
3140  InputGlue = N->getOperand(N->getNumOperands()-1);
3141  continue;
3142 
3143  case OPC_MoveChild: {
3144  unsigned ChildNo = MatcherTable[MatcherIndex++];
3145  if (ChildNo >= N.getNumOperands())
3146  break; // Match fails if out of range child #.
3147  N = N.getOperand(ChildNo);
3148  NodeStack.push_back(N);
3149  continue;
3150  }
3151 
3152  case OPC_MoveChild0: case OPC_MoveChild1:
3153  case OPC_MoveChild2: case OPC_MoveChild3:
3154  case OPC_MoveChild4: case OPC_MoveChild5:
3155  case OPC_MoveChild6: case OPC_MoveChild7: {
3156  unsigned ChildNo = Opcode-OPC_MoveChild0;
3157  if (ChildNo >= N.getNumOperands())
3158  break; // Match fails if out of range child #.
3159  N = N.getOperand(ChildNo);
3160  NodeStack.push_back(N);
3161  continue;
3162  }
3163 
3164  case OPC_MoveParent:
3165  // Pop the current node off the NodeStack.
3166  NodeStack.pop_back();
3167  assert(!NodeStack.empty() && "Node stack imbalance!");
3168  N = NodeStack.back();
3169  continue;
3170 
3171  case OPC_CheckSame:
3172  if (!::CheckSame(MatcherTable, MatcherIndex, N, RecordedNodes)) break;
3173  continue;
3174 
3175  case OPC_CheckChild0Same: case OPC_CheckChild1Same:
3176  case OPC_CheckChild2Same: case OPC_CheckChild3Same:
3177  if (!::CheckChildSame(MatcherTable, MatcherIndex, N, RecordedNodes,
3178  Opcode-OPC_CheckChild0Same))
3179  break;
3180  continue;
3181 
3182  case OPC_CheckPatternPredicate:
3183  if (!::CheckPatternPredicate(MatcherTable, MatcherIndex, *this)) break;
3184  continue;
3185  case OPC_CheckPredicate:
3186  if (!::CheckNodePredicate(MatcherTable, MatcherIndex, *this,
3187  N.getNode()))
3188  break;
3189  continue;
3190  case OPC_CheckComplexPat: {
3191  unsigned CPNum = MatcherTable[MatcherIndex++];
3192  unsigned RecNo = MatcherTable[MatcherIndex++];
3193  assert(RecNo < RecordedNodes.size() && "Invalid CheckComplexPat");
3194 
3195  // If target can modify DAG during matching, keep the matching state
3196  // consistent.
3197  std::unique_ptr<MatchStateUpdater> MSU;
3198  if (ComplexPatternFuncMutatesDAG())
3199  MSU.reset(new MatchStateUpdater(*CurDAG, &NodeToMatch, RecordedNodes,
3200  MatchScopes));
3201 
3202  if (!CheckComplexPattern(NodeToMatch, RecordedNodes[RecNo].second,
3203  RecordedNodes[RecNo].first, CPNum,
3204  RecordedNodes))
3205  break;
3206  continue;
3207  }
3208  case OPC_CheckOpcode:
3209  if (!::CheckOpcode(MatcherTable, MatcherIndex, N.getNode())) break;
3210  continue;
3211 
3212  case OPC_CheckType:
3213  if (!::CheckType(MatcherTable, MatcherIndex, N, TLI,
3214  CurDAG->getDataLayout()))
3215  break;
3216  continue;
3217 
3218  case OPC_CheckTypeRes: {
3219  unsigned Res = MatcherTable[MatcherIndex++];
3220  if (!::CheckType(MatcherTable, MatcherIndex, N.getValue(Res), TLI,
3221  CurDAG->getDataLayout()))
3222  break;
3223  continue;
3224  }
3225 
3226  case OPC_SwitchOpcode: {
3227  unsigned CurNodeOpcode = N.getOpcode();
3228  unsigned SwitchStart = MatcherIndex-1; (void)SwitchStart;
3229  unsigned CaseSize;
3230  while (true) {
3231  // Get the size of this case.
3232  CaseSize = MatcherTable[MatcherIndex++];
3233  if (CaseSize & 128)
3234  CaseSize = GetVBR(CaseSize, MatcherTable, MatcherIndex);
3235  if (CaseSize == 0) break;
3236 
3237  uint16_t Opc = MatcherTable[MatcherIndex++];
3238  Opc |= (unsigned short)MatcherTable[MatcherIndex++] << 8;
3239 
3240  // If the opcode matches, then we will execute this case.
3241  if (CurNodeOpcode == Opc)
3242  break;
3243 
3244  // Otherwise, skip over this case.
3245  MatcherIndex += CaseSize;
3246  }
3247 
3248  // If no cases matched, bail out.
3249  if (CaseSize == 0) break;
3250 
3251  // Otherwise, execute the case we found.
3252  DEBUG(dbgs() << " OpcodeSwitch from " << SwitchStart
3253  << " to " << MatcherIndex << "\n");
3254  continue;
3255  }
3256 
3257  case OPC_SwitchType: {
3258  MVT CurNodeVT = N.getSimpleValueType();
3259  unsigned SwitchStart = MatcherIndex-1; (void)SwitchStart;
3260  unsigned CaseSize;
3261  while (true) {
3262  // Get the size of this case.
3263  CaseSize = MatcherTable[MatcherIndex++];
3264  if (CaseSize & 128)
3265  CaseSize = GetVBR(CaseSize, MatcherTable, MatcherIndex);
3266  if (CaseSize == 0) break;
3267 
3268  MVT CaseVT = (MVT::SimpleValueType)MatcherTable[MatcherIndex++];
3269  if (CaseVT == MVT::iPTR)
3270  CaseVT = TLI->getPointerTy(CurDAG->getDataLayout());
3271 
3272  // If the VT matches, then we will execute this case.
3273  if (CurNodeVT == CaseVT)
3274  break;
3275 
3276  // Otherwise, skip over this case.
3277  MatcherIndex += CaseSize;
3278  }
3279 
3280  // If no cases matched, bail out.
3281  if (CaseSize == 0) break;
3282 
3283  // Otherwise, execute the case we found.
3284  DEBUG(dbgs() << " TypeSwitch[" << EVT(CurNodeVT).getEVTString()
3285  << "] from " << SwitchStart << " to " << MatcherIndex<<'\n');
3286  continue;
3287  }
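Both switch cases above read each candidate's size with the variable-width integer scheme used throughout the matcher table: a byte whose high bit is set is only the first chunk, and GetVBR keeps consuming bytes, seven payload bits at a time, until it reaches a byte without the continuation bit. A minimal sketch of that decoding, assuming the usual little-endian base-128 layout (an illustrative helper, not this file's GetVBR):

    // Decode a value whose first byte (already read) had bit 7 set.
    // Each byte carries 7 payload bits; a set high bit means more follow.
    static uint64_t decodeVBR(uint64_t First, const unsigned char *Table,
                              unsigned &Idx) {
      uint64_t Val = First & 127;        // strip the continuation bit
      unsigned Shift = 7;
      uint64_t Next;
      do {
        Next = Table[Idx++];
        Val |= (Next & 127) << Shift;    // splice in the next 7 bits
        Shift += 7;
      } while (Next & 128);              // high bit set: another byte follows
      return Val;
    }

The rejected cases never need to be decoded at all: once CaseSize is known, MatcherIndex simply jumps past the whole case body.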
3288  case OPC_CheckChild0Type: case OPC_CheckChild1Type:
3289  case OPC_CheckChild2Type: case OPC_CheckChild3Type:
3290  case OPC_CheckChild4Type: case OPC_CheckChild5Type:
3291  case OPC_CheckChild6Type: case OPC_CheckChild7Type:
3292  if (!::CheckChildType(MatcherTable, MatcherIndex, N, TLI,
3293  CurDAG->getDataLayout(),
3294  Opcode - OPC_CheckChild0Type))
3295  break;
3296  continue;
3297  case OPC_CheckCondCode:
3298  if (!::CheckCondCode(MatcherTable, MatcherIndex, N)) break;
3299  continue;
3300  case OPC_CheckValueType:
3301  if (!::CheckValueType(MatcherTable, MatcherIndex, N, TLI,
3302  CurDAG->getDataLayout()))
3303  break;
3304  continue;
3305  case OPC_CheckInteger:
3306  if (!::CheckInteger(MatcherTable, MatcherIndex, N)) break;
3307  continue;
3308  case OPC_CheckChild0Integer: case OPC_CheckChild1Integer:
3309  case OPC_CheckChild2Integer: case OPC_CheckChild3Integer:
3310  case OPC_CheckChild4Integer:
3311  if (!::CheckChildInteger(MatcherTable, MatcherIndex, N,
3312  Opcode-OPC_CheckChild0Integer)) break;
3313  continue;
3314  case OPC_CheckAndImm:
3315  if (!::CheckAndImm(MatcherTable, MatcherIndex, N, *this)) break;
3316  continue;
3317  case OPC_CheckOrImm:
3318  if (!::CheckOrImm(MatcherTable, MatcherIndex, N, *this)) break;
3319  continue;
3320 
3321  case OPC_CheckFoldableChainNode: {
3322  assert(NodeStack.size() != 1 && "No parent node");
3323  // Verify that all intermediate nodes between the root and this one have
3324  // a single use.
3325  bool HasMultipleUses = false;
3326  for (unsigned i = 1, e = NodeStack.size()-1; i != e; ++i)
3327  if (!NodeStack[i].getNode()->hasOneUse()) {
3328  HasMultipleUses = true;
3329  break;
3330  }
3331  if (HasMultipleUses) break;
3332 
3333  // Check to see that the target thinks this is profitable to fold and that
3334  // we can fold it without inducing cycles in the graph.
3335  if (!IsProfitableToFold(N, NodeStack[NodeStack.size()-2].getNode(),
3336  NodeToMatch) ||
3337  !IsLegalToFold(N, NodeStack[NodeStack.size()-2].getNode(),
3338  NodeToMatch, OptLevel,
3339  true/*We validate our own chains*/))
3340  break;
3341 
3342  continue;
3343  }
3344  case OPC_EmitInteger: {
3345  MVT::SimpleValueType VT =
3346  (MVT::SimpleValueType)MatcherTable[MatcherIndex++];
3347  int64_t Val = MatcherTable[MatcherIndex++];
3348  if (Val & 128)
3349  Val = GetVBR(Val, MatcherTable, MatcherIndex);
3350  RecordedNodes.push_back(std::pair<SDValue, SDNode*>(
3351  CurDAG->getTargetConstant(Val, SDLoc(NodeToMatch),
3352  VT), nullptr));
3353  continue;
3354  }
3355  case OPC_EmitRegister: {
3356  MVT::SimpleValueType VT =
3357  (MVT::SimpleValueType)MatcherTable[MatcherIndex++];
3358  unsigned RegNo = MatcherTable[MatcherIndex++];
3359  RecordedNodes.push_back(std::pair<SDValue, SDNode*>(
3360  CurDAG->getRegister(RegNo, VT), nullptr));
3361  continue;
3362  }
3363  case OPC_EmitRegister2: {
3364  // For targets w/ more than 256 register names, the register enum
3365  // values are stored in two bytes in the matcher table (just like
3366  // opcodes).
3367  MVT::SimpleValueType VT =
3368  (MVT::SimpleValueType)MatcherTable[MatcherIndex++];
3369  unsigned RegNo = MatcherTable[MatcherIndex++];
3370  RegNo |= MatcherTable[MatcherIndex++] << 8;
3371  RecordedNodes.push_back(std::pair<SDValue, SDNode*>(
3372  CurDAG->getRegister(RegNo, VT), nullptr));
3373  continue;
3374  }
3375 
3376  case OPC_EmitConvertToTarget: {
3377  // Convert from IMM/FPIMM to target version.
3378  unsigned RecNo = MatcherTable[MatcherIndex++];
3379  assert(RecNo < RecordedNodes.size() && "Invalid EmitConvertToTarget");
3380  SDValue Imm = RecordedNodes[RecNo].first;
3381 
3382  if (Imm->getOpcode() == ISD::Constant) {
3383  const ConstantInt *Val=cast<ConstantSDNode>(Imm)->getConstantIntValue();
3384  Imm = CurDAG->getTargetConstant(*Val, SDLoc(NodeToMatch),
3385  Imm.getValueType());
3386  } else if (Imm->getOpcode() == ISD::ConstantFP) {
3387  const ConstantFP *Val=cast<ConstantFPSDNode>(Imm)->getConstantFPValue();
3388  Imm = CurDAG->getTargetConstantFP(*Val, SDLoc(NodeToMatch),
3389  Imm.getValueType());
3390  }
3391 
3392  RecordedNodes.push_back(std::make_pair(Imm, RecordedNodes[RecNo].second));
3393  continue;
3394  }
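The "target version" of an immediate is the same value wrapped as ISD::TargetConstant / ISD::TargetConstantFP, node kinds the DAG deliberately never folds, simplifies, or lowers, so a matched immediate survives untouched as an operand of the selected instruction. A minimal sketch of producing one, assuming this file's headers and an in-scope SelectionDAG (the helper name below is illustrative, not part of the original source):

    // Rebuild a matched immediate as a TargetConstant so later DAG passes
    // leave it alone.  DAG, DL, Imm and VT are caller-supplied.
    static SDValue asTargetImmediate(SelectionDAG &DAG, const SDLoc &DL,
                                     uint64_t Imm, EVT VT) {
      return DAG.getTargetConstant(Imm, DL, VT);
    }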
3395 
3396  case OPC_EmitMergeInputChains1_0: // OPC_EmitMergeInputChains, 1, 0
3397  case OPC_EmitMergeInputChains1_1: // OPC_EmitMergeInputChains, 1, 1
3398  case OPC_EmitMergeInputChains1_2: { // OPC_EmitMergeInputChains, 1, 2
3399  // These are space-optimized forms of OPC_EmitMergeInputChains.
3400  assert(!InputChain.getNode() &&
3401  "EmitMergeInputChains should be the first chain producing node");
3402  assert(ChainNodesMatched.empty() &&
3403  "Should only have one EmitMergeInputChains per match");
3404 
3405  // Read all of the chained nodes.
3406  unsigned RecNo = Opcode - OPC_EmitMergeInputChains1_0;
3407  assert(RecNo < RecordedNodes.size() && "Invalid EmitMergeInputChains");
3408  ChainNodesMatched.push_back(RecordedNodes[RecNo].first.getNode());
3409 
3410  // FIXME: What if other value results of the node have uses not matched
3411  // by this pattern?
3412  if (ChainNodesMatched.back() != NodeToMatch &&
3413  !RecordedNodes[RecNo].first.hasOneUse()) {
3414  ChainNodesMatched.clear();
3415  break;
3416  }
3417 
3418  // Merge the input chains if they are not intra-pattern references.
3419  InputChain = HandleMergeInputChains(ChainNodesMatched, CurDAG);
3420 
3421  if (!InputChain.getNode())
3422  break; // Failed to merge.
3423  continue;
3424  }
3425 
3426  case OPC_EmitMergeInputChains: {
3427  assert(!InputChain.getNode() &&
3428  "EmitMergeInputChains should be the first chain producing node");
3429  // This node gets a list of nodes we matched in the input that have
3430  // chains. We want to token factor all of the input chains to these nodes
3431  // together. However, if any of the input chains is actually one of the
3432  // nodes matched in this pattern, then we have an intra-match reference.
3433  // Ignore these because the newly token factored chain should not refer to
3434  // the old nodes.
3435  unsigned NumChains = MatcherTable[MatcherIndex++];
3436  assert(NumChains != 0 && "Can't TF zero chains");
3437 
3438  assert(ChainNodesMatched.empty() &&
3439  "Should only have one EmitMergeInputChains per match");
3440 
3441  // Read all of the chained nodes.
3442  for (unsigned i = 0; i != NumChains; ++i) {
3443  unsigned RecNo = MatcherTable[MatcherIndex++];
3444  assert(RecNo < RecordedNodes.size() && "Invalid EmitMergeInputChains");
3445  ChainNodesMatched.push_back(RecordedNodes[RecNo].first.getNode());
3446 
3447  // FIXME: What if other value results of the node have uses not matched
3448  // by this pattern?
3449  if (ChainNodesMatched.back() != NodeToMatch &&
3450  !RecordedNodes[RecNo].first.hasOneUse()) {
3451  ChainNodesMatched.clear();
3452  break;
3453  }
3454  }
3455 
3456  // If the inner loop broke out, the match fails.
3457  if (ChainNodesMatched.empty())
3458  break;
3459 
3460  // Merge the input chains if they are not intra-pattern references.
3461  InputChain = HandleMergeInputChains(ChainNodesMatched, CurDAG);
3462 
3463  if (!InputChain.getNode())
3464  break; // Failed to merge.
3465 
3466  continue;
3467  }
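What HandleMergeInputChains ultimately produces is a single chain value that is ordered after every external chain feeding the matched nodes. A rough sketch of that final step, assuming intra-pattern references have already been filtered out of InputChains (an illustrative helper, not the real implementation):

    // Collapse the surviving input chains into one chain value.
    static SDValue mergeChains(SelectionDAG &DAG, const SDLoc &DL,
                               SmallVectorImpl<SDValue> &InputChains) {
      if (InputChains.empty())
        return DAG.getEntryNode();   // nothing chained: use the entry token
      if (InputChains.size() == 1)
        return InputChains[0];       // a single chain can be used directly
      // Token-factor multiple chains so the new node is ordered after all
      // of its chained inputs.
      return DAG.getNode(ISD::TokenFactor, DL, MVT::Other, InputChains);
    }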
3468 
3469  case OPC_EmitCopyToReg: {
3470  unsigned RecNo = MatcherTable[MatcherIndex++];
3471  assert(RecNo < RecordedNodes.size() && "Invalid EmitCopyToReg");
3472  unsigned DestPhysReg = MatcherTable[MatcherIndex++];
3473 
3474  if (!InputChain.getNode())
3475  InputChain = CurDAG->getEntryNode();
3476 
3477  InputChain = CurDAG->getCopyToReg(InputChain, SDLoc(NodeToMatch),
3478  DestPhysReg, RecordedNodes[RecNo].first,
3479  InputGlue);
3480 
3481  InputGlue = InputChain.getValue(1);
3482  continue;
3483  }
3484 
3485  case OPC_EmitNodeXForm: {
3486  unsigned XFormNo = MatcherTable[MatcherIndex++];
3487  unsigned RecNo = MatcherTable[MatcherIndex++];
3488  assert(RecNo < RecordedNodes.size() && "Invalid EmitNodeXForm");
3489  SDValue Res = RunSDNodeXForm(RecordedNodes[RecNo].first, XFormNo);
3490  RecordedNodes.push_back(std::pair<SDValue,SDNode*>(Res, nullptr));
3491  continue;
3492  }
3493  case OPC_Coverage: {
3494  // This is emitted right before MorphNode/EmitNode.
3495  // So it should be safe to assume that this node has been selected
3496  unsigned index = MatcherTable[MatcherIndex++];
3497  index |= (MatcherTable[MatcherIndex++] << 8);
3498  dbgs() << "COVERED: " << getPatternForIndex(index) << "\n";
3499  dbgs() << "INCLUDED: " << getIncludePathForIndex(index) << "\n";
3500  continue;
3501  }
3502 
3503  case OPC_EmitNode: case OPC_MorphNodeTo:
3504  case OPC_EmitNode0: case OPC_EmitNode1: case OPC_EmitNode2:
3505  case OPC_MorphNodeTo0: case OPC_MorphNodeTo1: case OPC_MorphNodeTo2: {
3506  uint16_t TargetOpc = MatcherTable[MatcherIndex++];
3507  TargetOpc |= (unsigned short)MatcherTable[MatcherIndex++] << 8;
3508  unsigned EmitNodeInfo = MatcherTable[MatcherIndex++];
3509  // Get the result VT list.
3510  unsigned NumVTs;
3511  // If this is one of the compressed forms, get the number of VTs based
3512  // on the Opcode. Otherwise read the next byte from the table.
3513  if (Opcode >= OPC_MorphNodeTo0 && Opcode <= OPC_MorphNodeTo2)
3514  NumVTs = Opcode - OPC_MorphNodeTo0;
3515  else if (Opcode >= OPC_EmitNode0 && Opcode <= OPC_EmitNode2)
3516  NumVTs = Opcode - OPC_EmitNode0;
3517  else
3518  NumVTs = MatcherTable[MatcherIndex++];
3519  SmallVector<EVT, 4> VTs;
3520  for (unsigned i = 0; i != NumVTs; ++i) {
3521  MVT::SimpleValueType VT =
3522  (MVT::SimpleValueType)MatcherTable[MatcherIndex++];
3523  if (VT == MVT::iPTR)
3524  VT = TLI->getPointerTy(CurDAG->getDataLayout()).SimpleTy;
3525  VTs.push_back(VT);
3526  }
3527 
3528  if (EmitNodeInfo & OPFL_Chain)
3529  VTs.push_back(MVT::Other);
3530  if (EmitNodeInfo & OPFL_GlueOutput)
3531  VTs.push_back(MVT::Glue);
3532 
3533  // This is hot code, so optimize the two most common cases of 1 and 2
3534  // results.
3535  SDVTList VTList;
3536  if (VTs.size() == 1)
3537  VTList = CurDAG->getVTList(VTs[0]);
3538  else if (VTs.size() == 2)
3539  VTList = CurDAG->getVTList(VTs[0], VTs[1]);
3540  else
3541  VTList = CurDAG->getVTList(VTs);
3542 
3543  // Get the operand list.
3544  unsigned NumOps = MatcherTable[MatcherIndex++];
3545  SmallVector<SDValue, 8> Ops;
3546  for (unsigned i = 0; i != NumOps; ++i) {
3547  unsigned RecNo = MatcherTable[MatcherIndex++];
3548  if (RecNo & 128)
3549  RecNo = GetVBR(RecNo, MatcherTable, MatcherIndex);
3550 
3551  assert(RecNo < RecordedNodes.size() && "Invalid EmitNode");
3552  Ops.push_back(RecordedNodes[RecNo].first);
3553  }
3554 
3555  // If there are variadic operands to add, handle them now.
3556  if (EmitNodeInfo & OPFL_VariadicInfo) {
3557  // Determine the start index to copy from.
3558  unsigned FirstOpToCopy = getNumFixedFromVariadicInfo(EmitNodeInfo);
3559  FirstOpToCopy += (EmitNodeInfo & OPFL_Chain) ? 1 : 0;
3560  assert(NodeToMatch->getNumOperands() >= FirstOpToCopy &&
3561  "Invalid variadic node");
3562  // Copy all of the variadic operands, not including a potential glue
3563  // input.
3564  for (unsigned i = FirstOpToCopy, e = NodeToMatch->getNumOperands();
3565  i != e; ++i) {
3566  SDValue V = NodeToMatch->getOperand(i);
3567  if (V.getValueType() == MVT::Glue) break;
3568  Ops.push_back(V);
3569  }
3570  }
3571 
3572  // If this has chain/glue inputs, add them.
3573  if (EmitNodeInfo & OPFL_Chain)
3574  Ops.push_back(InputChain);
3575  if ((EmitNodeInfo & OPFL_GlueInput) && InputGlue.getNode() != nullptr)
3576  Ops.push_back(InputGlue);
3577 
3578  // Create the node.
3579  MachineSDNode *Res = nullptr;
3580  bool IsMorphNodeTo = Opcode == OPC_MorphNodeTo ||
3581  (Opcode >= OPC_MorphNodeTo0 && Opcode <= OPC_MorphNodeTo2);
3582  if (!IsMorphNodeTo) {
3583  // If this is a normal EmitNode command, just create the new node and
3584  // add the results to the RecordedNodes list.
3585  Res = CurDAG->getMachineNode(TargetOpc, SDLoc(NodeToMatch),
3586  VTList, Ops);
3587 
3588  // Add all the non-glue/non-chain results to the RecordedNodes list.
3589  for (unsigned i = 0, e = VTs.size(); i != e; ++i) {
3590  if (VTs[i] == MVT::Other || VTs[i] == MVT::Glue) break;
3591  RecordedNodes.push_back(std::pair<SDValue,SDNode*>(SDValue(Res, i),
3592  nullptr));
3593  }
3594  } else {
3595  assert(NodeToMatch->getOpcode() != ISD::DELETED_NODE &&
3596  "NodeToMatch was removed partway through selection");
3597  SelectionDAG::DAGNodeDeletedListener NDL(*CurDAG, [&](SDNode *N,
3598  SDNode *E) {
3599  CurDAG->salvageDebugInfo(*N);
3600  auto &Chain = ChainNodesMatched;
3601  assert((!E || !is_contained(Chain, N)) &&
3602  "Chain node replaced during MorphNode");
3603  Chain.erase(std::remove(Chain.begin(), Chain.end(), N), Chain.end());
3604  });
3605  Res = cast<MachineSDNode>(MorphNode(NodeToMatch, TargetOpc, VTList,
3606  Ops, EmitNodeInfo));
3607  }
3608 
3609  // If the node had chain/glue results, update our notion of the current
3610  // chain and glue.
3611  if (EmitNodeInfo & OPFL_GlueOutput) {
3612  InputGlue = SDValue(Res, VTs.size()-1);
3613  if (EmitNodeInfo & OPFL_Chain)
3614  InputChain = SDValue(Res, VTs.size()-2);
3615  } else if (EmitNodeInfo & OPFL_Chain)
3616  InputChain = SDValue(Res, VTs.size()-1);
3617 
3618  // If the OPFL_MemRefs glue is set on this node, slap all of the
3619  // accumulated memrefs onto it.
3620  //
3621  // FIXME: This is vastly incorrect for patterns with multiple output
3622  // instructions that access memory and for ComplexPatterns that match
3623  // loads.
3624  if (EmitNodeInfo & OPFL_MemRefs) {
3625  // Only attach load or store memory operands if the generated
3626  // instruction may load or store.
3627  const MCInstrDesc &MCID = TII->get(TargetOpc);
3628  bool mayLoad = MCID.mayLoad();
3629  bool mayStore = MCID.mayStore();
3630 
3631  unsigned NumMemRefs = 0;
3632  for (SmallVectorImpl<MachineMemOperand *>::const_iterator I =
3633  MatchedMemRefs.begin(), E = MatchedMemRefs.end(); I != E; ++I) {
3634  if ((*I)->isLoad()) {
3635  if (mayLoad)
3636  ++NumMemRefs;
3637  } else if ((*I)->isStore()) {
3638  if (mayStore)
3639  ++NumMemRefs;
3640  } else {
3641  ++NumMemRefs;
3642  }
3643  }
3644 
3645  MachineSDNode::mmo_iterator MemRefs =
3646  MF->allocateMemRefsArray(NumMemRefs);
3647 
3648  MachineSDNode::mmo_iterator MemRefsPos = MemRefs;
3649  for (SmallVectorImpl<MachineMemOperand *>::const_iterator I =
3650  MatchedMemRefs.begin(), E = MatchedMemRefs.end(); I != E; ++I) {
3651  if ((*I)->isLoad()) {
3652  if (mayLoad)
3653  *MemRefsPos++ = *I;
3654  } else if ((*I)->isStore()) {
3655  if (mayStore)
3656  *MemRefsPos++ = *I;
3657  } else {
3658  *MemRefsPos++ = *I;
3659  }
3660  }
3661 
3662  Res->setMemRefs(MemRefs, MemRefs + NumMemRefs);
3663  }
3664 
3665  DEBUG(
3666  if (!MatchedMemRefs.empty() && Res->memoperands_empty())
3667  dbgs() << " Dropping mem operands\n";
3668  dbgs() << " "
3669  << (IsMorphNodeTo ? "Morphed" : "Created")
3670  << " node: ";
3671  Res->dump(CurDAG);
3672  );
3673 
3674  // If this was a MorphNodeTo then we're completely done!
3675  if (IsMorphNodeTo) {
3676  // Update chain uses.
3677  UpdateChains(Res, InputChain, ChainNodesMatched, true);
3678  return;
3679  }
3680  continue;
3681  }
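Because chain and glue results are always appended after the real value results, the result numbers used just above follow directly from the VT list: when a glue output is present it takes the last slot and the chain the one before it. A small illustration of that indexing (a hypothetical helper mirroring the OPFL_Chain / OPFL_GlueOutput handling above):

    // Result number holding the chain of a node whose result-type list was
    // built as {values..., [chain], [glue]}.  NumVTs counts every entry in
    // that list, including chain and glue; returns -1 if there is no chain.
    static int chainResultNo(unsigned NumVTs, bool HasChain, bool HasGlue) {
      if (!HasChain)
        return -1;
      return HasGlue ? int(NumVTs) - 2   // glue occupies the final slot
                     : int(NumVTs) - 1;  // chain is last when there is no glue
    }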
3682 
3683  case OPC_CompleteMatch: {
3684  // The match has been completed, and any new nodes (if any) have been
3685  // created. Patch up references to the matched dag to use the newly
3686  // created nodes.
3687  unsigned NumResults = MatcherTable[MatcherIndex++];
3688 
3689  for (unsigned i = 0; i != NumResults; ++i) {
3690  unsigned ResSlot = MatcherTable[MatcherIndex++];
3691  if (ResSlot & 128)
3692  ResSlot = GetVBR(ResSlot, MatcherTable, MatcherIndex);
3693 
3694  assert(ResSlot < RecordedNodes.size() && "Invalid CompleteMatch");
3695  SDValue Res = RecordedNodes[ResSlot].first;
3696 
3697  assert(i < NodeToMatch->getNumValues() &&
3698  NodeToMatch->getValueType(i) != MVT::Other &&
3699  NodeToMatch->getValueType(i) != MVT::Glue &&
3700  "Invalid number of results to complete!");
3701  assert((NodeToMatch->getValueType(i) == Res.getValueType() ||
3702  NodeToMatch->getValueType(i) == MVT::iPTR ||
3703  Res.getValueType() == MVT::iPTR ||
3704  NodeToMatch->getValueType(i).getSizeInBits() ==
3705  Res.getValueSizeInBits()) &&
3706  "invalid replacement");
3707  CurDAG->ReplaceAllUsesOfValueWith(SDValue(NodeToMatch, i), Res);
3708  }
3709 
3710  // Update chain uses.
3711  UpdateChains(NodeToMatch, InputChain, ChainNodesMatched, false);
3712 
3713  // If the root node defines glue, we need to update it to the glue result.
3714  // TODO: This never happens in our tests and I think it can be removed /
3715  // replaced with an assert, but if we do it this way the change is
3716  // NFC.
3717  if (NodeToMatch->getValueType(NodeToMatch->getNumValues() - 1) ==
3718  MVT::Glue &&
3719  InputGlue.getNode())
3720  CurDAG->ReplaceAllUsesOfValueWith(
3721  SDValue(NodeToMatch, NodeToMatch->getNumValues() - 1), InputGlue);
3722 
3723  assert(NodeToMatch->use_empty() &&
3724  "Didn't replace all uses of the node?");
3725  CurDAG->RemoveDeadNode(NodeToMatch);
3726 
3727  return;
3728  }
3729  }
3730 
3731  // If the code reached this point, then the match failed. See if there is
3732  // another child to try in the current 'Scope', otherwise pop it until we
3733  // find a case to check.
3734  DEBUG(dbgs() << " Match failed at index " << CurrentOpcodeIndex << "\n");
3735  ++NumDAGIselRetries;
3736  while (true) {
3737  if (MatchScopes.empty()) {
3738  CannotYetSelect(NodeToMatch);
3739  return;
3740  }
3741 
3742  // Restore the interpreter state back to the point where the scope was
3743  // formed.
3744  MatchScope &LastScope = MatchScopes.back();
3745  RecordedNodes.resize(LastScope.NumRecordedNodes);
3746  NodeStack.clear();
3747  NodeStack.append(LastScope.NodeStack.begin(), LastScope.NodeStack.end());
3748  N = NodeStack.back();
3749 
3750  if (LastScope.NumMatchedMemRefs != MatchedMemRefs.size())
3751  MatchedMemRefs.resize(LastScope.NumMatchedMemRefs);
3752  MatcherIndex = LastScope.FailIndex;
3753 
3754  DEBUG(dbgs() << " Continuing at " << MatcherIndex << "\n");
3755 
3756  InputChain = LastScope.InputChain;
3757  InputGlue = LastScope.InputGlue;
3758  if (!LastScope.HasChainNodesMatched)
3759  ChainNodesMatched.clear();
3760 
3761  // Check to see what the offset is at the new MatcherIndex. If it is zero
3762  // we have reached the end of this scope, otherwise we have another child
3763  // in the current scope to try.
3764  unsigned NumToSkip = MatcherTable[MatcherIndex++];
3765  if (NumToSkip & 128)
3766  NumToSkip = GetVBR(NumToSkip, MatcherTable, MatcherIndex);
3767 
3768  // If we have another child in this scope to match, update FailIndex and
3769  // try it.
3770  if (NumToSkip != 0) {
3771  LastScope.FailIndex = MatcherIndex+NumToSkip;
3772  break;
3773  }
3774 
3775  // End of this scope, pop it and try the next child in the containing
3776  // scope.
3777  MatchScopes.pop_back();
3778  }
3779  }
3780 }
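The backtracking loop above is driven entirely by the table layout: each alternative recorded by an OPC_Scope is preceded by its size (NumToSkip, possibly VBR-encoded), and a size of zero terminates the scope. A compact sketch of stepping to the next alternative, reusing the decodeVBR sketch given earlier and hypothetical Table/Idx names in place of MatcherTable/MatcherIndex:

    // Read one NumToSkip entry.  Returns 0 when the scope is exhausted
    // (the caller then pops the MatchScope); otherwise returns the table
    // index of the alternative that follows the one about to be tried.
    static unsigned nextAlternative(const unsigned char *Table, unsigned &Idx) {
      unsigned NumToSkip = Table[Idx++];
      if (NumToSkip & 128)
        NumToSkip = unsigned(decodeVBR(NumToSkip, Table, Idx));
      return NumToSkip == 0 ? 0 : Idx + NumToSkip;
    }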
3781 
3782 bool SelectionDAGISel::isOrEquivalentToAdd(const SDNode *N) const {
3783  assert(N->getOpcode() == ISD::OR && "Unexpected opcode");
3784  auto *C = dyn_cast<ConstantSDNode>(N->getOperand(1));
3785  if (!C)
3786  return false;
3787 
3788  // Detect when "or" is used to add an offset to a stack object.
3789  if (auto *FN = dyn_cast<FrameIndexSDNode>(N->getOperand(0))) {
3790  MachineFrameInfo &MFI = MF->getFrameInfo();
3791  unsigned A = MFI.getObjectAlignment(FN->getIndex());
3792  assert(isPowerOf2_32(A) && "Unexpected alignment");
3793  int32_t Off = C->getSExtValue();
3794  // If the alleged offset fits in the zero bits guaranteed by
3795  // the alignment, then this or is really an add.
3796  return (Off >= 0) && (((A - 1) & Off) == unsigned(Off));
3797  }
3798  return false;
3799 }
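A worked instance of the test above: a stack object with 8-byte alignment has its three low address bits known to be zero, so or'ing in an offset of 4 cannot produce a carry and behaves exactly like an add, since ((8 - 1) & 4) == 4; an offset of 9 fails because bit 3 of 9 overlaps the aligned part of the address ((8 - 1) & 9 == 1). The same check on plain integers, as a hedged standalone sketch:

    // Align is the object's alignment (a power of two), Off the constant
    // operand of the 'or'; true when the 'or' behaves exactly like an add.
    static bool orActsAsAdd(unsigned Align, int32_t Off) {
      return Off >= 0 && ((Align - 1) & Off) == unsigned(Off);
    }
    // orActsAsAdd(8, 4) == true    bits of 4 lie inside the known-zero bits
    // orActsAsAdd(8, 9) == false   bit 3 of 9 may collide with the address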
3800 
3801 void SelectionDAGISel::CannotYetSelect(SDNode *N) {
3802  std::string msg;
3803  raw_string_ostream Msg(msg);
3804  Msg << "Cannot select: ";
3805 
3806  if (N->getOpcode() != ISD::INTRINSIC_W_CHAIN &&
3807  N->getOpcode() != ISD::INTRINSIC_WO_CHAIN &&
3808  N->getOpcode() != ISD::INTRINSIC_VOID) {
3809  N->printrFull(Msg, CurDAG);
3810  Msg << "\nIn function: " << MF->getName();
3811  } else {
3812  bool HasInputChain = N->getOperand(0).getValueType() == MVT::Other;
3813  unsigned iid =
3814  cast<ConstantSDNode>(N->getOperand(HasInputChain))->getZExtValue();
3815  if (iid < Intrinsic::num_intrinsics)
3816  Msg << "intrinsic %" << Intrinsic::getName((Intrinsic::ID)iid, None);
3817  else if (const TargetIntrinsicInfo *TII = TM.getIntrinsicInfo())
3818  Msg << "target intrinsic %" << TII->getName(iid);
3819  else
3820  Msg << "unknown intrinsic #" << iid;
3821  }
3822  report_fatal_error(Msg.str());
3823 }
3824 
3825 char SelectionDAGISel::ID = 0;
ANNOTATION_LABEL - Represents a mid basic block label used by annotations.
Definition: ISDOpcodes.h:646
SDNode * MorphNodeTo(SDNode *N, unsigned Opc, SDVTList VTs, ArrayRef< SDValue > Ops)
This mutates the specified node to have the specified return type, opcode, and operands.
uint64_t CallInst * C
Return a value (possibly void), from a function.
std::vector< BitTestBlock > BitTestCases
BitTestCases - Vector of BitTestBlock structures used to communicate SwitchInst code generation infor...
Diagnostic information for ISel fallback path.
SelectionDAGBuilder * SDB
static LLVM_ATTRIBUTE_ALWAYS_INLINE bool CheckAndImm(const unsigned char *MatcherTable, unsigned &MatcherIndex, SDValue N, const SelectionDAGISel &SDISel)
A parsed version of the target data layout string in and methods for querying it. ...
Definition: DataLayout.h:111
mop_iterator operands_end()
Definition: MachineInstr.h:330
EVT getValueType() const
Return the ValueType of the referenced return value.
static SDNode * findGlueUse(SDNode *N)
findGlueUse - Return use of MVT::Glue value produced by the specified SDNode.
void EmitLiveInCopies(MachineBasicBlock *EntryMBB, const TargetRegisterInfo &TRI, const TargetInstrInfo &TII)
EmitLiveInCopies - Emit copies to initialize livein virtual registers into the given entry block...
static LLVM_ATTRIBUTE_ALWAYS_INLINE bool CheckCondCode(const unsigned char *MatcherTable, unsigned &MatcherIndex, SDValue N)
AnalysisUsage & addPreserved()
Add the specified Pass class to the set of analyses preserved by this pass.
void setVariableDbgInfo(const DILocalVariable *Var, const DIExpression *Expr, unsigned Slot, const DILocation *Loc)
Collect information used to emit debugging information of a variable.
bool hasPostISelHook(QueryType Type=IgnoreBundle) const
Return true if this instruction requires adjustment after instruction selection by calling a target h...
Definition: MachineInstr.h:708
Diagnostic information for missed-optimization remarks.
static PassRegistry * getPassRegistry()
getPassRegistry - Access the global registry object, which is automatically initialized at applicatio...
uint64_t getZExtValue() const
Get zero extended value.
Definition: APInt.h:1542
GCNRegPressure max(const GCNRegPressure &P1, const GCNRegPressure &P2)
unsigned getOpcode() const
Return the SelectionDAG opcode value for this node.
static int getNumFixedFromVariadicInfo(unsigned Flags)
getNumFixedFromVariadicInfo - Transform an EmitNode flags word into the number of fixed arity values ...
This class represents an incoming formal argument to a Function.
Definition: Argument.h:30
LLVM_NODISCARD std::string str() const
str - Get the contents as an std::string.
Definition: StringRef.h:228
bool LegalizeTypes()
This transforms the SelectionDAG into a SelectionDAG that only uses types natively supported by the t...
DiagnosticInfoOptimizationBase::Argument NV
typename SuperClass::const_iterator const_iterator
Definition: SmallVector.h:329
GCFunctionInfo * GFI
const TargetRegisterClass * getRegClass(unsigned Reg) const
Return the register class of the specified virtual register.
virtual bool IsProfitableToFold(SDValue N, SDNode *U, SDNode *Root) const
IsProfitableToFold - Returns true if it&#39;s profitable to fold the specific operand node N of U during ...
static LLVM_ATTRIBUTE_ALWAYS_INLINE bool CheckValueType(const unsigned char *MatcherTable, unsigned &MatcherIndex, SDValue N, const TargetLowering *TLI, const DataLayout &DL)
DELETED_NODE - This is an illegal value that is used to catch errors.
Definition: ISDOpcodes.h:42
MDNODE_SDNODE - This is a node that holdes an MDNode*, which is used to reference metadata in the IR...
Definition: ISDOpcodes.h:704
static unsigned virtReg2Index(unsigned Reg)
Convert a virtual register number to a 0-based index.
LLVM_ATTRIBUTE_NORETURN void report_fatal_error(Error Err, bool gen_crash_diag=true)
Report a serious error, calling any installed error handler.
Definition: Error.cpp:115
Compute iterated dominance frontiers using a linear time algorithm.
Definition: AllocatorList.h:24
Various leaf nodes.
Definition: ISDOpcodes.h:60
MCSymbol * addLandingPad(MachineBasicBlock *LandingPad)
Add a new panding pad. Returns the label ID for the landing pad entry.
void setMemRefs(mmo_iterator NewMemRefs, mmo_iterator NewMemRefsEnd)
Assign this MachineSDNodes&#39;s memory reference descriptor list.
static cl::opt< bool > ViewISelDAGs("view-isel-dags", cl::Hidden, cl::desc("Pop up a window to show isel dags as they are selected"))
MCSymbol - Instances of this class represent a symbol name in the MC file, and MCSymbols are created ...
Definition: MCSymbol.h:42
unsigned getCatchPadExceptionPointerVReg(const Value *CPI, const TargetRegisterClass *RC)
DenseMap< MachineBasicBlock *, SmallVector< unsigned, 4 > > LPadToCallSiteMap
LPadToCallSiteMap - Map a landing pad to the call site indexes.
unsigned EnableFastISel
EnableFastISel - This flag enables fast-path instruction selection which trades away generated code q...
static void propagateSwiftErrorVRegs(FunctionLoweringInfo *FuncInfo)
Propagate swifterror values through the machine function CFG.
static ChainResult WalkChainUsers(const SDNode *ChainedNode, SmallVectorImpl< SDNode *> &ChainedNodesInPattern, DenseMap< const SDNode *, ChainResult > &TokenFactorResult, SmallVectorImpl< SDNode *> &InteriorChainedNodes)
WalkChainUsers - Walk down the users of the specified chained node that is part of the pattern we&#39;re ...
LLVM_ATTRIBUTE_ALWAYS_INLINE size_type size() const
Definition: SmallVector.h:136
virtual const TargetRegisterInfo * getRegisterInfo() const
getRegisterInfo - If register information is available, return it.
void ReplaceUses(SDValue F, SDValue T)
ReplaceUses - replace all uses of the old node F with the use of the new node T.
unsigned createVirtualRegister(const TargetRegisterClass *RegClass)
createVirtualRegister - Create and return a new virtual register in the function with the specified r...
std::error_code remove(const Twine &path, bool IgnoreNonExisting=true)
Remove path.
Sched::Preference getSchedulingPreference() const
Return target scheduling preference.
bool CheckAndMask(SDValue LHS, ConstantSDNode *RHS, int64_t DesiredMaskS) const
CheckAndMask - The isel is trying to match something like (and X, 255).
const MachineFunctionProperties & getProperties() const
Get the function properties.
virtual unsigned getRegisterByName(const char *RegName, EVT VT, SelectionDAG &DAG) const
Return the register ID of the name passed in.
SelectionDAGBuilder - This is the common target-independent lowering implementation that is parameter...
EVT getValueType(unsigned ResNo) const
Return the type of a specified result.
const DebugLoc & getDebugLoc() const
Returns the debug location id of this MachineInstr.
Definition: MachineInstr.h:271
iterator getFirstTerminator()
Returns an iterator to the first terminator instruction of this basic block.
static unsigned getFlagWord(unsigned Kind, unsigned NumOps)
Definition: InlineAsm.h:269
Describe properties that are true of each instruction in the target description file.
Definition: MCInstrDesc.h:163
void initializeTargetLibraryInfoWrapperPassPass(PassRegistry &)
Clients of various APIs that cause global effects on the DAG can optionally implement this interface...
Definition: SelectionDAG.h:265
unsigned getReg() const
getReg - Returns the register number.
friend struct DAGUpdateListener
DAGUpdateListener is a friend so it can manipulate the listener stack.
Definition: SelectionDAG.h:307
static bool isVirtualRegister(unsigned Reg)
Return true if the specified register number is in the virtual register namespace.
bool LegalizeVectors()
This transforms the SelectionDAG into a SelectionDAG that only uses vector math operations supported ...
This file contains the declarations for metadata subclasses.
virtual const TargetLowering * getTargetLowering() const
iterator_range< IterTy > args() const
Definition: CallSite.h:215
bool mayWriteToMemory() const
Return true if this instruction may modify memory.
bool isSubsetOf(const APInt &RHS) const
This operation checks that all bits set in this APInt are also set in RHS.
Definition: APInt.h:1308
iterator insertAfter(iterator I, MachineInstr *MI)
Insert MI into the instruction list after I.
unsigned getResNo() const
Convenience function for get().getResNo().
void visitJumpTableHeader(JumpTable &JT, JumpTableHeader &JTH, MachineBasicBlock *SwitchBB)
visitJumpTableHeader - This function emits necessary code to produce index in the JumpTable from swit...
bool NewNodesMustHaveLegalTypes
When true, additional steps are taken to ensure that getConstant() and similar functions return DAG n...
Definition: SelectionDAG.h:303
TargetGlobalAddress - Like GlobalAddress, but the DAG does no folding or anything else with this node...
Definition: ISDOpcodes.h:131
static const MCPhysReg VRegs[32]
void getLocation(StringRef *Filename, unsigned *Line, unsigned *Column) const
Return location information for this diagnostic in three parts: the source file name, line number and column.
unsigned second
arg_iterator arg_end()
Definition: Function.h:658
virtual const TargetRegisterClass * getRegClassFor(MVT VT) const
Return the register class that should be used for the specified value type.
STATISTIC(NumFunctions, "Total number of functions")
A debug info location.
Definition: DebugLoc.h:34
Metadata node.
Definition: Metadata.h:862
bool isInteger() const
Return true if this is an integer or a vector integer type.
Definition: ValueTypes.h:141
F(f)
const MDOperand & getOperand(unsigned I) const
Definition: Metadata.h:1067
An instruction for reading from memory.
Definition: Instructions.h:164
RegisterPassParser class - Handle the addition of new machine passes.
void setNodeId(int Id)
Set unique node id.
SDNode * getNode() const
get the SDNode which holds the desired result
static MachineBasicBlock::iterator FindSplitPointForStackProtector(MachineBasicBlock *BB)
Find the split point at which to splice the end of BB into its success stack protector check machine ...
SDValue getNode(unsigned Opcode, const SDLoc &DL, EVT VT, ArrayRef< SDUse > Ops)
Gets or creates the specified node.
const SDValue & setRoot(SDValue N)
Set the current root tag of the SelectionDAG.
Definition: SelectionDAG.h:455
bool mayLoad() const
Return true if this instruction could possibly read memory.
Definition: MCInstrDesc.h:387
bool isPHI() const
Definition: MachineInstr.h:829
void viewGraph(const std::string &Title)
Pop up a GraphViz/gv window with the DAG rendered using &#39;dot&#39;.
static LLVM_ATTRIBUTE_ALWAYS_INLINE bool CheckChildType(const unsigned char *MatcherTable, unsigned &MatcherIndex, SDValue N, const TargetLowering *TLI, const DataLayout &DL, unsigned ChildNo)
unsigned getValueSizeInBits() const
Returns the size of the value in bits.
EntryToken - This is the marker used to indicate the start of a region.
Definition: ISDOpcodes.h:45
void setCurrentSwiftErrorVReg(const MachineBasicBlock *MBB, const Value *, unsigned)
Set the swifterror virtual register in the SwiftErrorVRegDefMap for this basic block.
static bool isUseOperandTiedToDef(unsigned Flag, unsigned &Idx)
isUseOperandTiedToDef - Return true if the flag of the inline asm operand indicates it is an use oper...
Definition: InlineAsm.h:342
bool isReturn() const
Return true if the instruction is a return.
Definition: MCInstrDesc.h:245
bool hasOneUse() const
Return true if there is exactly one node using value ResNo of Node.
MachineFunction * MF
ScheduleDAGSDNodes * createHybridListDAGScheduler(SelectionDAGISel *IS, CodeGenOpt::Level)
createHybridListDAGScheduler - This creates a bottom up register pressure aware list scheduler that m...
static LLVM_ATTRIBUTE_ALWAYS_INLINE bool CheckChildSame(const unsigned char *MatcherTable, unsigned &MatcherIndex, SDValue N, const SmallVectorImpl< std::pair< SDValue, SDNode *>> &RecordedNodes, unsigned ChildNo)
CheckChildSame - Implements OP_CheckChildXSame.
const TargetLibraryInfo * LibInfo
void set(const Function &Fn, MachineFunction &MF, SelectionDAG *DAG)
set - Initialize this FunctionLoweringInfo with the given Function and its associated MachineFunction...
bool isGCRelocate(ImmutableCallSite CS)
Definition: Statepoint.cpp:43
static void setupSwiftErrorVals(const Function &Fn, const TargetLowering *TLI, FunctionLoweringInfo *FuncInfo)
Set up SwiftErrorVals by going through the function.
RESULT,OUTCHAIN = INTRINSIC_W_CHAIN(INCHAIN, INTRINSICID, arg1, ...) This node represents a target in...
Definition: ISDOpcodes.h:159
A templated base class for SmallPtrSet which provides the typesafe interface that is common across al...
Definition: SmallPtrSet.h:344
bool selectInstruction(const Instruction *I)
Do "fast" instruction selection for the given LLVM IR instruction and append the generated machine in...
Definition: FastISel.cpp:1382
static bool isFoldedOrDeadInstruction(const Instruction *I, FunctionLoweringInfo *FuncInfo)
isFoldedOrDeadInstruction - Return true if the specified instruction is side-effect free and is eithe...
StackProtectorDescriptor SPDescriptor
A StackProtectorDescriptor structure used to communicate stack protector information in between Selec...
std::pair< unsigned, bool > getOrCreateSwiftErrorVRegDefAt(const Instruction *)
Get or create the swifterror value virtual register for a def of a swifterror by an instruction...
std::pair< iterator, bool > insert(const std::pair< KeyT, ValueT > &KV)
Definition: DenseMap.h:191
static LLVM_ATTRIBUTE_ALWAYS_INLINE bool CheckSame(const unsigned char *MatcherTable, unsigned &MatcherIndex, SDValue N, const SmallVectorImpl< std::pair< SDValue, SDNode *>> &RecordedNodes)
CheckSame - Implements OP_CheckSame.
void visitSPDescriptorParent(StackProtectorDescriptor &SPD, MachineBasicBlock *ParentBB)
Codegen a new tail for a stack protector check ParentMBB which has had its tail spliced into a stack ...
SDValue getRoot()
Return the current virtual root of the Selection DAG, flushing any PendingLoad items.
void clear()
Clear state and free memory necessary to make this SelectionDAG ready to process a new block...
AnalysisUsage & addRequired()
void setLastLocalValue(MachineInstr *I)
Update the position of the last instruction emitted for materializing constants for use in the curren...
Definition: FastISel.h:238
A description of a memory reference used in the backend.
void resetTargetOptions(const Function &F) const
Reset the target options based on the function&#39;s attributes.
StringRef getName(ID id)
Return the LLVM name for an intrinsic, such as "llvm.ppc.altivec.lvx".
Definition: Function.cpp:591
void visitSwitchCase(CaseBlock &CB, MachineBasicBlock *SwitchBB)
visitSwitchCase - Emits the necessary code to represent a single node in the binary search tree resul...
Option class for critical edge splitting.
MachineFunctionPass - This class adapts the FunctionPass interface to allow convenient creation of pa...
Printable printMBBReference(const MachineBasicBlock &MBB)
Prints a machine basic block reference.
virtual bool ComplexPatternFuncMutatesDAG() const
Return true if complex patterns for this target can mutate the DAG.
static bool IsLegalToFold(SDValue N, SDNode *U, SDNode *Root, CodeGenOpt::Level OptLevel, bool IgnoreChains=false)
IsLegalToFold - Returns true if the specific operand node N of U can be folded during instruction sel...
This class is basically a combination of TimeRegion and Timer.
Definition: Timer.h:160
A Use represents the edge between a Value definition and its users.
Definition: Use.h:56
const Value * SwiftErrorArg
The swifterror argument of the current function.
MachineSDNode * getMachineNode(unsigned Opcode, const SDLoc &dl, EVT VT)
These are used for target selectors to create a new node with specified return type(s), MachineInstr opcode, and operands.
void visitJumpTable(JumpTable &JT)
visitJumpTable - Emit JumpTable node in the current MBB
DenseMap< const Value *, unsigned > ValueMap
ValueMap - Since we emit code for the function a basic block at a time, we must remember which virtua...
const TargetLowering * TLI
#define LLVM_ATTRIBUTE_ALWAYS_INLINE
LLVM_ATTRIBUTE_ALWAYS_INLINE - On compilers where we have a directive to do so, mark a method "always...
Definition: Compiler.h:198
This class consists of common code factored out of the SmallVector class to reduce code duplication b...
Definition: APFloat.h:42
CopyToReg - This node has three operands: a chain, a register number to set to this value...
Definition: ISDOpcodes.h:170
const MDNode * getMD() const
virtual bool enableMachineSchedDefaultSched() const
True if the machine scheduler should disable the TLI preference for preRA scheduling with the source ...
op_iterator op_end() const
static MachinePassRegistry Registry
RegisterScheduler class - Track the registration of instruction schedulers.
Reg
All possible values of the reg field in the ModR/M byte.
virtual void Select(SDNode *N)=0
Main hook for targets to transform nodes into machine nodes.
ScheduleDAGSDNodes *(*)(SelectionDAGISel *, CodeGenOpt::Level) FunctionPassCtor
The MachineFrameInfo class represents an abstract stack frame until prolog/epilog code is inserted...
SDValue getEntryNode() const
Return the token chain corresponding to the entry of the function.
Definition: SelectionDAG.h:449
const DataLayout & getDataLayout() const
Definition: SelectionDAG.h:390
An analysis pass which caches information about the entire Module.
Definition: GCMetadata.h:154
SDVTList getVTList(EVT VT)
Return an SDVTList that represents the list of values specified.
static LLVM_ATTRIBUTE_ALWAYS_INLINE bool CheckOpcode(const unsigned char *MatcherTable, unsigned &MatcherIndex, SDNode *N)
virtual unsigned getFrameRegister(const MachineFunction &MF) const =0
Debug information queries.
This class defines information used to lower LLVM code to legal SelectionDAG operators that the targe...
This file implements a class to represent arbitrary precision integral constant values and operations...
INLINEASM - Represents an inline asm block.
Definition: ISDOpcodes.h:635
This represents a list of ValueType&#39;s that has been intern&#39;d by a SelectionDAG.
unsigned DAGSize
DAGSize - Size of DAG being instruction selected.
void initializeAAResultsWrapperPassPass(PassRegistry &)
MachineInstr * getVRegDef(unsigned Reg) const
getVRegDef - Return the machine instr that defines the specified virtual register or null if none is ...
bool runOnMachineFunction(MachineFunction &MF) override
runOnMachineFunction - This method must be overloaded to perform the desired machine code transformat...
defusechain_iterator - This class provides iterator support for machine operands in the function that...
unsigned AssignTopologicalOrder()
Topological-sort the AllNodes list and a assign a unique node id for each node in the DAG based on th...
int getArgumentFrameIndex(const Argument *A)
getArgumentFrameIndex - Get frame index for the byval argument.
void getAnalysisUsage(AnalysisUsage &AU) const override
getAnalysisUsage - Subclasses that override getAnalysisUsage must call this.
int64_t getSExtValue() const
Key
PAL metadata keys.
virtual StringRef getPatternForIndex(unsigned index)
getPatternForIndex - Patterns selected by tablegen during ISEL
static bool hasExceptionPointerOrCodeUser(const CatchPadInst *CPI)
This is a fast-path instruction selection class that generates poor code and doesn&#39;t support illegal ...
Definition: FastISel.h:67
A constant value that is initialized with an expression using other constant values.
Definition: Constants.h:867
unsigned getSizeInBits() const
Return the size of the specified value type in bits.
Definition: ValueTypes.h:292
Type * getType() const
All values are typed, get the type of this value.
Definition: Value.h:245
bool getBoolValue() const
Convert APInt to a boolean value.
Definition: APInt.h:471
bool canTrap() const
Return true if evaluation of this constant could trap.
Definition: Constants.cpp:409
void clearKillFlags(unsigned Reg) const
clearKillFlags - Iterate over all the uses of the given register and clear the kill flag from the Mac...
virtual bool supportSwiftError() const
Return true if the target supports swifterror attribute.
bool isSwiftError() const
Return true if this value is a swifterror value.
Definition: Value.cpp:759
SDNode * mutateStrictFPToFP(SDNode *Node)
Mutate the specified strict FP node to its non-strict equivalent, unlinking the node from its chain a...
#define T
ScheduleDAGSDNodes * createBURRListDAGScheduler(SelectionDAGISel *IS, CodeGenOpt::Level OptLevel)
createBURRListDAGScheduler - This creates a bottom up register usage reduction list scheduler...
ArrayRef - Represent a constant reference to an array (0 or more elements consecutively in memory)...
Definition: APInt.h:33
static unsigned IsPredicateKnownToFail(const unsigned char *Table, unsigned Index, SDValue N, bool &Result, const SelectionDAGISel &SDISel, SmallVectorImpl< std::pair< SDValue, SDNode *>> &RecordedNodes)
IsPredicateKnownToFail - If we know how and can do so without pushing a scope, evaluate the current n...
AttributeList getAttributes() const
Return the attribute list for this Function.
Definition: Function.h:205
An instruction for storing to memory.
Definition: Instructions.h:306
CondCode
ISD::CondCode enum - These are ordered carefully to make the bitfields below work out...
Definition: ISDOpcodes.h:918
int getNumber() const
MachineBasicBlocks are uniquely numbered at the function level, unless they&#39;re not in a MachineFuncti...
op_iterator op_begin() const
MachinePassRegistry - Track the registration of machine passes.
void init(GCFunctionInfo *gfi, AliasAnalysis *AA, const TargetLibraryInfo *li)
virtual const TargetInstrInfo * getInstrInfo() const
TargetConstant* - Like Constant*, but the DAG does not do any folding, simplification, or lowering of the constant.
Definition: ISDOpcodes.h:125
SDValue getTargetConstant(uint64_t Val, const SDLoc &DL, EVT VT, bool isOpaque=false)
Definition: SelectionDAG.h:561
static LLVM_ATTRIBUTE_ALWAYS_INLINE uint64_t GetVBR(uint64_t Val, const unsigned char *MatcherTable, unsigned &Idx)
GetVBR - decode a vbr encoding whose top bit is set.
instr_iterator insert(instr_iterator I, MachineInstr *M)
Insert MI into the instruction list before I, possibly inside a bundle.
Concrete subclass of DominatorTreeBase that is used to compute a normal dominator tree...
Definition: Dominators.h:142
RESULT = INTRINSIC_WO_CHAIN(INTRINSICID, arg1, arg2, ...) This node represents a target intrinsic fun...
Definition: ISDOpcodes.h:151
SDValue getTargetConstantFP(double Val, const SDLoc &DL, EVT VT)
Definition: SelectionDAG.h:593
const TargetRegisterClass * constrainRegClass(unsigned Reg, const TargetRegisterClass *RC, unsigned MinNumRegs=0)
constrainRegClass - Constrain the register class of the specified virtual register to be a common sub...
Legacy analysis pass which computes BranchProbabilityInfo.
const DataLayout & getDataLayout() const
Return the DataLayout attached to the Module associated to this MF.
auto count(R &&Range, const E &Element) -> typename std::iterator_traits< decltype(adl_begin(Range))>::difference_type
Wrapper function around std::count to count the number of times an element Element occurs in the give...
Definition: STLExtras.h:880
void printrFull(raw_ostream &O, const SelectionDAG *G=nullptr) const
Print a SelectionDAG node and all children down to the leaves.
StringRef getName() const
getName - Return the name of the corresponding LLVM function.
static cl::opt< bool > EnableFastISelFallbackReport("fast-isel-report-on-fallback", cl::Hidden, cl::desc("Emit a diagnostic when \ast\instruction selection " "falls back to SelectionDAG."))
UNDEF - An undefined node.
Definition: ISDOpcodes.h:178
static cl::opt< bool > ViewDAGCombineLT("view-dag-combine-lt-dags", cl::Hidden, cl::desc("Pop up a window to show dags before the post legalize types" " dag combine pass"))
uint16_t MCPhysReg
An unsigned integer type large enough to represent all physical registers, but not necessarily virtua...
TargetInstrInfo - Interface to description of machine instruction set.
This corresponds to the llvm.lifetime.
Definition: ISDOpcodes.h:803
MachineRegisterInfo * RegInfo
void clear()
clear - Clear out all the function-specific state.
iterator find(const_arg_type_t< KeyT > Val)
Definition: DenseMap.h:146
unsigned ComputeNumSignBits(SDValue Op, unsigned Depth=0) const
Return the number of times the sign bit of the register is replicated into the other bits...
bool isVoidTy() const
Return true if this is &#39;void&#39;.
Definition: Type.h:141
const BasicBlock & getEntryBlock() const
Definition: Function.h:618
unsigned getNumValues() const
Return the number of values defined/returned by this operator.
bool hasAttrSomewhere(Attribute::AttrKind Kind, unsigned *Index=nullptr) const
Return true if the specified attribute is set for at least one parameter or for the return value...
static SDValue HandleMergeInputChains(SmallVectorImpl< SDNode *> &ChainNodesMatched, SelectionDAG *CurDAG)
HandleMergeInputChains - This implements the OPC_EmitMergeInputChains operation for when the pattern ...
MachineInstrBuilder BuildMI(MachineFunction &MF, const DebugLoc &DL, const MCInstrDesc &MCID)
Builder interface. Specify how to create the initial instruction itself.
BasicBlock * SplitCriticalEdge(TerminatorInst *TI, unsigned SuccNum, const CriticalEdgeSplittingOptions &Options=CriticalEdgeSplittingOptions())
If this edge is a critical edge, insert a new node to split the critical edge.
unsigned getObjectAlignment(int ObjectIdx) const
Return the alignment of the specified stack object.
CriticalEdgeSplittingOptions & setMergeIdenticalEdges()
virtual bool CheckPatternPredicate(unsigned PredNo) const
CheckPatternPredicate - This function is generated by tblgen in the target.
initializer< Ty > init(const Ty &Val)
Definition: CommandLine.h:406
SmallPtrSet< const Instruction *, 4 > ElidedArgCopyInstrs
void initializeBranchProbabilityInfoWrapperPassPass(PassRegistry &)
OUTCHAIN = INTRINSIC_VOID(INCHAIN, INTRINSICID, arg1, arg2, ...) This node represents a target intrin...
Definition: ISDOpcodes.h:166
CodeGenOpt::Level OptLevel
void addLiveIn(MCPhysReg PhysReg, LaneBitmask LaneMask=LaneBitmask::getAll())
Adds the specified register as a live in.
ScheduleDAGSDNodes - A ScheduleDAG for scheduling SDNode-based DAGs.
const Instruction * getFirstNonPHI() const
Returns a pointer to the first instruction in this block that is not a PHINode instruction.
Definition: BasicBlock.cpp:171
READ_REGISTER, WRITE_REGISTER - This node represents llvm.register on the DAG, which implements the n...
Definition: ISDOpcodes.h:85
Subclasses of this class are all able to terminate a basic block.
Definition: InstrTypes.h:55
std::vector< std::pair< MachineInstr *, unsigned > > PHINodesToUpdate
PHINodesToUpdate - A list of phi instructions whose operand list will be updated after processing the...
unsigned const MachineRegisterInfo * MRI
ScheduleDAGSDNodes * createVLIWDAGScheduler(SelectionDAGISel *IS, CodeGenOpt::Level OptLevel)
createVLIWDAGScheduler - Scheduler for VLIW targets.
StringRef getName() const
Return the name of the corresponding LLVM basic block, or an empty string.
bool HasTailCall
HasTailCall - This is set to true if a call in the current block has been translated as a tail call...
MVT getPointerTy(const DataLayout &DL, uint32_t AS=0) const
Return the pointer type for the given address space, defaults to the pointer type from the data layou...
static void preassignSwiftErrorRegs(const TargetLowering *TLI, FunctionLoweringInfo *FuncInfo, BasicBlock::const_iterator Begin, BasicBlock::const_iterator End)
void Legalize()
This transforms the SelectionDAG into a SelectionDAG that is compatible with the target instruction s...
virtual unsigned getExceptionPointerRegister(const Constant *PersonalityFn) const
If a physical register, this returns the register that receives the exception address on entry to an ...
ScheduleDAGSDNodes * createDefaultScheduler(SelectionDAGISel *IS, CodeGenOpt::Level OptLevel)
createDefaultScheduler - This creates an instruction scheduler appropriate for the target...
use_iterator use_begin() const
Provide iteration support to walk over all uses of an SDNode.
constexpr bool isPowerOf2_32(uint32_t Value)
Return true if the argument is a power of two > 0.
Definition: MathExtras.h:421
Machine Value Type.
LLVM Basic Block Representation.
Definition: BasicBlock.h:59
const TargetSubtargetInfo & getSubtarget() const
getSubtarget - Return the subtarget for which this machine code is being compiled.
void getAnalysisUsage(AnalysisUsage &AU) const override
getAnalysisUsage - Subclasses that override getAnalysisUsage must call this.
DISubprogram * getSubprogram() const
Get the attached subprogram.
Definition: Metadata.cpp:1505
Value * getAddress() const
MachineFrameInfo & getFrameInfo()
getFrameInfo - Return the frame info object for the current function.
std::pair< unsigned, bool > getOrCreateSwiftErrorVRegUseAt(const Instruction *, const MachineBasicBlock *, const Value *)
MachineInstrBuilder & UseMI
static GCRegistry::Add< CoreCLRGC > E("coreclr", "CoreCLR-compatible GC")
This is an important base class in LLVM.
Definition: Constant.h:42
void removeDeadCode(MachineBasicBlock::iterator I, MachineBasicBlock::iterator E)
Remove all dead instructions between the I and E.
Definition: FastISel.cpp:375
iterator_range< value_op_iterator > op_values() const
LLVM_ATTRIBUTE_ALWAYS_INLINE iterator begin()
Definition: SmallVector.h:116
const SDValue & getOperand(unsigned Num) const
ScheduleDAGSDNodes * createILPListDAGScheduler(SelectionDAGISel *IS, CodeGenOpt::Level)
createILPListDAGScheduler - This creates a bottom up register pressure aware list scheduler that trie...
SmallSet - This maintains a set of unique values, optimizing for the case when the set is small (less...
Definition: SmallSet.h:36
This file contains the declarations for the subclasses of Constant, which represent the different fla...
bool succ_empty(const BasicBlock *BB)
Definition: CFG.h:140
ConstantFP - Floating Point Values [float, double].
Definition: Constants.h:264
std::pair< iterator, bool > insert(PtrType Ptr)
Inserts Ptr if and only if there is no element in the container equal to Ptr.
Definition: SmallPtrSet.h:371
Interval::pred_iterator pred_begin(Interval *I)
pred_begin/pred_end - define methods so that Intervals may be used just like BasicBlocks can with the...
Definition: Interval.h:113
virtual unsigned getExceptionSelectorRegister(const Constant *PersonalityFn) const
If a physical register, this returns the register that receives the exception typeid on entry to a la...
static unsigned getNumOperandRegisters(unsigned Flag)
getNumOperandRegisters - Extract the number of registers field from the inline asm operand flag...
Definition: InlineAsm.h:336
void RemoveDeadNodes()
This method deletes all unreachable nodes in the SelectionDAG.
Represent the analysis usage information of a pass.
void Combine(CombineLevel Level, AliasAnalysis *AA, CodeGenOpt::Level OptLevel)
This iterates over the nodes in the SelectionDAG, folding certain types of nodes together, or eliminating superfluous nodes.
This class provides iterator support for SDUse operands that use a specific SDNode.
DILocalVariable * getVariable() const
Definition: IntrinsicInst.h:80
use_instr_iterator use_instr_begin(unsigned RegNo) const
bool tryToFoldLoad(const LoadInst *LI, const Instruction *FoldInst)
We&#39;re checking to see if we can fold LI into FoldInst.
Definition: FastISel.cpp:2096
static LLVM_ATTRIBUTE_ALWAYS_INLINE bool CheckChildInteger(const unsigned char *MatcherTable, unsigned &MatcherIndex, SDValue N, unsigned ChildNo)
static unsigned getMemoryConstraintID(unsigned Flag)
Definition: InlineAsm.h:329
bool lowerArguments()
Do "fast" instruction selection for function arguments and append the machine instructions to the cur...
Definition: FastISel.cpp:136
static const unsigned End
static cl::opt< bool > UseMBPI("use-mbpi", cl::desc("use Machine Branch Probability Info"), cl::init(true), cl::Hidden)
const APInt & getAPIntValue() const
DIExpression * getExpression() const
Definition: IntrinsicInst.h:84
Interval::pred_iterator pred_end(Interval *I)
Definition: Interval.h:116
AssertSext, AssertZext - These nodes record if a register contains a value that has already been zero...
Definition: ISDOpcodes.h:57
arg_iterator arg_begin()
Definition: Function.h:649
void setHasInlineAsm(bool B)
Set a flag that indicates that the function contains inline assembly.
virtual void PostprocessISelDAG()
PostprocessISelDAG() - This hook allows the target to hack on the graph right after selection...
void RemoveDeadNode(SDNode *N)
Remove the specified node from the system.
self_iterator getIterator()
Definition: ilist_node.h:82
void AddLiveOutRegInfo(unsigned Reg, unsigned NumSignBits, const KnownBits &Known)
AddLiveOutRegInfo - Adds LiveOutInfo for a register.
virtual MachineBasicBlock * EmitInstrWithCustomInserter(MachineInstr &MI, MachineBasicBlock *MBB) const
This method should be implemented by targets that mark instructions with the &#39;usesCustomInserter&#39; fla...
std::pair< NoneType, bool > insert(const T &V)
insert - Insert an element into the set if it isn&#39;t already there.
Definition: SmallSet.h:81
auto find_if(R &&Range, UnaryPredicate P) -> decltype(adl_begin(Range))
Provide wrappers to std::find_if which take ranges instead of having to pass begin/end explicitly...
Definition: STLExtras.h:842
static unsigned getFlagWordForMem(unsigned InputFlag, unsigned Constraint)
Augment an existing flag word returned by getFlagWord with the constraint code for a memory constrain...
Definition: InlineAsm.h:312
static cl::opt< int > EnableFastISelAbort("fast-isel-abort", cl::Hidden, cl::desc("Enable abort calls when \ast\instruction selection " "fails to lower an instruction: 0 disable the abort, 1 will " "abort but for args, calls and terminators, 2 will also " "abort for argument lowering, and 3 will never fallback " "to SelectionDAG."))
LLVMContext & getContext() const
getContext - Return a reference to the LLVMContext associated with this function. ...
Definition: Function.cpp:194
unsigned ExceptionPointerVirtReg
If the current MBB is a landing pad, the exception pointer and exception selector registers are copie...
SmallPtrSet< const BasicBlock *, 4 > VisitedBBs
VisitedBBs - The set of basic blocks visited thus far by instruction selection.
static void reportFastISelFailure(MachineFunction &MF, OptimizationRemarkEmitter &ORE, OptimizationRemarkMissed &R, bool ShouldAbort)
static bool isMemKind(unsigned Flag)
Definition: InlineAsm.h:277
bool isCopy() const
Definition: MachineInstr.h:860
Extended Value Type.
Definition: ValueTypes.h:34
bool isImplicitDef() const
Definition: MachineInstr.h:834
static LLVM_ATTRIBUTE_ALWAYS_INLINE bool CheckOrImm(const unsigned char *MatcherTable, unsigned &MatcherIndex, SDValue N, const SelectionDAGISel &SDISel)
TargetRegisterInfo base class - We assume that the target defines a static array of TargetRegisterDesc objects that represent all of the machine registers that the target has...
const MachineBasicBlock & front() const
StringRef getName(unsigned Opcode) const
Returns the name for the instructions with the given opcode.
Definition: MCInstrInfo.h:51
Value * getIncomingValue(unsigned i) const
Return incoming value number i.
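A hedged sketch of the access pattern this accessor supports: walking a PHI's incoming (value, predecessor) pairs.

#include "llvm/IR/Instructions.h"

// Visit every incoming value of a PHI together with its predecessor block.
static void walkIncoming(const llvm::PHINode &PN) {
  for (unsigned i = 0, e = PN.getNumIncomingValues(); i != e; ++i) {
    const llvm::Value *V = PN.getIncomingValue(i);
    const llvm::BasicBlock *Pred = PN.getIncomingBlock(i);
    (void)V;
    (void)Pred;
  }
}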
void ComputePHILiveOutRegInfo(const PHINode *)
ComputePHILiveOutRegInfo - Compute LiveOutInfo for a PHI's destination register based on the LiveOutInfo of its operands.
bool isMachineOpcode() const
Test if this node has a post-isel opcode, directly corresponding to a MachineInstr opcode...
static void SplitCriticalSideEffectEdges(Function &Fn, DominatorTree *DT, LoopInfo *LI)
SplitCriticalSideEffectEdges - Look for critical edges with a PHI value that may trap on it...
unsigned getNumOperands() const
Return the number of values used by this operation.
std::string & str()
Flushes the stream contents to the target string and returns the string's reference.
Definition: raw_ostream.h:478
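A self-contained sketch of the flush-and-return behaviour noted here, the same pattern used when composing fast-isel diagnostic messages. The message text is illustrative.

#include "llvm/Support/raw_ostream.h"
#include <string>

static std::string makeMessage(unsigned Count) {
  std::string Buf;
  llvm::raw_string_ostream OS(Buf);
  OS << "fast-isel missed " << Count << " instructions";
  return OS.str(); // flushes the stream into Buf and returns it
}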
#define llvm_unreachable(msg)
Marks that the current location is not supposed to be reachable.
HANDLENODE node - Used as a handle for various purposes.
Definition: ISDOpcodes.h:718
const DIExpression * getDebugExpression() const
Return the complex address expression referenced by this DBG_VALUE instruction.
MachineBasicBlock * MBB
MBB - The current block.
virtual bool enableMachineScheduler() const
True if the subtarget should run MachineScheduler after aggressive coalescing.
EH_LABEL - Represents a label in mid basic block used to track locations needed for debug and exception handling tables.
Definition: ISDOpcodes.h:640
unsigned first
bool isOrEquivalentToAdd(const SDNode *N) const
TargetIntrinsicInfo - Interface to description of machine instruction set.
static LLVM_ATTRIBUTE_ALWAYS_INLINE bool CheckInteger(const unsigned char *MatcherTable, unsigned &MatcherIndex, SDValue N)
void recomputeInsertPt()
Reset InsertPt to prepare for inserting instructions into the current block.
Definition: FastISel.cpp:361
bool use_empty() const
Return true if there are no uses of this node.
bool SplitCSR
True if part of the CSRs will be handled via explicit copies.
bool isLandingPad() const
Return true if this basic block is a landing pad.
Definition: BasicBlock.cpp:443
TokenFactor - This node takes multiple tokens as input and produces a single token result...
Definition: ISDOpcodes.h:50
void dump() const
Dump this node, for debugging.
void visitSPDescriptorFailure(StackProtectorDescriptor &SPD)
Codegen the failure basic block for a stack protector check.
ScheduleDAGSDNodes * createSourceListDAGScheduler(SelectionDAGISel *IS, CodeGenOpt::Level OptLevel)
createSourceListDAGScheduler - This creates a bottom up list scheduler that schedules nodes in source code order when possible.
bool memoperands_empty() const
Iterator for intrusive lists based on ilist_node.
virtual bool supportSplitCSR(MachineFunction *MF) const
Return true if the target supports that a subset of CSRs for the given machine function is handled explicitly via copies.
SmallPtrSet - This class implements a set which is optimized for holding SmallSize or less elements...
Definition: SmallPtrSet.h:418
virtual void initializeSplitCSR(MachineBasicBlock *Entry) const
Perform necessary initialization to handle a subset of CSRs explicitly via copies.
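A hedged sketch of how this hook, supportSplitCSR, and the SplitCSR flag listed above fit together during function setup; the real code also checks the optimization level and collects return blocks, which is omitted here.

#include "llvm/CodeGen/FunctionLoweringInfo.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/TargetLowering.h"

// Illustrative only: decide whether to split CSR handling and, if so, let
// the target set up the explicit copies in the entry block.
static void maybeSplitCSR(llvm::FunctionLoweringInfo &FuncInfo,
                          const llvm::TargetLowering &TLI,
                          llvm::MachineFunction &MF) {
  FuncInfo.SplitCSR = false;
  if (TLI.supportSplitCSR(&MF)) {
    FuncInfo.SplitCSR = true;
    TLI.initializeSplitCSR(&MF.front());
  }
}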
This is the shared class of boolean and integer constants.
Definition: Constants.h:84
void emit(DiagnosticInfoOptimizationBase &OptDiag)
Output the remark via the diagnostic handler and to the optimization record file. ...
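A hedged sketch of the remark-emission pattern used by reportFastISelFailure above; the pass name, remark name, and message here are placeholders.

#include "llvm/Analysis/OptimizationRemarkEmitter.h"
#include "llvm/IR/DiagnosticInfo.h"
#include "llvm/IR/Instruction.h"

// Build a missed-optimization remark anchored at I and hand it to the emitter.
static void reportMissed(llvm::OptimizationRemarkEmitter &ORE,
                         const llvm::Instruction *I) {
  llvm::OptimizationRemarkMissed R("isel", "ExampleRemark", I);
  R << "illustrative message only";
  ORE.emit(R);
}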
virtual bool CheckComplexPattern(SDNode *Root, SDNode *Parent, SDValue N, unsigned PatternNo, SmallVectorImpl< std::pair< SDValue, SDNode *> > &Result)
DenseMap< std::pair< const MachineBasicBlock *, const Value * >, unsigned > SwiftErrorVRegDefMap
A map from swifterror value in a basic block to the virtual register it is currently represented by...
DenseMap< unsigned, unsigned > RegFixups
RegFixups - Registers which need to be replaced after isel is done.
This is used to represent a portion of an LLVM function in a low-level Data Dependence DAG representation.
Definition: SelectionDAG.h:210
SDNode * SelectNodeTo(SDNode *N, unsigned TargetOpc, EVT VT)
These are used for target selectors to mutate the specified node to have the specified return type...
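A hedged sketch of how a target's Select() might use this mutation API; the opcode is left as a parameter because a real selector would supply a concrete target opcode and the matched operands.

#include "llvm/CodeGen/SelectionDAG.h"

// Illustrative only: mutate N in place into a machine node with the given
// target opcode, keeping its first result type.
static llvm::SDNode *mutateToMachineNode(llvm::SelectionDAG &DAG,
                                         llvm::SDNode *N, unsigned TargetOpc) {
  return DAG.SelectNodeTo(N, TargetOpc, N->getValueType(0));
}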
bool isDebugValue() const
Definition: MachineInstr.h:819
MachineOperand class - Representation of each machine instruction operand.
This is a 'vector' (really, a variable-sized array), optimized for the case when the array is small...
Definition: SmallVector.h:862
SmallVector< MachineInstr *, 8 > ArgDbgValues
ArgDbgValues - A list of DBG_VALUE instructions created during isel for function arguments that are inserted...
void clear()
Clear out the current SelectionDAG and the associated state and prepare this SelectionDAGBuilder object to be used for a new block.
void setFastISel(bool Enable)
static void createSwiftErrorEntriesInEntryBlock(FunctionLoweringInfo *FuncInfo, FastISel *FastIS, const TargetLowering *TLI, const TargetInstrInfo *TII, SelectionDAGBuilder *SDB)
An SDNode that represents everything that will be needed to construct a MachineInstr.
SelectionDAGISel(TargetMachine &tm, CodeGenOpt::Level OL=CodeGenOpt::Default)
void visit(const Instruction &I)
LLVM_NODISCARD T pop_back_val()
Definition: SmallVector.h:383
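A self-contained sketch combining this accessor with the SmallVector entry above into the usual worklist idiom.

#include "llvm/ADT/SmallVector.h"

// Drain a LIFO worklist; pop_back_val() removes and returns the last element.
static unsigned sumWorklist(llvm::SmallVector<unsigned, 8> Worklist) {
  unsigned Sum = 0;
  while (!Worklist.empty())
    Sum += Worklist.pop_back_val();
  return Sum;
}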
static void processDbgDeclares(FunctionLoweringInfo *FuncInfo)
Collect llvm.dbg.declare information.
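A hedged sketch of the reads processDbgDeclares performs on each llvm.dbg.declare it collects, using the getVariable/getExpression accessors listed earlier; the frame-index bookkeeping itself is omitted.

#include "llvm/IR/IntrinsicInst.h"

// Pull out the pieces of a dbg.declare that frame-index lowering cares about.
static void inspectDeclare(const llvm::DbgDeclareInst &DI) {
  const llvm::Value *Address = DI.getAddress(); // usually an alloca
  llvm::DILocalVariable *Var = DI.getVariable(); // the source-level variable
  llvm::DIExpression *Expr = DI.getExpression(); // complex address expression
  (void)Address;
  (void)Var;
  (void)Expr;
}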
DenseMap< std::pair< const MachineBasicBlock *, const Value * >, unsigned > SwiftErrorVRegUpwardsUse
A list of upward exposed vreg uses that need to be satisfied by either a copy def or a phi node at th...
Wrapper class for IR location info (IR ordering and DebugLoc) to be passed into SDNode creation functions.
Represents one node in the SelectionDAG.
SelectionDAGISel - This is the common base class used for SelectionDAG-based pattern-matching instruction selectors.
void UpdateSplitBlock(MachineBasicBlock *First, MachineBasicBlock *Last)
UpdateSplitBlock - When an MBB was split during scheduling, update the references that need to refer to the last resulting block.