LLVM  9.0.0svn
SelectionDAGISel.cpp
Go to the documentation of this file.
1 //===- SelectionDAGISel.cpp - Implement the SelectionDAGISel class --------===//
2 //
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 //
7 //===----------------------------------------------------------------------===//
8 //
9 // This implements the SelectionDAGISel class.
10 //
11 //===----------------------------------------------------------------------===//
12 
14 #include "ScheduleDAGSDNodes.h"
15 #include "SelectionDAGBuilder.h"
16 #include "llvm/ADT/APInt.h"
17 #include "llvm/ADT/DenseMap.h"
18 #include "llvm/ADT/None.h"
20 #include "llvm/ADT/STLExtras.h"
21 #include "llvm/ADT/SmallPtrSet.h"
22 #include "llvm/ADT/SmallSet.h"
23 #include "llvm/ADT/SmallVector.h"
24 #include "llvm/ADT/Statistic.h"
25 #include "llvm/ADT/StringRef.h"
28 #include "llvm/Analysis/CFG.h"
33 #include "llvm/CodeGen/FastISel.h"
57 #include "llvm/IR/BasicBlock.h"
58 #include "llvm/IR/Constants.h"
59 #include "llvm/IR/DataLayout.h"
61 #include "llvm/IR/DebugLoc.h"
62 #include "llvm/IR/DiagnosticInfo.h"
63 #include "llvm/IR/Dominators.h"
64 #include "llvm/IR/Function.h"
65 #include "llvm/IR/InlineAsm.h"
66 #include "llvm/IR/InstIterator.h"
67 #include "llvm/IR/InstrTypes.h"
68 #include "llvm/IR/Instruction.h"
69 #include "llvm/IR/Instructions.h"
70 #include "llvm/IR/IntrinsicInst.h"
71 #include "llvm/IR/Intrinsics.h"
72 #include "llvm/IR/Metadata.h"
73 #include "llvm/IR/Type.h"
74 #include "llvm/IR/User.h"
75 #include "llvm/IR/Value.h"
76 #include "llvm/MC/MCInstrDesc.h"
77 #include "llvm/MC/MCRegisterInfo.h"
78 #include "llvm/Pass.h"
80 #include "llvm/Support/Casting.h"
81 #include "llvm/Support/CodeGen.h"
83 #include "llvm/Support/Compiler.h"
84 #include "llvm/Support/Debug.h"
86 #include "llvm/Support/KnownBits.h"
88 #include "llvm/Support/Timer.h"
94 #include <algorithm>
95 #include <cassert>
96 #include <cstdint>
97 #include <iterator>
98 #include <limits>
99 #include <memory>
100 #include <string>
101 #include <utility>
102 #include <vector>
103 
104 using namespace llvm;
105 
106 #define DEBUG_TYPE "isel"
107 
108 STATISTIC(NumFastIselFailures, "Number of instructions fast isel failed on");
109 STATISTIC(NumFastIselSuccess, "Number of instructions fast isel selected");
110 STATISTIC(NumFastIselBlocks, "Number of blocks selected entirely by fast isel");
111 STATISTIC(NumDAGBlocks, "Number of blocks selected using DAG");
112 STATISTIC(NumDAGIselRetries,"Number of times dag isel has to try another path");
113 STATISTIC(NumEntryBlocks, "Number of entry blocks encountered");
114 STATISTIC(NumFastIselFailLowerArguments,
115  "Number of entry blocks where fast isel failed to lower arguments");
116 
118  "fast-isel-abort", cl::Hidden,
119  cl::desc("Enable abort calls when \"fast\" instruction selection "
120  "fails to lower an instruction: 0 disable the abort, 1 will "
121  "abort but for args, calls and terminators, 2 will also "
122  "abort for argument lowering, and 3 will never fallback "
123  "to SelectionDAG."));
124 
126  "fast-isel-report-on-fallback", cl::Hidden,
127  cl::desc("Emit a diagnostic when \"fast\" instruction selection "
128  "falls back to SelectionDAG."));
129 
130 static cl::opt<bool>
131 UseMBPI("use-mbpi",
132  cl::desc("use Machine Branch Probability Info"),
133  cl::init(true), cl::Hidden);
134 
135 #ifndef NDEBUG
137 FilterDAGBasicBlockName("filter-view-dags", cl::Hidden,
138  cl::desc("Only display the basic block whose name "
139  "matches this for all view-*-dags options"));
140 static cl::opt<bool>
141 ViewDAGCombine1("view-dag-combine1-dags", cl::Hidden,
142  cl::desc("Pop up a window to show dags before the first "
143  "dag combine pass"));
144 static cl::opt<bool>
145 ViewLegalizeTypesDAGs("view-legalize-types-dags", cl::Hidden,
146  cl::desc("Pop up a window to show dags before legalize types"));
147 static cl::opt<bool>
148 ViewLegalizeDAGs("view-legalize-dags", cl::Hidden,
149  cl::desc("Pop up a window to show dags before legalize"));
150 static cl::opt<bool>
151 ViewDAGCombine2("view-dag-combine2-dags", cl::Hidden,
152  cl::desc("Pop up a window to show dags before the second "
153  "dag combine pass"));
154 static cl::opt<bool>
155 ViewDAGCombineLT("view-dag-combine-lt-dags", cl::Hidden,
156  cl::desc("Pop up a window to show dags before the post legalize types"
157  " dag combine pass"));
158 static cl::opt<bool>
159 ViewISelDAGs("view-isel-dags", cl::Hidden,
160  cl::desc("Pop up a window to show isel dags as they are selected"));
161 static cl::opt<bool>
162 ViewSchedDAGs("view-sched-dags", cl::Hidden,
163  cl::desc("Pop up a window to show sched dags as they are processed"));
164 static cl::opt<bool>
165 ViewSUnitDAGs("view-sunit-dags", cl::Hidden,
166  cl::desc("Pop up a window to show SUnit dags after they are processed"));
167 #else
168 static const bool ViewDAGCombine1 = false,
169  ViewLegalizeTypesDAGs = false, ViewLegalizeDAGs = false,
170  ViewDAGCombine2 = false,
171  ViewDAGCombineLT = false,
172  ViewISelDAGs = false, ViewSchedDAGs = false,
173  ViewSUnitDAGs = false;
174 #endif
175 
176 //===---------------------------------------------------------------------===//
177 ///
178 /// RegisterScheduler class - Track the registration of instruction schedulers.
179 ///
180 //===---------------------------------------------------------------------===//
183 
184 //===---------------------------------------------------------------------===//
185 ///
186 /// ISHeuristic command line option for instruction schedulers.
187 ///
188 //===---------------------------------------------------------------------===//
191 ISHeuristic("pre-RA-sched",
193  cl::desc("Instruction schedulers available (before register"
194  " allocation):"));
195 
196 static RegisterScheduler
197 defaultListDAGScheduler("default", "Best scheduler for the target",
199 
200 namespace llvm {
201 
202  //===--------------------------------------------------------------------===//
203  /// This class is used by SelectionDAGISel to temporarily override
204  /// the optimization level on a per-function basis.
206  SelectionDAGISel &IS;
207  CodeGenOpt::Level SavedOptLevel;
208  bool SavedFastISel;
209 
210  public:
212  CodeGenOpt::Level NewOptLevel) : IS(ISel) {
213  SavedOptLevel = IS.OptLevel;
214  if (NewOptLevel == SavedOptLevel)
215  return;
216  IS.OptLevel = NewOptLevel;
217  IS.TM.setOptLevel(NewOptLevel);
218  LLVM_DEBUG(dbgs() << "\nChanging optimization level for Function "
219  << IS.MF->getFunction().getName() << "\n");
220  LLVM_DEBUG(dbgs() << "\tBefore: -O" << SavedOptLevel << " ; After: -O"
221  << NewOptLevel << "\n");
222  SavedFastISel = IS.TM.Options.EnableFastISel;
223  if (NewOptLevel == CodeGenOpt::None) {
225  LLVM_DEBUG(
226  dbgs() << "\tFastISel is "
227  << (IS.TM.Options.EnableFastISel ? "enabled" : "disabled")
228  << "\n");
229  }
230  }
231 
233  if (IS.OptLevel == SavedOptLevel)
234  return;
235  LLVM_DEBUG(dbgs() << "\nRestoring optimization level for Function "
236  << IS.MF->getFunction().getName() << "\n");
237  LLVM_DEBUG(dbgs() << "\tBefore: -O" << IS.OptLevel << " ; After: -O"
238  << SavedOptLevel << "\n");
239  IS.OptLevel = SavedOptLevel;
240  IS.TM.setOptLevel(SavedOptLevel);
241  IS.TM.setFastISel(SavedFastISel);
242  }
243  };
244 
245  //===--------------------------------------------------------------------===//
246  /// createDefaultScheduler - This creates an instruction scheduler appropriate
247  /// for the target.
249  CodeGenOpt::Level OptLevel) {
250  const TargetLowering *TLI = IS->TLI;
251  const TargetSubtargetInfo &ST = IS->MF->getSubtarget();
252 
253  // Try first to see if the Target has its own way of selecting a scheduler
254  if (auto *SchedulerCtor = ST.getDAGScheduler(OptLevel)) {
255  return SchedulerCtor(IS, OptLevel);
256  }
257 
258  if (OptLevel == CodeGenOpt::None ||
261  return createSourceListDAGScheduler(IS, OptLevel);
263  return createBURRListDAGScheduler(IS, OptLevel);
265  return createHybridListDAGScheduler(IS, OptLevel);
266  if (TLI->getSchedulingPreference() == Sched::VLIW)
267  return createVLIWDAGScheduler(IS, OptLevel);
269  "Unknown sched type!");
270  return createILPListDAGScheduler(IS, OptLevel);
271  }
272 
273 } // end namespace llvm
274 
275 // EmitInstrWithCustomInserter - This method should be implemented by targets
276 // that mark instructions with the 'usesCustomInserter' flag. These
277 // instructions are special in various ways, which require special support to
278 // insert. The specified MachineInstr is created but not inserted into any
279 // basic blocks, and this method is called to expand it into a sequence of
280 // instructions, potentially also creating new basic blocks and control flow.
281 // When new basic blocks are inserted and the edges from MBB to its successors
282 // are modified, the method should insert pairs of <OldSucc, NewSucc> into the
283 // DenseMap.
286  MachineBasicBlock *MBB) const {
287 #ifndef NDEBUG
288  dbgs() << "If a target marks an instruction with "
289  "'usesCustomInserter', it must implement "
290  "TargetLowering::EmitInstrWithCustomInserter!";
291 #endif
292  llvm_unreachable(nullptr);
293 }
294 
296  SDNode *Node) const {
297  assert(!MI.hasPostISelHook() &&
298  "If a target marks an instruction with 'hasPostISelHook', "
299  "it must implement TargetLowering::AdjustInstrPostInstrSelection!");
300 }
301 
302 //===----------------------------------------------------------------------===//
303 // SelectionDAGISel code
304 //===----------------------------------------------------------------------===//
305 
307  CodeGenOpt::Level OL) :
308  MachineFunctionPass(ID), TM(tm),
309  FuncInfo(new FunctionLoweringInfo()),
310  CurDAG(new SelectionDAG(tm, OL)),
311  SDB(new SelectionDAGBuilder(*CurDAG, *FuncInfo, OL)),
312  AA(), GFI(),
313  OptLevel(OL),
314  DAGSize(0) {
321  }
322 
324  delete SDB;
325  delete CurDAG;
326  delete FuncInfo;
327 }
328 
330  if (OptLevel != CodeGenOpt::None)
340 }
341 
342 /// SplitCriticalSideEffectEdges - Look for critical edges with a PHI value that
343 /// may trap on it. In this case we have to split the edge so that the path
344 /// through the predecessor block that doesn't go to the phi block doesn't
345 /// execute the possibly trapping instruction. If available, we pass domtree
346 /// and loop info to be updated when we split critical edges. This is because
347 /// SelectionDAGISel preserves these analyses.
348 /// This is required for correctness, so it must be done at -O0.
349 ///
351  LoopInfo *LI) {
352  // Loop for blocks with phi nodes.
353  for (BasicBlock &BB : Fn) {
354  PHINode *PN = dyn_cast<PHINode>(BB.begin());
355  if (!PN) continue;
356 
357  ReprocessBlock:
358  // For each block with a PHI node, check to see if any of the input values
359  // are potentially trapping constant expressions. Constant expressions are
360  // the only potentially trapping value that can occur as the argument to a
361  // PHI.
362  for (BasicBlock::iterator I = BB.begin(); (PN = dyn_cast<PHINode>(I)); ++I)
363  for (unsigned i = 0, e = PN->getNumIncomingValues(); i != e; ++i) {
365  if (!CE || !CE->canTrap()) continue;
366 
367  // The only case we have to worry about is when the edge is critical.
368  // Since this block has a PHI Node, we assume it has multiple input
369  // edges: check to see if the pred has multiple successors.
370  BasicBlock *Pred = PN->getIncomingBlock(i);
371  if (Pred->getTerminator()->getNumSuccessors() == 1)
372  continue;
373 
374  // Okay, we have to split this edge.
376  Pred->getTerminator(), GetSuccessorNumber(Pred, &BB),
378  goto ReprocessBlock;
379  }
380  }
381 }
382 
383 static void computeUsesMSVCFloatingPoint(const Triple &TT, const Function &F,
384  MachineModuleInfo &MMI) {
385  // Only needed for MSVC
387  return;
388 
389  // If it's already set, nothing to do.
390  if (MMI.usesMSVCFloatingPoint())
391  return;
392 
393  for (const Instruction &I : instructions(F)) {
394  if (I.getType()->isFPOrFPVectorTy()) {
395  MMI.setUsesMSVCFloatingPoint(true);
396  return;
397  }
398  for (const auto &Op : I.operands()) {
399  if (Op->getType()->isFPOrFPVectorTy()) {
400  MMI.setUsesMSVCFloatingPoint(true);
401  return;
402  }
403  }
404  }
405 }
406 
408  // If we already selected that function, we do not need to run SDISel.
409  if (mf.getProperties().hasProperty(
411  return false;
412  // Do some sanity-checking on the command-line options.
414  "-fast-isel-abort > 0 requires -fast-isel");
415 
416  const Function &Fn = mf.getFunction();
417  MF = &mf;
418 
419  // Reset the target options before resetting the optimization
420  // level below.
421  // FIXME: This is a horrible hack and should be processed via
422  // codegen looking at the optimization level explicitly when
423  // it wants to look at it.
425  // Reset OptLevel to None for optnone functions.
426  CodeGenOpt::Level NewOptLevel = OptLevel;
427  if (OptLevel != CodeGenOpt::None && skipFunction(Fn))
428  NewOptLevel = CodeGenOpt::None;
429  OptLevelChanger OLC(*this, NewOptLevel);
430 
433  RegInfo = &MF->getRegInfo();
434  LibInfo = &getAnalysis<TargetLibraryInfoWrapperPass>().getTLI();
435  GFI = Fn.hasGC() ? &getAnalysis<GCModuleInfo>().getFunctionInfo(Fn) : nullptr;
436  ORE = make_unique<OptimizationRemarkEmitter>(&Fn);
437  auto *DTWP = getAnalysisIfAvailable<DominatorTreeWrapperPass>();
438  DominatorTree *DT = DTWP ? &DTWP->getDomTree() : nullptr;
439  auto *LIWP = getAnalysisIfAvailable<LoopInfoWrapperPass>();
440  LoopInfo *LI = LIWP ? &LIWP->getLoopInfo() : nullptr;
441 
442  LLVM_DEBUG(dbgs() << "\n\n\n=== " << Fn.getName() << "\n");
443 
444  SplitCriticalSideEffectEdges(const_cast<Function &>(Fn), DT, LI);
445 
446  CurDAG->init(*MF, *ORE, this, LibInfo,
447  getAnalysisIfAvailable<LegacyDivergenceAnalysis>());
448  FuncInfo->set(Fn, *MF, CurDAG);
449 
450  // Now get the optional analyzes if we want to.
451  // This is based on the possibly changed OptLevel (after optnone is taken
452  // into account). That's unfortunate but OK because it just means we won't
453  // ask for passes that have been required anyway.
454 
456  FuncInfo->BPI = &getAnalysis<BranchProbabilityInfoWrapperPass>().getBPI();
457  else
458  FuncInfo->BPI = nullptr;
459 
460  if (OptLevel != CodeGenOpt::None)
461  AA = &getAnalysis<AAResultsWrapperPass>().getAAResults();
462  else
463  AA = nullptr;
464 
465  SDB->init(GFI, AA, LibInfo);
466 
467  MF->setHasInlineAsm(false);
468 
469  FuncInfo->SplitCSR = false;
470 
471  // We split CSR if the target supports it for the given function
472  // and the function has only return exits.
474  FuncInfo->SplitCSR = true;
475 
476  // Collect all the return blocks.
477  for (const BasicBlock &BB : Fn) {
478  if (!succ_empty(&BB))
479  continue;
480 
481  const Instruction *Term = BB.getTerminator();
482  if (isa<UnreachableInst>(Term) || isa<ReturnInst>(Term))
483  continue;
484 
485  // Bail out if the exit block is not Return nor Unreachable.
486  FuncInfo->SplitCSR = false;
487  break;
488  }
489  }
490 
491  MachineBasicBlock *EntryMBB = &MF->front();
492  if (FuncInfo->SplitCSR)
493  // This performs initialization so lowering for SplitCSR will be correct.
494  TLI->initializeSplitCSR(EntryMBB);
495 
496  SelectAllBasicBlocks(Fn);
498  DiagnosticInfoISelFallback DiagFallback(Fn);
499  Fn.getContext().diagnose(DiagFallback);
500  }
501 
502  // If the first basic block in the function has live ins that need to be
503  // copied into vregs, emit the copies into the top of the block before
504  // emitting the code for the block.
506  RegInfo->EmitLiveInCopies(EntryMBB, TRI, *TII);
507 
508  // Insert copies in the entry block and the return blocks.
509  if (FuncInfo->SplitCSR) {
511  // Collect all the return blocks.
512  for (MachineBasicBlock &MBB : mf) {
513  if (!MBB.succ_empty())
514  continue;
515 
516  MachineBasicBlock::iterator Term = MBB.getFirstTerminator();
517  if (Term != MBB.end() && Term->isReturn()) {
518  Returns.push_back(&MBB);
519  continue;
520  }
521  }
522  TLI->insertCopiesSplitCSR(EntryMBB, Returns);
523  }
524 
526  if (!FuncInfo->ArgDbgValues.empty())
527  for (std::pair<unsigned, unsigned> LI : RegInfo->liveins())
528  if (LI.second)
529  LiveInMap.insert(LI);
530 
531  // Insert DBG_VALUE instructions for function arguments to the entry block.
532  for (unsigned i = 0, e = FuncInfo->ArgDbgValues.size(); i != e; ++i) {
534  bool hasFI = MI->getOperand(0).isFI();
535  unsigned Reg =
536  hasFI ? TRI.getFrameRegister(*MF) : MI->getOperand(0).getReg();
538  EntryMBB->insert(EntryMBB->begin(), MI);
539  else {
541  if (Def) {
542  MachineBasicBlock::iterator InsertPos = Def;
543  // FIXME: VR def may not be in entry block.
544  Def->getParent()->insert(std::next(InsertPos), MI);
545  } else
546  LLVM_DEBUG(dbgs() << "Dropping debug info for dead vreg"
547  << TargetRegisterInfo::virtReg2Index(Reg) << "\n");
548  }
549 
550  // If Reg is live-in then update debug info to track its copy in a vreg.
551  DenseMap<unsigned, unsigned>::iterator LDI = LiveInMap.find(Reg);
552  if (LDI != LiveInMap.end()) {
553  assert(!hasFI && "There's no handling of frame pointer updating here yet "
554  "- add if needed");
555  MachineInstr *Def = RegInfo->getVRegDef(LDI->second);
556  MachineBasicBlock::iterator InsertPos = Def;
557  const MDNode *Variable = MI->getDebugVariable();
558  const MDNode *Expr = MI->getDebugExpression();
559  DebugLoc DL = MI->getDebugLoc();
560  bool IsIndirect = MI->isIndirectDebugValue();
561  if (IsIndirect)
562  assert(MI->getOperand(1).getImm() == 0 &&
563  "DBG_VALUE with nonzero offset");
564  assert(cast<DILocalVariable>(Variable)->isValidLocationForIntrinsic(DL) &&
565  "Expected inlined-at fields to agree");
566  // Def is never a terminator here, so it is ok to increment InsertPos.
567  BuildMI(*EntryMBB, ++InsertPos, DL, TII->get(TargetOpcode::DBG_VALUE),
568  IsIndirect, LDI->second, Variable, Expr);
569 
570  // If this vreg is directly copied into an exported register then
571  // that COPY instructions also need DBG_VALUE, if it is the only
572  // user of LDI->second.
573  MachineInstr *CopyUseMI = nullptr;
575  UI = RegInfo->use_instr_begin(LDI->second),
576  E = RegInfo->use_instr_end(); UI != E; ) {
577  MachineInstr *UseMI = &*(UI++);
578  if (UseMI->isDebugValue()) continue;
579  if (UseMI->isCopy() && !CopyUseMI && UseMI->getParent() == EntryMBB) {
580  CopyUseMI = UseMI; continue;
581  }
582  // Otherwise this is another use or second copy use.
583  CopyUseMI = nullptr; break;
584  }
585  if (CopyUseMI) {
586  // Use MI's debug location, which describes where Variable was
587  // declared, rather than whatever is attached to CopyUseMI.
588  MachineInstr *NewMI =
589  BuildMI(*MF, DL, TII->get(TargetOpcode::DBG_VALUE), IsIndirect,
590  CopyUseMI->getOperand(0).getReg(), Variable, Expr);
591  MachineBasicBlock::iterator Pos = CopyUseMI;
592  EntryMBB->insertAfter(Pos, NewMI);
593  }
594  }
595  }
596 
597  // Determine if there are any calls in this machine function.
598  MachineFrameInfo &MFI = MF->getFrameInfo();
599  for (const auto &MBB : *MF) {
600  if (MFI.hasCalls() && MF->hasInlineAsm())
601  break;
602 
603  for (const auto &MI : MBB) {
604  const MCInstrDesc &MCID = TII->get(MI.getOpcode());
605  if ((MCID.isCall() && !MCID.isReturn()) ||
606  MI.isStackAligningInlineAsm()) {
607  MFI.setHasCalls(true);
608  }
609  if (MI.isInlineAsm()) {
610  MF->setHasInlineAsm(true);
611  }
612  }
613  }
614 
615  // Determine if there is a call to setjmp in the machine function.
616  MF->setExposesReturnsTwice(Fn.callsFunctionThatReturnsTwice());
617 
618  // Determine if floating point is used for msvc
619  computeUsesMSVCFloatingPoint(TM.getTargetTriple(), Fn, MF->getMMI());
620 
621  // Replace forward-declared registers with the registers containing
622  // the desired value.
623  MachineRegisterInfo &MRI = MF->getRegInfo();
626  I != E; ++I) {
627  unsigned From = I->first;
628  unsigned To = I->second;
629  // If To is also scheduled to be replaced, find what its ultimate
630  // replacement is.
631  while (true) {
633  if (J == E) break;
634  To = J->second;
635  }
636  // Make sure the new register has a sufficiently constrained register class.
639  MRI.constrainRegClass(To, MRI.getRegClass(From));
640  // Replace it.
641 
642 
643  // Replacing one register with another won't touch the kill flags.
644  // We need to conservatively clear the kill flags as a kill on the old
645  // register might dominate existing uses of the new register.
646  if (!MRI.use_empty(To))
647  MRI.clearKillFlags(From);
648  MRI.replaceRegWith(From, To);
649  }
650 
651  TLI->finalizeLowering(*MF);
652 
653  // Release function-specific state. SDB and CurDAG are already cleared
654  // at this point.
655  FuncInfo->clear();
656 
657  LLVM_DEBUG(dbgs() << "*** MachineFunction at end of ISel ***\n");
658  LLVM_DEBUG(MF->print(dbgs()));
659 
660  return true;
661 }
662 
666  bool ShouldAbort) {
667  // Print the function name explicitly if we don't have a debug location (which
668  // makes the diagnostic less useful) or if we're going to emit a raw error.
669  if (!R.getLocation().isValid() || ShouldAbort)
670  R << (" (in function: " + MF.getName() + ")").str();
671 
672  if (ShouldAbort)
674 
675  ORE.emit(R);
676 }
677 
678 void SelectionDAGISel::SelectBasicBlock(BasicBlock::const_iterator Begin,
680  bool &HadTailCall) {
681  // Allow creating illegal types during DAG building for the basic block.
683 
684  // Lower the instructions. If a call is emitted as a tail call, cease emitting
685  // nodes for this block.
686  for (BasicBlock::const_iterator I = Begin; I != End && !SDB->HasTailCall; ++I) {
687  if (!ElidedArgCopyInstrs.count(&*I))
688  SDB->visit(*I);
689  }
690 
691  // Make sure the root of the DAG is up-to-date.
693  HadTailCall = SDB->HasTailCall;
695  SDB->clear();
696 
697  // Final step, emit the lowered DAG as machine code.
698  CodeGenAndEmitDAG();
699 }
700 
701 void SelectionDAGISel::ComputeLiveOutVRegInfo() {
702  SmallPtrSet<SDNode*, 16> VisitedNodes;
703  SmallVector<SDNode*, 128> Worklist;
704 
705  Worklist.push_back(CurDAG->getRoot().getNode());
706 
707  KnownBits Known;
708 
709  do {
710  SDNode *N = Worklist.pop_back_val();
711 
712  // If we've already seen this node, ignore it.
713  if (!VisitedNodes.insert(N).second)
714  continue;
715 
716  // Otherwise, add all chain operands to the worklist.
717  for (const SDValue &Op : N->op_values())
718  if (Op.getValueType() == MVT::Other)
719  Worklist.push_back(Op.getNode());
720 
721  // If this is a CopyToReg with a vreg dest, process it.
722  if (N->getOpcode() != ISD::CopyToReg)
723  continue;
724 
725  unsigned DestReg = cast<RegisterSDNode>(N->getOperand(1))->getReg();
727  continue;
728 
729  // Ignore non-integer values.
730  SDValue Src = N->getOperand(2);
731  EVT SrcVT = Src.getValueType();
732  if (!SrcVT.isInteger())
733  continue;
734 
735  unsigned NumSignBits = CurDAG->ComputeNumSignBits(Src);
736  Known = CurDAG->computeKnownBits(Src);
737  FuncInfo->AddLiveOutRegInfo(DestReg, NumSignBits, Known);
738  } while (!Worklist.empty());
739 }
740 
741 void SelectionDAGISel::CodeGenAndEmitDAG() {
742  StringRef GroupName = "sdag";
743  StringRef GroupDescription = "Instruction Selection and Scheduling";
744  std::string BlockName;
745  int BlockNumber = -1;
746  (void)BlockNumber;
747  bool MatchFilterBB = false; (void)MatchFilterBB;
748 #ifndef NDEBUG
749  TargetTransformInfo &TTI =
750  getAnalysis<TargetTransformInfoWrapperPass>().getTTI(*FuncInfo->Fn);
751 #endif
752 
753  // Pre-type legalization allow creation of any node types.
755 
756 #ifndef NDEBUG
757  MatchFilterBB = (FilterDAGBasicBlockName.empty() ||
760 #endif
761 #ifdef NDEBUG
762  if (ViewDAGCombine1 || ViewLegalizeTypesDAGs || ViewLegalizeDAGs ||
765 #endif
766  {
767  BlockNumber = FuncInfo->MBB->getNumber();
768  BlockName =
769  (MF->getName() + ":" + FuncInfo->MBB->getBasicBlock()->getName()).str();
770  }
771  LLVM_DEBUG(dbgs() << "Initial selection DAG: "
772  << printMBBReference(*FuncInfo->MBB) << " '" << BlockName
773  << "'\n";
774  CurDAG->dump());
775 
776  if (ViewDAGCombine1 && MatchFilterBB)
777  CurDAG->viewGraph("dag-combine1 input for " + BlockName);
778 
779  // Run the DAG combiner in pre-legalize mode.
780  {
781  NamedRegionTimer T("combine1", "DAG Combining 1", GroupName,
782  GroupDescription, TimePassesIsEnabled);
784  }
785 
786 #ifndef NDEBUG
787  if (TTI.hasBranchDivergence())
789 #endif
790 
791  LLVM_DEBUG(dbgs() << "Optimized lowered selection DAG: "
792  << printMBBReference(*FuncInfo->MBB) << " '" << BlockName
793  << "'\n";
794  CurDAG->dump());
795 
796  // Second step, hack on the DAG until it only uses operations and types that
797  // the target supports.
798  if (ViewLegalizeTypesDAGs && MatchFilterBB)
799  CurDAG->viewGraph("legalize-types input for " + BlockName);
800 
801  bool Changed;
802  {
803  NamedRegionTimer T("legalize_types", "Type Legalization", GroupName,
804  GroupDescription, TimePassesIsEnabled);
805  Changed = CurDAG->LegalizeTypes();
806  }
807 
808 #ifndef NDEBUG
809  if (TTI.hasBranchDivergence())
811 #endif
812 
813  LLVM_DEBUG(dbgs() << "Type-legalized selection DAG: "
814  << printMBBReference(*FuncInfo->MBB) << " '" << BlockName
815  << "'\n";
816  CurDAG->dump());
817 
818  // Only allow creation of legal node types.
820 
821  if (Changed) {
822  if (ViewDAGCombineLT && MatchFilterBB)
823  CurDAG->viewGraph("dag-combine-lt input for " + BlockName);
824 
825  // Run the DAG combiner in post-type-legalize mode.
826  {
827  NamedRegionTimer T("combine_lt", "DAG Combining after legalize types",
828  GroupName, GroupDescription, TimePassesIsEnabled);
830  }
831 
832 #ifndef NDEBUG
833  if (TTI.hasBranchDivergence())
835 #endif
836 
837  LLVM_DEBUG(dbgs() << "Optimized type-legalized selection DAG: "
838  << printMBBReference(*FuncInfo->MBB) << " '" << BlockName
839  << "'\n";
840  CurDAG->dump());
841  }
842 
843  {
844  NamedRegionTimer T("legalize_vec", "Vector Legalization", GroupName,
845  GroupDescription, TimePassesIsEnabled);
846  Changed = CurDAG->LegalizeVectors();
847  }
848 
849  if (Changed) {
850  LLVM_DEBUG(dbgs() << "Vector-legalized selection DAG: "
851  << printMBBReference(*FuncInfo->MBB) << " '" << BlockName
852  << "'\n";
853  CurDAG->dump());
854 
855  {
856  NamedRegionTimer T("legalize_types2", "Type Legalization 2", GroupName,
857  GroupDescription, TimePassesIsEnabled);
859  }
860 
861  LLVM_DEBUG(dbgs() << "Vector/type-legalized selection DAG: "
862  << printMBBReference(*FuncInfo->MBB) << " '" << BlockName
863  << "'\n";
864  CurDAG->dump());
865 
866  if (ViewDAGCombineLT && MatchFilterBB)
867  CurDAG->viewGraph("dag-combine-lv input for " + BlockName);
868 
869  // Run the DAG combiner in post-type-legalize mode.
870  {
871  NamedRegionTimer T("combine_lv", "DAG Combining after legalize vectors",
872  GroupName, GroupDescription, TimePassesIsEnabled);
874  }
875 
876  LLVM_DEBUG(dbgs() << "Optimized vector-legalized selection DAG: "
877  << printMBBReference(*FuncInfo->MBB) << " '" << BlockName
878  << "'\n";
879  CurDAG->dump());
880 
881 #ifndef NDEBUG
882  if (TTI.hasBranchDivergence())
884 #endif
885  }
886 
887  if (ViewLegalizeDAGs && MatchFilterBB)
888  CurDAG->viewGraph("legalize input for " + BlockName);
889 
890  {
891  NamedRegionTimer T("legalize", "DAG Legalization", GroupName,
892  GroupDescription, TimePassesIsEnabled);
893  CurDAG->Legalize();
894  }
895 
896 #ifndef NDEBUG
897  if (TTI.hasBranchDivergence())
899 #endif
900 
901  LLVM_DEBUG(dbgs() << "Legalized selection DAG: "
902  << printMBBReference(*FuncInfo->MBB) << " '" << BlockName
903  << "'\n";
904  CurDAG->dump());
905 
906  if (ViewDAGCombine2 && MatchFilterBB)
907  CurDAG->viewGraph("dag-combine2 input for " + BlockName);
908 
909  // Run the DAG combiner in post-legalize mode.
910  {
911  NamedRegionTimer T("combine2", "DAG Combining 2", GroupName,
912  GroupDescription, TimePassesIsEnabled);
914  }
915 
916 #ifndef NDEBUG
917  if (TTI.hasBranchDivergence())
919 #endif
920 
921  LLVM_DEBUG(dbgs() << "Optimized legalized selection DAG: "
922  << printMBBReference(*FuncInfo->MBB) << " '" << BlockName
923  << "'\n";
924  CurDAG->dump());
925 
926  if (OptLevel != CodeGenOpt::None)
927  ComputeLiveOutVRegInfo();
928 
929  if (ViewISelDAGs && MatchFilterBB)
930  CurDAG->viewGraph("isel input for " + BlockName);
931 
932  // Third, instruction select all of the operations to machine code, adding the
933  // code to the MachineBasicBlock.
934  {
935  NamedRegionTimer T("isel", "Instruction Selection", GroupName,
936  GroupDescription, TimePassesIsEnabled);
937  DoInstructionSelection();
938  }
939 
940  LLVM_DEBUG(dbgs() << "Selected selection DAG: "
941  << printMBBReference(*FuncInfo->MBB) << " '" << BlockName
942  << "'\n";
943  CurDAG->dump());
944 
945  if (ViewSchedDAGs && MatchFilterBB)
946  CurDAG->viewGraph("scheduler input for " + BlockName);
947 
948  // Schedule machine code.
949  ScheduleDAGSDNodes *Scheduler = CreateScheduler();
950  {
951  NamedRegionTimer T("sched", "Instruction Scheduling", GroupName,
952  GroupDescription, TimePassesIsEnabled);
953  Scheduler->Run(CurDAG, FuncInfo->MBB);
954  }
955 
956  if (ViewSUnitDAGs && MatchFilterBB)
957  Scheduler->viewGraph();
958 
959  // Emit machine code to BB. This can change 'BB' to the last block being
960  // inserted into.
961  MachineBasicBlock *FirstMBB = FuncInfo->MBB, *LastMBB;
962  {
963  NamedRegionTimer T("emit", "Instruction Creation", GroupName,
964  GroupDescription, TimePassesIsEnabled);
965 
966  // FuncInfo->InsertPt is passed by reference and set to the end of the
967  // scheduled instructions.
968  LastMBB = FuncInfo->MBB = Scheduler->EmitSchedule(FuncInfo->InsertPt);
969  }
970 
971  // If the block was split, make sure we update any references that are used to
972  // update PHI nodes later on.
973  if (FirstMBB != LastMBB)
974  SDB->UpdateSplitBlock(FirstMBB, LastMBB);
975 
976  // Free the scheduler state.
977  {
978  NamedRegionTimer T("cleanup", "Instruction Scheduling Cleanup", GroupName,
979  GroupDescription, TimePassesIsEnabled);
980  delete Scheduler;
981  }
982 
983  // Free the SelectionDAG state, now that we're finished with it.
984  CurDAG->clear();
985 }
986 
987 namespace {
988 
989 /// ISelUpdater - helper class to handle updates of the instruction selection
990 /// graph.
991 class ISelUpdater : public SelectionDAG::DAGUpdateListener {
992  SelectionDAG::allnodes_iterator &ISelPosition;
993 
994 public:
995  ISelUpdater(SelectionDAG &DAG, SelectionDAG::allnodes_iterator &isp)
996  : SelectionDAG::DAGUpdateListener(DAG), ISelPosition(isp) {}
997 
998  /// NodeDeleted - Handle nodes deleted from the graph. If the node being
999  /// deleted is the current ISelPosition node, update ISelPosition.
1000  ///
1001  void NodeDeleted(SDNode *N, SDNode *E) override {
1002  if (ISelPosition == SelectionDAG::allnodes_iterator(N))
1003  ++ISelPosition;
1004  }
1005 };
1006 
1007 } // end anonymous namespace
1008 
1009 // This function is used to enforce the topological node id property
1010 // property leveraged during Instruction selection. Before selection all
1011 // nodes are given a non-negative id such that all nodes have a larger id than
1012 // their operands. As this holds transitively we can prune checks that a node N
1013 // is a predecessor of M another by not recursively checking through M's
1014 // operands if N's ID is larger than M's ID. This is significantly improves
1015 // performance of for various legality checks (e.g. IsLegalToFold /
1016 // UpdateChains).
1017 
1018 // However, when we fuse multiple nodes into a single node
1019 // during selection we may induce a predecessor relationship between inputs and
1020 // outputs of distinct nodes being merged violating the topological property.
1021 // Should a fused node have a successor which has yet to be selected, our
1022 // legality checks would be incorrect. To avoid this we mark all unselected
1023 // sucessor nodes, i.e. id != -1 as invalid for pruning by bit-negating (x =>
1024 // (-(x+1))) the ids and modify our pruning check to ignore negative Ids of M.
1025 // We use bit-negation to more clearly enforce that node id -1 can only be
1026 // achieved by selected nodes). As the conversion is reversable the original Id,
1027 // topological pruning can still be leveraged when looking for unselected nodes.
1028 // This method is call internally in all ISel replacement calls.
1031  Nodes.push_back(Node);
1032 
1033  while (!Nodes.empty()) {
1034  SDNode *N = Nodes.pop_back_val();
1035  for (auto *U : N->uses()) {
1036  auto UId = U->getNodeId();
1037  if (UId > 0) {
1038  InvalidateNodeId(U);
1039  Nodes.push_back(U);
1040  }
1041  }
1042  }
1043 }
1044 
1045 // InvalidateNodeId - As discusses in EnforceNodeIdInvariant, mark a
1046 // NodeId with the equivalent node id which is invalid for topological
1047 // pruning.
1049  int InvalidId = -(N->getNodeId() + 1);
1050  N->setNodeId(InvalidId);
1051 }
1052 
1053 // getUninvalidatedNodeId - get original uninvalidated node id.
1055  int Id = N->getNodeId();
1056  if (Id < -1)
1057  return -(Id + 1);
1058  return Id;
1059 }
1060 
1061 void SelectionDAGISel::DoInstructionSelection() {
1062  LLVM_DEBUG(dbgs() << "===== Instruction selection begins: "
1063  << printMBBReference(*FuncInfo->MBB) << " '"
1064  << FuncInfo->MBB->getName() << "'\n");
1065 
1067 
1068  // Select target instructions for the DAG.
1069  {
1070  // Number all nodes with a topological order and set DAGSize.
1072 
1073  // Create a dummy node (which is not added to allnodes), that adds
1074  // a reference to the root node, preventing it from being deleted,
1075  // and tracking any changes of the root.
1078  ++ISelPosition;
1079 
1080  // Make sure that ISelPosition gets properly updated when nodes are deleted
1081  // in calls made from this function.
1082  ISelUpdater ISU(*CurDAG, ISelPosition);
1083 
1084  // The AllNodes list is now topological-sorted. Visit the
1085  // nodes by starting at the end of the list (the root of the
1086  // graph) and preceding back toward the beginning (the entry
1087  // node).
1088  while (ISelPosition != CurDAG->allnodes_begin()) {
1089  SDNode *Node = &*--ISelPosition;
1090  // Skip dead nodes. DAGCombiner is expected to eliminate all dead nodes,
1091  // but there are currently some corner cases that it misses. Also, this
1092  // makes it theoretically possible to disable the DAGCombiner.
1093  if (Node->use_empty())
1094  continue;
1095 
1096 #ifndef NDEBUG
1098  Nodes.push_back(Node);
1099 
1100  while (!Nodes.empty()) {
1101  auto N = Nodes.pop_back_val();
1102  if (N->getOpcode() == ISD::TokenFactor || N->getNodeId() < 0)
1103  continue;
1104  for (const SDValue &Op : N->op_values()) {
1105  if (Op->getOpcode() == ISD::TokenFactor)
1106  Nodes.push_back(Op.getNode());
1107  else {
1108  // We rely on topological ordering of node ids for checking for
1109  // cycles when fusing nodes during selection. All unselected nodes
1110  // successors of an already selected node should have a negative id.
1111  // This assertion will catch such cases. If this assertion triggers
1112  // it is likely you using DAG-level Value/Node replacement functions
1113  // (versus equivalent ISEL replacement) in backend-specific
1114  // selections. See comment in EnforceNodeIdInvariant for more
1115  // details.
1116  assert(Op->getNodeId() != -1 &&
1117  "Node has already selected predecessor node");
1118  }
1119  }
1120  }
1121 #endif
1122 
1123  // When we are using non-default rounding modes or FP exception behavior
1124  // FP operations are represented by StrictFP pseudo-operations. They
1125  // need to be simplified here so that the target-specific instruction
1126  // selectors know how to handle them.
1127  //
1128  // If the current node is a strict FP pseudo-op, the isStrictFPOp()
1129  // function will provide the corresponding normal FP opcode to which the
1130  // node should be mutated.
1131  //
1132  // FIXME: The backends need a way to handle FP constraints.
1133  if (Node->isStrictFPOpcode())
1134  Node = CurDAG->mutateStrictFPToFP(Node);
1135 
1136  LLVM_DEBUG(dbgs() << "\nISEL: Starting selection on root node: ";
1137  Node->dump(CurDAG));
1138 
1139  Select(Node);
1140  }
1141 
1142  CurDAG->setRoot(Dummy.getValue());
1143  }
1144 
1145  LLVM_DEBUG(dbgs() << "\n===== Instruction selection ends:\n");
1146 
1148 }
1149 
1151  for (const User *U : CPI->users()) {
1152  if (const IntrinsicInst *EHPtrCall = dyn_cast<IntrinsicInst>(U)) {
1153  Intrinsic::ID IID = EHPtrCall->getIntrinsicID();
1154  if (IID == Intrinsic::eh_exceptionpointer ||
1155  IID == Intrinsic::eh_exceptioncode)
1156  return true;
1157  }
1158  }
1159  return false;
1160 }
1161 
1162 // wasm.landingpad.index intrinsic is for associating a landing pad index number
1163 // with a catchpad instruction. Retrieve the landing pad index in the intrinsic
1164 // and store the mapping in the function.
1166  const CatchPadInst *CPI) {
1167  MachineFunction *MF = MBB->getParent();
1168  // In case of single catch (...), we don't emit LSDA, so we don't need
1169  // this information.
1170  bool IsSingleCatchAllClause =
1171  CPI->getNumArgOperands() == 1 &&
1172  cast<Constant>(CPI->getArgOperand(0))->isNullValue();
1173  if (!IsSingleCatchAllClause) {
1174  // Create a mapping from landing pad label to landing pad index.
1175  bool IntrFound = false;
1176  for (const User *U : CPI->users()) {
1177  if (const auto *Call = dyn_cast<IntrinsicInst>(U)) {
1178  Intrinsic::ID IID = Call->getIntrinsicID();
1179  if (IID == Intrinsic::wasm_landingpad_index) {
1180  Value *IndexArg = Call->getArgOperand(1);
1181  int Index = cast<ConstantInt>(IndexArg)->getZExtValue();
1182  MF->setWasmLandingPadIndex(MBB, Index);
1183  IntrFound = true;
1184  break;
1185  }
1186  }
1187  }
1188  assert(IntrFound && "wasm.landingpad.index intrinsic not found!");
1189  (void)IntrFound;
1190  }
1191 }
1192 
1193 /// PrepareEHLandingPad - Emit an EH_LABEL, set up live-in registers, and
1194 /// do other setup for EH landing-pad blocks.
1195 bool SelectionDAGISel::PrepareEHLandingPad() {
1196  MachineBasicBlock *MBB = FuncInfo->MBB;
1197  const Constant *PersonalityFn = FuncInfo->Fn->getPersonalityFn();
1198  const BasicBlock *LLVMBB = MBB->getBasicBlock();
1199  const TargetRegisterClass *PtrRC =
1201 
1202  auto Pers = classifyEHPersonality(PersonalityFn);
1203 
1204  // Catchpads have one live-in register, which typically holds the exception
1205  // pointer or code.
1206  if (isFuncletEHPersonality(Pers)) {
1207  if (const auto *CPI = dyn_cast<CatchPadInst>(LLVMBB->getFirstNonPHI())) {
1208  if (hasExceptionPointerOrCodeUser(CPI)) {
1209  // Get or create the virtual register to hold the pointer or code. Mark
1210  // the live in physreg and copy into the vreg.
1211  MCPhysReg EHPhysReg = TLI->getExceptionPointerRegister(PersonalityFn);
1212  assert(EHPhysReg && "target lacks exception pointer register");
1213  MBB->addLiveIn(EHPhysReg);
1214  unsigned VReg = FuncInfo->getCatchPadExceptionPointerVReg(CPI, PtrRC);
1216  TII->get(TargetOpcode::COPY), VReg)
1217  .addReg(EHPhysReg, RegState::Kill);
1218  }
1219  }
1220  return true;
1221  }
1222 
1223  // Add a label to mark the beginning of the landing pad. Deletion of the
1224  // landing pad can thus be detected via the MachineModuleInfo.
1225  MCSymbol *Label = MF->addLandingPad(MBB);
1226 
1227  const MCInstrDesc &II = TII->get(TargetOpcode::EH_LABEL);
1228  BuildMI(*MBB, FuncInfo->InsertPt, SDB->getCurDebugLoc(), II)
1229  .addSym(Label);
1230 
1231  if (Pers == EHPersonality::Wasm_CXX) {
1232  if (const auto *CPI = dyn_cast<CatchPadInst>(LLVMBB->getFirstNonPHI()))
1233  mapWasmLandingPadIndex(MBB, CPI);
1234  } else {
1235  // Assign the call site to the landing pad's begin label.
1237  // Mark exception register as live in.
1238  if (unsigned Reg = TLI->getExceptionPointerRegister(PersonalityFn))
1240  // Mark exception selector register as live in.
1241  if (unsigned Reg = TLI->getExceptionSelectorRegister(PersonalityFn))
1243  }
1244 
1245  return true;
1246 }
1247 
1248 /// isFoldedOrDeadInstruction - Return true if the specified instruction is
1249 /// side-effect free and is either dead or folded into a generated instruction.
1250 /// Return false if it needs to be emitted.
1253  return !I->mayWriteToMemory() && // Side-effecting instructions aren't folded.
1254  !I->isTerminator() && // Terminators aren't folded.
1255  !isa<DbgInfoIntrinsic>(I) && // Debug instructions aren't folded.
1256  !I->isEHPad() && // EH pad instructions aren't folded.
1257  !FuncInfo->isExportedInst(I); // Exported instrs must be computed.
1258 }
1259 
1260 /// Set up SwiftErrorVals by going through the function. If the function has
1261 /// swifterror argument, it will be the first entry.
1262 static void setupSwiftErrorVals(const Function &Fn, const TargetLowering *TLI,
1264  if (!TLI->supportSwiftError())
1265  return;
1266 
1267  FuncInfo->SwiftErrorVals.clear();
1268  FuncInfo->SwiftErrorVRegDefMap.clear();
1269  FuncInfo->SwiftErrorVRegUpwardsUse.clear();
1270  FuncInfo->SwiftErrorVRegDefUses.clear();
1271  FuncInfo->SwiftErrorArg = nullptr;
1272 
1273  // Check if function has a swifterror argument.
1274  bool HaveSeenSwiftErrorArg = false;
1275  for (Function::const_arg_iterator AI = Fn.arg_begin(), AE = Fn.arg_end();
1276  AI != AE; ++AI)
1277  if (AI->hasSwiftErrorAttr()) {
1278  assert(!HaveSeenSwiftErrorArg &&
1279  "Must have only one swifterror parameter");
1280  (void)HaveSeenSwiftErrorArg; // silence warning.
1281  HaveSeenSwiftErrorArg = true;
1282  FuncInfo->SwiftErrorArg = &*AI;
1283  FuncInfo->SwiftErrorVals.push_back(&*AI);
1284  }
1285 
1286  for (const auto &LLVMBB : Fn)
1287  for (const auto &Inst : LLVMBB) {
1288  if (const AllocaInst *Alloca = dyn_cast<AllocaInst>(&Inst))
1289  if (Alloca->isSwiftError())
1290  FuncInfo->SwiftErrorVals.push_back(Alloca);
1291  }
1292 }
1293 
1295  FastISel *FastIS,
1296  const TargetLowering *TLI,
1297  const TargetInstrInfo *TII,
1299  if (!TLI->supportSwiftError())
1300  return;
1301 
1302  // We only need to do this when we have swifterror parameter or swifterror
1303  // alloc.
1304  if (FuncInfo->SwiftErrorVals.empty())
1305  return;
1306 
1307  assert(FuncInfo->MBB == &*FuncInfo->MF->begin() &&
1308  "expected to insert into entry block");
1309  auto &DL = FuncInfo->MF->getDataLayout();
1310  auto const *RC = TLI->getRegClassFor(TLI->getPointerTy(DL));
1311  for (const auto *SwiftErrorVal : FuncInfo->SwiftErrorVals) {
1312  // We will always generate a copy from the argument. It is always used at
1313  // least by the 'return' of the swifterror.
1314  if (FuncInfo->SwiftErrorArg && FuncInfo->SwiftErrorArg == SwiftErrorVal)
1315  continue;
1316  unsigned VReg = FuncInfo->MF->getRegInfo().createVirtualRegister(RC);
1317  // Assign Undef to Vreg. We construct MI directly to make sure it works
1318  // with FastISel.
1319  BuildMI(*FuncInfo->MBB, FuncInfo->MBB->getFirstNonPHI(),
1320  SDB->getCurDebugLoc(), TII->get(TargetOpcode::IMPLICIT_DEF),
1321  VReg);
1322 
1323  // Keep FastIS informed about the value we just inserted.
1324  if (FastIS)
1325  FastIS->setLastLocalValue(&*std::prev(FuncInfo->InsertPt));
1326 
1327  FuncInfo->setCurrentSwiftErrorVReg(FuncInfo->MBB, SwiftErrorVal, VReg);
1328  }
1329 }
1330 
1331 /// Collect llvm.dbg.declare information. This is done after argument lowering
1332 /// in case the declarations refer to arguments.
1334  MachineFunction *MF = FuncInfo->MF;
1335  const DataLayout &DL = MF->getDataLayout();
1336  for (const BasicBlock &BB : *FuncInfo->Fn) {
1337  for (const Instruction &I : BB) {
1338  const DbgDeclareInst *DI = dyn_cast<DbgDeclareInst>(&I);
1339  if (!DI)
1340  continue;
1341 
1342  assert(DI->getVariable() && "Missing variable");
1343  assert(DI->getDebugLoc() && "Missing location");
1344  const Value *Address = DI->getAddress();
1345  if (!Address)
1346  continue;
1347 
1348  // Look through casts and constant offset GEPs. These mostly come from
1349  // inalloca.
1350  APInt Offset(DL.getTypeSizeInBits(Address->getType()), 0);
1351  Address = Address->stripAndAccumulateInBoundsConstantOffsets(DL, Offset);
1352 
1353  // Check if the variable is a static alloca or a byval or inalloca
1354  // argument passed in memory. If it is not, then we will ignore this
1355  // intrinsic and handle this during isel like dbg.value.
1356  int FI = std::numeric_limits<int>::max();
1357  if (const auto *AI = dyn_cast<AllocaInst>(Address)) {
1358  auto SI = FuncInfo->StaticAllocaMap.find(AI);
1359  if (SI != FuncInfo->StaticAllocaMap.end())
1360  FI = SI->second;
1361  } else if (const auto *Arg = dyn_cast<Argument>(Address))
1362  FI = FuncInfo->getArgumentFrameIndex(Arg);
1363 
1364  if (FI == std::numeric_limits<int>::max())
1365  continue;
1366 
1367  DIExpression *Expr = DI->getExpression();
1368  if (Offset.getBoolValue())
1370  Offset.getZExtValue());
1371  MF->setVariableDbgInfo(DI->getVariable(), Expr, FI, DI->getDebugLoc());
1372  }
1373  }
1374 }
1375 
1376 /// Propagate swifterror values through the machine function CFG.
1378  auto *TLI = FuncInfo->TLI;
1379  if (!TLI->supportSwiftError())
1380  return;
1381 
1382  // We only need to do this when we have swifterror parameter or swifterror
1383  // alloc.
1384  if (FuncInfo->SwiftErrorVals.empty())
1385  return;
1386 
1387  // For each machine basic block in reverse post order.
1389  for (MachineBasicBlock *MBB : RPOT) {
1390  // For each swifterror value in the function.
1391  for(const auto *SwiftErrorVal : FuncInfo->SwiftErrorVals) {
1392  auto Key = std::make_pair(MBB, SwiftErrorVal);
1393  auto UUseIt = FuncInfo->SwiftErrorVRegUpwardsUse.find(Key);
1394  auto VRegDefIt = FuncInfo->SwiftErrorVRegDefMap.find(Key);
1395  bool UpwardsUse = UUseIt != FuncInfo->SwiftErrorVRegUpwardsUse.end();
1396  unsigned UUseVReg = UpwardsUse ? UUseIt->second : 0;
1397  bool DownwardDef = VRegDefIt != FuncInfo->SwiftErrorVRegDefMap.end();
1398  assert(!(UpwardsUse && !DownwardDef) &&
1399  "We can't have an upwards use but no downwards def");
1400 
1401  // If there is no upwards exposed use and an entry for the swifterror in
1402  // the def map for this value we don't need to do anything: We already
1403  // have a downward def for this basic block.
1404  if (!UpwardsUse && DownwardDef)
1405  continue;
1406 
1407  // Otherwise we either have an upwards exposed use vreg that we need to
1408  // materialize or need to forward the downward def from predecessors.
1409 
1410  // Check whether we have a single vreg def from all predecessors.
1411  // Otherwise we need a phi.
1414  for (auto *Pred : MBB->predecessors()) {
1415  if (!Visited.insert(Pred).second)
1416  continue;
1417  VRegs.push_back(std::make_pair(
1418  Pred, FuncInfo->getOrCreateSwiftErrorVReg(Pred, SwiftErrorVal)));
1419  if (Pred != MBB)
1420  continue;
1421  // We have a self-edge.
1422  // If there was no upwards use in this basic block there is now one: the
1423  // phi needs to use it self.
1424  if (!UpwardsUse) {
1425  UpwardsUse = true;
1426  UUseIt = FuncInfo->SwiftErrorVRegUpwardsUse.find(Key);
1427  assert(UUseIt != FuncInfo->SwiftErrorVRegUpwardsUse.end());
1428  UUseVReg = UUseIt->second;
1429  }
1430  }
1431 
1432  // We need a phi node if we have more than one predecessor with different
1433  // downward defs.
1434  bool needPHI =
1435  VRegs.size() >= 1 &&
1436  std::find_if(
1437  VRegs.begin(), VRegs.end(),
1438  [&](const std::pair<const MachineBasicBlock *, unsigned> &V)
1439  -> bool { return V.second != VRegs[0].second; }) !=
1440  VRegs.end();
1441 
1442  // If there is no upwards exposed used and we don't need a phi just
1443  // forward the swifterror vreg from the predecessor(s).
1444  if (!UpwardsUse && !needPHI) {
1445  assert(!VRegs.empty() &&
1446  "No predecessors? The entry block should bail out earlier");
1447  // Just forward the swifterror vreg from the predecessor(s).
1448  FuncInfo->setCurrentSwiftErrorVReg(MBB, SwiftErrorVal, VRegs[0].second);
1449  continue;
1450  }
1451 
1452  auto DLoc = isa<Instruction>(SwiftErrorVal)
1453  ? cast<Instruction>(SwiftErrorVal)->getDebugLoc()
1454  : DebugLoc();
1455  const auto *TII = FuncInfo->MF->getSubtarget().getInstrInfo();
1456 
1457  // If we don't need a phi create a copy to the upward exposed vreg.
1458  if (!needPHI) {
1459  assert(UpwardsUse);
1460  assert(!VRegs.empty() &&
1461  "No predecessors? Is the Calling Convention correct?");
1462  unsigned DestReg = UUseVReg;
1463  BuildMI(*MBB, MBB->getFirstNonPHI(), DLoc, TII->get(TargetOpcode::COPY),
1464  DestReg)
1465  .addReg(VRegs[0].second);
1466  continue;
1467  }
1468 
1469  // We need a phi: if there is an upwards exposed use we already have a
1470  // destination virtual register number otherwise we generate a new one.
1471  auto &DL = FuncInfo->MF->getDataLayout();
1472  auto const *RC = TLI->getRegClassFor(TLI->getPointerTy(DL));
1473  unsigned PHIVReg =
1474  UpwardsUse ? UUseVReg
1475  : FuncInfo->MF->getRegInfo().createVirtualRegister(RC);
1476  MachineInstrBuilder SwiftErrorPHI =
1477  BuildMI(*MBB, MBB->getFirstNonPHI(), DLoc,
1478  TII->get(TargetOpcode::PHI), PHIVReg);
1479  for (auto BBRegPair : VRegs) {
1480  SwiftErrorPHI.addReg(BBRegPair.second).addMBB(BBRegPair.first);
1481  }
1482 
1483  // We did not have a definition in this block before: store the phi's vreg
1484  // as this block downward exposed def.
1485  if (!UpwardsUse)
1486  FuncInfo->setCurrentSwiftErrorVReg(MBB, SwiftErrorVal, PHIVReg);
1487  }
1488  }
1489 }
1490 
1495  if (!TLI->supportSwiftError() || FuncInfo->SwiftErrorVals.empty())
1496  return;
1497 
1498  // Iterator over instructions and assign vregs to swifterror defs and uses.
1499  for (auto It = Begin; It != End; ++It) {
1500  ImmutableCallSite CS(&*It);
1501  if (CS) {
1502  // A call-site with a swifterror argument is both use and def.
1503  const Value *SwiftErrorAddr = nullptr;
1504  for (auto &Arg : CS.args()) {
1505  if (!Arg->isSwiftError())
1506  continue;
1507  // Use of swifterror.
1508  assert(!SwiftErrorAddr && "Cannot have multiple swifterror arguments");
1509  SwiftErrorAddr = &*Arg;
1510  assert(SwiftErrorAddr->isSwiftError() &&
1511  "Must have a swifterror value argument");
1512  unsigned VReg; bool CreatedReg;
1513  std::tie(VReg, CreatedReg) = FuncInfo->getOrCreateSwiftErrorVRegUseAt(
1514  &*It, FuncInfo->MBB, SwiftErrorAddr);
1515  assert(CreatedReg);
1516  }
1517  if (!SwiftErrorAddr)
1518  continue;
1519 
1520  // Def of swifterror.
1521  unsigned VReg; bool CreatedReg;
1522  std::tie(VReg, CreatedReg) =
1523  FuncInfo->getOrCreateSwiftErrorVRegDefAt(&*It);
1524  assert(CreatedReg);
1525  FuncInfo->setCurrentSwiftErrorVReg(FuncInfo->MBB, SwiftErrorAddr, VReg);
1526 
1527  // A load is a use.
1528  } else if (const LoadInst *LI = dyn_cast<const LoadInst>(&*It)) {
1529  const Value *V = LI->getOperand(0);
1530  if (!V->isSwiftError())
1531  continue;
1532 
1533  unsigned VReg; bool CreatedReg;
1534  std::tie(VReg, CreatedReg) =
1535  FuncInfo->getOrCreateSwiftErrorVRegUseAt(LI, FuncInfo->MBB, V);
1536  assert(CreatedReg);
1537 
1538  // A store is a def.
1539  } else if (const StoreInst *SI = dyn_cast<const StoreInst>(&*It)) {
1540  const Value *SwiftErrorAddr = SI->getOperand(1);
1541  if (!SwiftErrorAddr->isSwiftError())
1542  continue;
1543 
1544  // Def of swifterror.
1545  unsigned VReg; bool CreatedReg;
1546  std::tie(VReg, CreatedReg) =
1547  FuncInfo->getOrCreateSwiftErrorVRegDefAt(&*It);
1548  assert(CreatedReg);
1549  FuncInfo->setCurrentSwiftErrorVReg(FuncInfo->MBB, SwiftErrorAddr, VReg);
1550 
1551  // A return in a swiferror returning function is a use.
1552  } else if (const ReturnInst *R = dyn_cast<const ReturnInst>(&*It)) {
1553  const Function *F = R->getParent()->getParent();
1554  if(!F->getAttributes().hasAttrSomewhere(Attribute::SwiftError))
1555  continue;
1556 
1557  unsigned VReg; bool CreatedReg;
1558  std::tie(VReg, CreatedReg) = FuncInfo->getOrCreateSwiftErrorVRegUseAt(
1559  R, FuncInfo->MBB, FuncInfo->SwiftErrorArg);
1560  assert(CreatedReg);
1561  }
1562  }
1563 }
1564 
1565 void SelectionDAGISel::SelectAllBasicBlocks(const Function &Fn) {
1566  FastISelFailed = false;
1567  // Initialize the Fast-ISel state, if needed.
1568  FastISel *FastIS = nullptr;
1569  if (TM.Options.EnableFastISel) {
1570  LLVM_DEBUG(dbgs() << "Enabling fast-isel\n");
1571  FastIS = TLI->createFastISel(*FuncInfo, LibInfo);
1572  }
1573 
1575 
1577 
1578  // Lower arguments up front. An RPO iteration always visits the entry block
1579  // first.
1580  assert(*RPOT.begin() == &Fn.getEntryBlock());
1581  ++NumEntryBlocks;
1582 
1583  // Set up FuncInfo for ISel. Entry blocks never have PHIs.
1586 
1588 
1589  if (!FastIS) {
1590  LowerArguments(Fn);
1591  } else {
1592  // See if fast isel can lower the arguments.
1593  FastIS->startNewBlock();
1594  if (!FastIS->lowerArguments()) {
1595  FastISelFailed = true;
1596  // Fast isel failed to lower these arguments
1597  ++NumFastIselFailLowerArguments;
1598 
1599  OptimizationRemarkMissed R("sdagisel", "FastISelFailure",
1600  Fn.getSubprogram(),
1601  &Fn.getEntryBlock());
1602  R << "FastISel didn't lower all arguments: "
1603  << ore::NV("Prototype", Fn.getType());
1605 
1606  // Use SelectionDAG argument lowering
1607  LowerArguments(Fn);
1609  SDB->clear();
1610  CodeGenAndEmitDAG();
1611  }
1612 
1613  // If we inserted any instructions at the beginning, make a note of
1614  // where they are, so we can be sure to emit subsequent instructions
1615  // after them.
1616  if (FuncInfo->InsertPt != FuncInfo->MBB->begin())
1617  FastIS->setLastLocalValue(&*std::prev(FuncInfo->InsertPt));
1618  else
1619  FastIS->setLastLocalValue(nullptr);
1620  }
1622 
1624 
1625  // Iterate over all basic blocks in the function.
1626  StackProtector &SP = getAnalysis<StackProtector>();
1627  for (const BasicBlock *LLVMBB : RPOT) {
1628  if (OptLevel != CodeGenOpt::None) {
1629  bool AllPredsVisited = true;
1630  for (const_pred_iterator PI = pred_begin(LLVMBB), PE = pred_end(LLVMBB);
1631  PI != PE; ++PI) {
1632  if (!FuncInfo->VisitedBBs.count(*PI)) {
1633  AllPredsVisited = false;
1634  break;
1635  }
1636  }
1637 
1638  if (AllPredsVisited) {
1639  for (const PHINode &PN : LLVMBB->phis())
1641  } else {
1642  for (const PHINode &PN : LLVMBB->phis())
1644  }
1645 
1646  FuncInfo->VisitedBBs.insert(LLVMBB);
1647  }
1648 
1649  BasicBlock::const_iterator const Begin =
1650  LLVMBB->getFirstNonPHI()->getIterator();
1651  BasicBlock::const_iterator const End = LLVMBB->end();
1652  BasicBlock::const_iterator BI = End;
1653 
1654  FuncInfo->MBB = FuncInfo->MBBMap[LLVMBB];
1655  if (!FuncInfo->MBB)
1656  continue; // Some blocks like catchpads have no code or MBB.
1657 
1658  // Insert new instructions after any phi or argument setup code.
1659  FuncInfo->InsertPt = FuncInfo->MBB->end();
1660 
1661  // Setup an EH landing-pad block.
1664  if (LLVMBB->isEHPad())
1665  if (!PrepareEHLandingPad())
1666  continue;
1667 
1668  // Before doing SelectionDAG ISel, see if FastISel has been requested.
1669  if (FastIS) {
1670  if (LLVMBB != &Fn.getEntryBlock())
1671  FastIS->startNewBlock();
1672 
1673  unsigned NumFastIselRemaining = std::distance(Begin, End);
1674 
1675  // Pre-assign swifterror vregs.
1676  preassignSwiftErrorRegs(TLI, FuncInfo, Begin, End);
1677 
1678  // Do FastISel on as many instructions as possible.
1679  for (; BI != Begin; --BI) {
1680  const Instruction *Inst = &*std::prev(BI);
1681 
1682  // If we no longer require this instruction, skip it.
1683  if (isFoldedOrDeadInstruction(Inst, FuncInfo) ||
1684  ElidedArgCopyInstrs.count(Inst)) {
1685  --NumFastIselRemaining;
1686  continue;
1687  }
1688 
1689  // Bottom-up: reset the insert pos at the top, after any local-value
1690  // instructions.
1691  FastIS->recomputeInsertPt();
1692 
1693  // Try to select the instruction with FastISel.
1694  if (FastIS->selectInstruction(Inst)) {
1695  --NumFastIselRemaining;
1696  ++NumFastIselSuccess;
1697  // If fast isel succeeded, skip over all the folded instructions, and
1698  // then see if there is a load right before the selected instructions.
1699  // Try to fold the load if so.
1700  const Instruction *BeforeInst = Inst;
1701  while (BeforeInst != &*Begin) {
1702  BeforeInst = &*std::prev(BasicBlock::const_iterator(BeforeInst));
1703  if (!isFoldedOrDeadInstruction(BeforeInst, FuncInfo))
1704  break;
1705  }
1706  if (BeforeInst != Inst && isa<LoadInst>(BeforeInst) &&
1707  BeforeInst->hasOneUse() &&
1708  FastIS->tryToFoldLoad(cast<LoadInst>(BeforeInst), Inst)) {
1709  // If we succeeded, don't re-select the load.
1710  BI = std::next(BasicBlock::const_iterator(BeforeInst));
1711  --NumFastIselRemaining;
1712  ++NumFastIselSuccess;
1713  }
1714  continue;
1715  }
1716 
1717  FastISelFailed = true;
1718 
1719  // Then handle certain instructions as single-LLVM-Instruction blocks.
1720  // We cannot separate out GCrelocates to their own blocks since we need
1721  // to keep track of gc-relocates for a particular gc-statepoint. This is
1722  // done by SelectionDAGBuilder::LowerAsSTATEPOINT, called before
1723  // visitGCRelocate.
1724  if (isa<CallInst>(Inst) && !isStatepoint(Inst) && !isGCRelocate(Inst)) {
1725  OptimizationRemarkMissed R("sdagisel", "FastISelFailure",
1726  Inst->getDebugLoc(), LLVMBB);
1727 
1728  R << "FastISel missed call";
1729 
1730  if (R.isEnabled() || EnableFastISelAbort) {
1731  std::string InstStrStorage;
1732  raw_string_ostream InstStr(InstStrStorage);
1733  InstStr << *Inst;
1734 
1735  R << ": " << InstStr.str();
1736  }
1737 
1739 
1740  if (!Inst->getType()->isVoidTy() && !Inst->getType()->isTokenTy() &&
1741  !Inst->use_empty()) {
1742  unsigned &R = FuncInfo->ValueMap[Inst];
1743  if (!R)
1744  R = FuncInfo->CreateRegs(Inst->getType());
1745  }
1746 
1747  bool HadTailCall = false;
1749  SelectBasicBlock(Inst->getIterator(), BI, HadTailCall);
1750 
1751  // If the call was emitted as a tail call, we're done with the block.
1752  // We also need to delete any previously emitted instructions.
1753  if (HadTailCall) {
1754  FastIS->removeDeadCode(SavedInsertPt, FuncInfo->MBB->end());
1755  --BI;
1756  break;
1757  }
1758 
1759  // Recompute NumFastIselRemaining as Selection DAG instruction
1760  // selection may have handled the call, input args, etc.
1761  unsigned RemainingNow = std::distance(Begin, BI);
1762  NumFastIselFailures += NumFastIselRemaining - RemainingNow;
1763  NumFastIselRemaining = RemainingNow;
1764  continue;
1765  }
1766 
1767  OptimizationRemarkMissed R("sdagisel", "FastISelFailure",
1768  Inst->getDebugLoc(), LLVMBB);
1769 
1770  bool ShouldAbort = EnableFastISelAbort;
1771  if (Inst->isTerminator()) {
1772  // Use a different message for terminator misses.
1773  R << "FastISel missed terminator";
1774  // Don't abort for terminator unless the level is really high
1775  ShouldAbort = (EnableFastISelAbort > 2);
1776  } else {
1777  R << "FastISel missed";
1778  }
1779 
1780  if (R.isEnabled() || EnableFastISelAbort) {
1781  std::string InstStrStorage;
1782  raw_string_ostream InstStr(InstStrStorage);
1783  InstStr << *Inst;
1784  R << ": " << InstStr.str();
1785  }
1786 
1787  reportFastISelFailure(*MF, *ORE, R, ShouldAbort);
1788 
1789  NumFastIselFailures += NumFastIselRemaining;
1790  break;
1791  }
1792 
1793  FastIS->recomputeInsertPt();
1794  }
1795 
1796  if (SP.shouldEmitSDCheck(*LLVMBB)) {
1797  bool FunctionBasedInstrumentation =
1799  SDB->SPDescriptor.initialize(LLVMBB, FuncInfo->MBBMap[LLVMBB],
1800  FunctionBasedInstrumentation);
1801  }
1802 
1803  if (Begin != BI)
1804  ++NumDAGBlocks;
1805  else
1806  ++NumFastIselBlocks;
1807 
1808  if (Begin != BI) {
1809  // Run SelectionDAG instruction selection on the remainder of the block
1810  // not handled by FastISel. If FastISel is not run, this is the entire
1811  // block.
1812  bool HadTailCall;
1813  SelectBasicBlock(Begin, BI, HadTailCall);
1814 
1815  // But if FastISel was run, we already selected some of the block.
1816  // If we emitted a tail-call, we need to delete any previously emitted
1817  // instruction that follows it.
1818  if (HadTailCall && FuncInfo->InsertPt != FuncInfo->MBB->end())
1820  }
1821 
1822  if (FastIS)
1823  FastIS->finishBasicBlock();
1824  FinishBasicBlock();
1825  FuncInfo->PHINodesToUpdate.clear();
1826  ElidedArgCopyInstrs.clear();
1827  }
1828 
1830 
1832 
1833  delete FastIS;
1835  SDB->SPDescriptor.resetPerFunctionState();
1836 }
1837 
1838 /// Given that the input MI is before a partial terminator sequence TSeq, return
1839 /// true if M + TSeq also a partial terminator sequence.
1840 ///
1841 /// A Terminator sequence is a sequence of MachineInstrs which at this point in
1842 /// lowering copy vregs into physical registers, which are then passed into
1843 /// terminator instructors so we can satisfy ABI constraints. A partial
1844 /// terminator sequence is an improper subset of a terminator sequence (i.e. it
1845 /// may be the whole terminator sequence).
1847  // If we do not have a copy or an implicit def, we return true if and only if
1848  // MI is a debug value.
1849  if (!MI.isCopy() && !MI.isImplicitDef())
1850  // Sometimes DBG_VALUE MI sneak in between the copies from the vregs to the
1851  // physical registers if there is debug info associated with the terminator
1852  // of our mbb. We want to include said debug info in our terminator
1853  // sequence, so we return true in that case.
1854  return MI.isDebugValue();
1855 
1856  // We have left the terminator sequence if we are not doing one of the
1857  // following:
1858  //
1859  // 1. Copying a vreg into a physical register.
1860  // 2. Copying a vreg into a vreg.
1861  // 3. Defining a register via an implicit def.
1862 
1863  // OPI should always be a register definition...
1865  if (!OPI->isReg() || !OPI->isDef())
1866  return false;
1867 
1868  // Defining any register via an implicit def is always ok.
1869  if (MI.isImplicitDef())
1870  return true;
1871 
1872  // Grab the copy source...
1874  ++OPI2;
1875  assert(OPI2 != MI.operands_end()
1876  && "Should have a copy implying we should have 2 arguments.");
1877 
1878  // Make sure that the copy dest is not a vreg when the copy source is a
1879  // physical register.
1880  if (!OPI2->isReg() ||
1883  return false;
1884 
1885  return true;
1886 }
1887 
1888 /// Find the split point at which to splice the end of BB into its success stack
1889 /// protector check machine basic block.
1890 ///
1891 /// On many platforms, due to ABI constraints, terminators, even before register
1892 /// allocation, use physical registers. This creates an issue for us since
1893 /// physical registers at this point can not travel across basic
1894 /// blocks. Luckily, selectiondag always moves physical registers into vregs
1895 /// when they enter functions and moves them through a sequence of copies back
1896 /// into the physical registers right before the terminator creating a
1897 /// ``Terminator Sequence''. This function is searching for the beginning of the
1898 /// terminator sequence so that we can ensure that we splice off not just the
1899 /// terminator, but additionally the copies that move the vregs into the
1900 /// physical registers.
1904  //
1905  if (SplitPoint == BB->begin())
1906  return SplitPoint;
1907 
1908  MachineBasicBlock::iterator Start = BB->begin();
1909  MachineBasicBlock::iterator Previous = SplitPoint;
1910  --Previous;
1911 
1912  while (MIIsInTerminatorSequence(*Previous)) {
1913  SplitPoint = Previous;
1914  if (Previous == Start)
1915  break;
1916  --Previous;
1917  }
1918 
1919  return SplitPoint;
1920 }
1921 
1922 void
1923 SelectionDAGISel::FinishBasicBlock() {
1924  LLVM_DEBUG(dbgs() << "Total amount of phi nodes to update: "
1925  << FuncInfo->PHINodesToUpdate.size() << "\n";
1926  for (unsigned i = 0, e = FuncInfo->PHINodesToUpdate.size(); i != e;
1927  ++i) dbgs()
1928  << "Node " << i << " : (" << FuncInfo->PHINodesToUpdate[i].first
1929  << ", " << FuncInfo->PHINodesToUpdate[i].second << ")\n");
1930 
1931  // Next, now that we know what the last MBB the LLVM BB expanded is, update
1932  // PHI nodes in successors.
1933  for (unsigned i = 0, e = FuncInfo->PHINodesToUpdate.size(); i != e; ++i) {
1935  assert(PHI->isPHI() &&
1936  "This is not a machine PHI node that we are updating!");
1937  if (!FuncInfo->MBB->isSuccessor(PHI->getParent()))
1938  continue;
1939  PHI.addReg(FuncInfo->PHINodesToUpdate[i].second).addMBB(FuncInfo->MBB);
1940  }
1941 
1942  // Handle stack protector.
1943  if (SDB->SPDescriptor.shouldEmitFunctionBasedCheckStackProtector()) {
1944  // The target provides a guard check function. There is no need to
1945  // generate error handling code or to split current basic block.
1946  MachineBasicBlock *ParentMBB = SDB->SPDescriptor.getParentMBB();
1947 
1948  // Add load and check to the basicblock.
1949  FuncInfo->MBB = ParentMBB;
1950  FuncInfo->InsertPt =
1953  CurDAG->setRoot(SDB->getRoot());
1954  SDB->clear();
1955  CodeGenAndEmitDAG();
1956 
1957  // Clear the Per-BB State.
1958  SDB->SPDescriptor.resetPerBBState();
1959  } else if (SDB->SPDescriptor.shouldEmitStackProtector()) {
1960  MachineBasicBlock *ParentMBB = SDB->SPDescriptor.getParentMBB();
1961  MachineBasicBlock *SuccessMBB = SDB->SPDescriptor.getSuccessMBB();
1962 
1963  // Find the split point to split the parent mbb. At the same time copy all
1964  // physical registers used in the tail of parent mbb into virtual registers
1965  // before the split point and back into physical registers after the split
1966  // point. This prevents us needing to deal with Live-ins and many other
1967  // register allocation issues caused by us splitting the parent mbb. The
1968  // register allocator will clean up said virtual copies later on.
1969  MachineBasicBlock::iterator SplitPoint =
1971 
1972  // Splice the terminator of ParentMBB into SuccessMBB.
1973  SuccessMBB->splice(SuccessMBB->end(), ParentMBB,
1974  SplitPoint,
1975  ParentMBB->end());
1976 
1977  // Add compare/jump on neq/jump to the parent BB.
1978  FuncInfo->MBB = ParentMBB;
1979  FuncInfo->InsertPt = ParentMBB->end();
1981  CurDAG->setRoot(SDB->getRoot());
1982  SDB->clear();
1983  CodeGenAndEmitDAG();
1984 
1985  // CodeGen Failure MBB if we have not codegened it yet.
1986  MachineBasicBlock *FailureMBB = SDB->SPDescriptor.getFailureMBB();
1987  if (FailureMBB->empty()) {
1988  FuncInfo->MBB = FailureMBB;
1989  FuncInfo->InsertPt = FailureMBB->end();
1991  CurDAG->setRoot(SDB->getRoot());
1992  SDB->clear();
1993  CodeGenAndEmitDAG();
1994  }
1995 
1996  // Clear the Per-BB State.
1997  SDB->SPDescriptor.resetPerBBState();
1998  }
1999 
2000  // Lower each BitTestBlock.
2001  for (auto &BTB : SDB->BitTestCases) {
2002  // Lower header first, if it wasn't already lowered
2003  if (!BTB.Emitted) {
2004  // Set the current basic block to the mbb we wish to insert the code into
2005  FuncInfo->MBB = BTB.Parent;
2006  FuncInfo->InsertPt = FuncInfo->MBB->end();
2007  // Emit the code
2009  CurDAG->setRoot(SDB->getRoot());
2010  SDB->clear();
2011  CodeGenAndEmitDAG();
2012  }
2013 
2014  BranchProbability UnhandledProb = BTB.Prob;
2015  for (unsigned j = 0, ej = BTB.Cases.size(); j != ej; ++j) {
2016  UnhandledProb -= BTB.Cases[j].ExtraProb;
2017  // Set the current basic block to the mbb we wish to insert the code into
2018  FuncInfo->MBB = BTB.Cases[j].ThisBB;
2019  FuncInfo->InsertPt = FuncInfo->MBB->end();
2020  // Emit the code
2021 
2022  // If all cases cover a contiguous range, it is not necessary to jump to
2023  // the default block after the last bit test fails. This is because the
2024  // range check during bit test header creation has guaranteed that every
2025  // case here doesn't go outside the range. In this case, there is no need
2026  // to perform the last bit test, as it will always be true. Instead, make
2027  // the second-to-last bit-test fall through to the target of the last bit
2028  // test, and delete the last bit test.
2029 
2030  MachineBasicBlock *NextMBB;
2031  if (BTB.ContiguousRange && j + 2 == ej) {
2032  // Second-to-last bit-test with contiguous range: fall through to the
2033  // target of the final bit test.
2034  NextMBB = BTB.Cases[j + 1].TargetBB;
2035  } else if (j + 1 == ej) {
2036  // For the last bit test, fall through to Default.
2037  NextMBB = BTB.Default;
2038  } else {
2039  // Otherwise, fall through to the next bit test.
2040  NextMBB = BTB.Cases[j + 1].ThisBB;
2041  }
2042 
2043  SDB->visitBitTestCase(BTB, NextMBB, UnhandledProb, BTB.Reg, BTB.Cases[j],
2044  FuncInfo->MBB);
2045 
2046  CurDAG->setRoot(SDB->getRoot());
2047  SDB->clear();
2048  CodeGenAndEmitDAG();
2049 
2050  if (BTB.ContiguousRange && j + 2 == ej) {
2051  // Since we're not going to use the final bit test, remove it.
2052  BTB.Cases.pop_back();
2053  break;
2054  }
2055  }
2056 
2057  // Update PHI Nodes
2058  for (unsigned pi = 0, pe = FuncInfo->PHINodesToUpdate.size();
2059  pi != pe; ++pi) {
2060  MachineInstrBuilder PHI(*MF, FuncInfo->PHINodesToUpdate[pi].first);
2061  MachineBasicBlock *PHIBB = PHI->getParent();
2062  assert(PHI->isPHI() &&
2063  "This is not a machine PHI node that we are updating!");
2064  // This is "default" BB. We have two jumps to it. From "header" BB and
2065  // from last "case" BB, unless the latter was skipped.
2066  if (PHIBB == BTB.Default) {
2067  PHI.addReg(FuncInfo->PHINodesToUpdate[pi].second).addMBB(BTB.Parent);
2068  if (!BTB.ContiguousRange) {
2069  PHI.addReg(FuncInfo->PHINodesToUpdate[pi].second)
2070  .addMBB(BTB.Cases.back().ThisBB);
2071  }
2072  }
2073  // One of "cases" BB.
2074  for (unsigned j = 0, ej = BTB.Cases.size();
2075  j != ej; ++j) {
2076  MachineBasicBlock* cBB = BTB.Cases[j].ThisBB;
2077  if (cBB->isSuccessor(PHIBB))
2078  PHI.addReg(FuncInfo->PHINodesToUpdate[pi].second).addMBB(cBB);
2079  }
2080  }
2081  }
2082  SDB->BitTestCases.clear();
2083 
2084  // If the JumpTable record is filled in, then we need to emit a jump table.
2085  // Updating the PHI nodes is tricky in this case, since we need to determine
2086  // whether the PHI is a successor of the range check MBB or the jump table MBB
2087  for (unsigned i = 0, e = SDB->JTCases.size(); i != e; ++i) {
2088  // Lower header first, if it wasn't already lowered
2089  if (!SDB->JTCases[i].first.Emitted) {
2090  // Set the current basic block to the mbb we wish to insert the code into
2091  FuncInfo->MBB = SDB->JTCases[i].first.HeaderBB;
2092  FuncInfo->InsertPt = FuncInfo->MBB->end();
2093  // Emit the code
2094  SDB->visitJumpTableHeader(SDB->JTCases[i].second, SDB->JTCases[i].first,
2095  FuncInfo->MBB);
2096  CurDAG->setRoot(SDB->getRoot());
2097  SDB->clear();
2098  CodeGenAndEmitDAG();
2099  }
2100 
2101  // Set the current basic block to the mbb we wish to insert the code into
2102  FuncInfo->MBB = SDB->JTCases[i].second.MBB;
2103  FuncInfo->InsertPt = FuncInfo->MBB->end();
2104  // Emit the code
2105  SDB->visitJumpTable(SDB->JTCases[i].second);
2106  CurDAG->setRoot(SDB->getRoot());
2107  SDB->clear();
2108  CodeGenAndEmitDAG();
2109 
2110  // Update PHI Nodes
2111  for (unsigned pi = 0, pe = FuncInfo->PHINodesToUpdate.size();
2112  pi != pe; ++pi) {
2113  MachineInstrBuilder PHI(*MF, FuncInfo->PHINodesToUpdate[pi].first);
2114  MachineBasicBlock *PHIBB = PHI->getParent();
2115  assert(PHI->isPHI() &&
2116  "This is not a machine PHI node that we are updating!");
2117  // "default" BB. We can go there only from header BB.
2118  if (PHIBB == SDB->JTCases[i].second.Default)
2119  PHI.addReg(FuncInfo->PHINodesToUpdate[pi].second)
2120  .addMBB(SDB->JTCases[i].first.HeaderBB);
2121  // JT BB. Just iterate over successors here
2122  if (FuncInfo->MBB->isSuccessor(PHIBB))
2123  PHI.addReg(FuncInfo->PHINodesToUpdate[pi].second).addMBB(FuncInfo->MBB);
2124  }
2125  }
2126  SDB->JTCases.clear();
2127 
2128  // If we generated any switch lowering information, build and codegen any
2129  // additional DAGs necessary.
2130  for (unsigned i = 0, e = SDB->SwitchCases.size(); i != e; ++i) {
2131  // Set the current basic block to the mbb we wish to insert the code into
2132  FuncInfo->MBB = SDB->SwitchCases[i].ThisBB;
2133  FuncInfo->InsertPt = FuncInfo->MBB->end();
2134 
2135  // Determine the unique successors.
2137  Succs.push_back(SDB->SwitchCases[i].TrueBB);
2138  if (SDB->SwitchCases[i].TrueBB != SDB->SwitchCases[i].FalseBB)
2139  Succs.push_back(SDB->SwitchCases[i].FalseBB);
2140 
2141  // Emit the code. Note that this could result in FuncInfo->MBB being split.
2143  CurDAG->setRoot(SDB->getRoot());
2144  SDB->clear();
2145  CodeGenAndEmitDAG();
2146 
2147  // Remember the last block, now that any splitting is done, for use in
2148  // populating PHI nodes in successors.
2149  MachineBasicBlock *ThisBB = FuncInfo->MBB;
2150 
2151  // Handle any PHI nodes in successors of this chunk, as if we were coming
2152  // from the original BB before switch expansion. Note that PHI nodes can
2153  // occur multiple times in PHINodesToUpdate. We have to be very careful to
2154  // handle them the right number of times.
2155  for (unsigned i = 0, e = Succs.size(); i != e; ++i) {
2156  FuncInfo->MBB = Succs[i];
2157  FuncInfo->InsertPt = FuncInfo->MBB->end();
2158  // FuncInfo->MBB may have been removed from the CFG if a branch was
2159  // constant folded.
2160  if (ThisBB->isSuccessor(FuncInfo->MBB)) {
2162  MBBI = FuncInfo->MBB->begin(), MBBE = FuncInfo->MBB->end();
2163  MBBI != MBBE && MBBI->isPHI(); ++MBBI) {
2164  MachineInstrBuilder PHI(*MF, MBBI);
2165  // This value for this PHI node is recorded in PHINodesToUpdate.
2166  for (unsigned pn = 0; ; ++pn) {
2167  assert(pn != FuncInfo->PHINodesToUpdate.size() &&
2168  "Didn't find PHI entry!");
2169  if (FuncInfo->PHINodesToUpdate[pn].first == PHI) {
2170  PHI.addReg(FuncInfo->PHINodesToUpdate[pn].second).addMBB(ThisBB);
2171  break;
2172  }
2173  }
2174  }
2175  }
2176  }
2177  }
2178  SDB->SwitchCases.clear();
2179 }
2180 
2181 /// Create the scheduler. If a specific scheduler was specified
2182 /// via the SchedulerRegistry, use it, otherwise select the
2183 /// one preferred by the target.
2184 ///
2185 ScheduleDAGSDNodes *SelectionDAGISel::CreateScheduler() {
2186  return ISHeuristic(this, OptLevel);
2187 }
2188 
2189 //===----------------------------------------------------------------------===//
2190 // Helper functions used by the generated instruction selector.
2191 //===----------------------------------------------------------------------===//
2192 // Calls to these methods are generated by tblgen.
2193 
2194 /// CheckAndMask - The isel is trying to match something like (and X, 255). If
2195 /// the dag combiner simplified the 255, we still want to match. RHS is the
2196 /// actual value in the DAG on the RHS of an AND, and DesiredMaskS is the value
2197 /// specified in the .td file (e.g. 255).
2199  int64_t DesiredMaskS) const {
2200  const APInt &ActualMask = RHS->getAPIntValue();
2201  const APInt &DesiredMask = APInt(LHS.getValueSizeInBits(), DesiredMaskS);
2202 
2203  // If the actual mask exactly matches, success!
2204  if (ActualMask == DesiredMask)
2205  return true;
2206 
2207  // If the actual AND mask is allowing unallowed bits, this doesn't match.
2208  if (!ActualMask.isSubsetOf(DesiredMask))
2209  return false;
2210 
2211  // Otherwise, the DAG Combiner may have proven that the value coming in is
2212  // either already zero or is not demanded. Check for known zero input bits.
2213  APInt NeededMask = DesiredMask & ~ActualMask;
2214  if (CurDAG->MaskedValueIsZero(LHS, NeededMask))
2215  return true;
2216 
2217  // TODO: check to see if missing bits are just not demanded.
2218 
2219  // Otherwise, this pattern doesn't match.
2220  return false;
2221 }
2222 
2223 /// CheckOrMask - The isel is trying to match something like (or X, 255). If
2224 /// the dag combiner simplified the 255, we still want to match. RHS is the
2225 /// actual value in the DAG on the RHS of an OR, and DesiredMaskS is the value
2226 /// specified in the .td file (e.g. 255).
2228  int64_t DesiredMaskS) const {
2229  const APInt &ActualMask = RHS->getAPIntValue();
2230  const APInt &DesiredMask = APInt(LHS.getValueSizeInBits(), DesiredMaskS);
2231 
2232  // If the actual mask exactly matches, success!
2233  if (ActualMask == DesiredMask)
2234  return true;
2235 
2236  // If the actual AND mask is allowing unallowed bits, this doesn't match.
2237  if (!ActualMask.isSubsetOf(DesiredMask))
2238  return false;
2239 
2240  // Otherwise, the DAG Combiner may have proven that the value coming in is
2241  // either already zero or is not demanded. Check for known zero input bits.
2242  APInt NeededMask = DesiredMask & ~ActualMask;
2243  KnownBits Known = CurDAG->computeKnownBits(LHS);
2244 
2245  // If all the missing bits in the or are already known to be set, match!
2246  if (NeededMask.isSubsetOf(Known.One))
2247  return true;
2248 
2249  // TODO: check to see if missing bits are just not demanded.
2250 
2251  // Otherwise, this pattern doesn't match.
2252  return false;
2253 }
2254 
2255 /// SelectInlineAsmMemoryOperands - Calls to this are automatically generated
2256 /// by tblgen. Others should not call it.
2258  const SDLoc &DL) {
2259  std::vector<SDValue> InOps;
2260  std::swap(InOps, Ops);
2261 
2262  Ops.push_back(InOps[InlineAsm::Op_InputChain]); // 0
2263  Ops.push_back(InOps[InlineAsm::Op_AsmString]); // 1
2264  Ops.push_back(InOps[InlineAsm::Op_MDNode]); // 2, !srcloc
2265  Ops.push_back(InOps[InlineAsm::Op_ExtraInfo]); // 3 (SideEffect, AlignStack)
2266 
2267  unsigned i = InlineAsm::Op_FirstOperand, e = InOps.size();
2268  if (InOps[e-1].getValueType() == MVT::Glue)
2269  --e; // Don't process a glue operand if it is here.
2270 
2271  while (i != e) {
2272  unsigned Flags = cast<ConstantSDNode>(InOps[i])->getZExtValue();
2273  if (!InlineAsm::isMemKind(Flags)) {
2274  // Just skip over this operand, copying the operands verbatim.
2275  Ops.insert(Ops.end(), InOps.begin()+i,
2276  InOps.begin()+i+InlineAsm::getNumOperandRegisters(Flags) + 1);
2277  i += InlineAsm::getNumOperandRegisters(Flags) + 1;
2278  } else {
2280  "Memory operand with multiple values?");
2281 
2282  unsigned TiedToOperand;
2283  if (InlineAsm::isUseOperandTiedToDef(Flags, TiedToOperand)) {
2284  // We need the constraint ID from the operand this is tied to.
2285  unsigned CurOp = InlineAsm::Op_FirstOperand;
2286  Flags = cast<ConstantSDNode>(InOps[CurOp])->getZExtValue();
2287  for (; TiedToOperand; --TiedToOperand) {
2288  CurOp += InlineAsm::getNumOperandRegisters(Flags)+1;
2289  Flags = cast<ConstantSDNode>(InOps[CurOp])->getZExtValue();
2290  }
2291  }
2292 
2293  // Otherwise, this is a memory operand. Ask the target to select it.
2294  std::vector<SDValue> SelOps;
2295  unsigned ConstraintID = InlineAsm::getMemoryConstraintID(Flags);
2296  if (SelectInlineAsmMemoryOperand(InOps[i+1], ConstraintID, SelOps))
2297  report_fatal_error("Could not match memory address. Inline asm"
2298  " failure!");
2299 
2300  // Add this to the output node.
2301  unsigned NewFlags =
2303  NewFlags = InlineAsm::getFlagWordForMem(NewFlags, ConstraintID);
2304  Ops.push_back(CurDAG->getTargetConstant(NewFlags, DL, MVT::i32));
2305  Ops.insert(Ops.end(), SelOps.begin(), SelOps.end());
2306  i += 2;
2307  }
2308  }
2309 
2310  // Add the glue input back if present.
2311  if (e != InOps.size())
2312  Ops.push_back(InOps.back());
2313 }
2314 
2315 /// findGlueUse - Return use of MVT::Glue value produced by the specified
2316 /// SDNode.
2317 ///
2319  unsigned FlagResNo = N->getNumValues()-1;
2320  for (SDNode::use_iterator I = N->use_begin(), E = N->use_end(); I != E; ++I) {
2321  SDUse &Use = I.getUse();
2322  if (Use.getResNo() == FlagResNo)
2323  return Use.getUser();
2324  }
2325  return nullptr;
2326 }
2327 
2328 /// findNonImmUse - Return true if "Def" is a predecessor of "Root" via a path
2329 /// beyond "ImmedUse". We may ignore chains as they are checked separately.
2330 static bool findNonImmUse(SDNode *Root, SDNode *Def, SDNode *ImmedUse,
2331  bool IgnoreChains) {
2334  // Only check if we have non-immediate uses of Def.
2335  if (ImmedUse->isOnlyUserOf(Def))
2336  return false;
2337 
2338  // We don't care about paths to Def that go through ImmedUse so mark it
2339  // visited and mark non-def operands as used.
2340  Visited.insert(ImmedUse);
2341  for (const SDValue &Op : ImmedUse->op_values()) {
2342  SDNode *N = Op.getNode();
2343  // Ignore chain deps (they are validated by
2344  // HandleMergeInputChains) and immediate uses
2345  if ((Op.getValueType() == MVT::Other && IgnoreChains) || N == Def)
2346  continue;
2347  if (!Visited.insert(N).second)
2348  continue;
2349  WorkList.push_back(N);
2350  }
2351 
2352  // Initialize worklist to operands of Root.
2353  if (Root != ImmedUse) {
2354  for (const SDValue &Op : Root->op_values()) {
2355  SDNode *N = Op.getNode();
2356  // Ignore chains (they are validated by HandleMergeInputChains)
2357  if ((Op.getValueType() == MVT::Other && IgnoreChains) || N == Def)
2358  continue;
2359  if (!Visited.insert(N).second)
2360  continue;
2361  WorkList.push_back(N);
2362  }
2363  }
2364 
2365  return SDNode::hasPredecessorHelper(Def, Visited, WorkList, 0, true);
2366 }
2367 
2368 /// IsProfitableToFold - Returns true if it's profitable to fold the specific
2369 /// operand node N of U during instruction selection that starts at Root.
2371  SDNode *Root) const {
2372  if (OptLevel == CodeGenOpt::None) return false;
2373  return N.hasOneUse();
2374 }
2375 
2376 /// IsLegalToFold - Returns true if the specific operand node N of
2377 /// U can be folded during instruction selection that starts at Root.
2380  bool IgnoreChains) {
2381  if (OptLevel == CodeGenOpt::None) return false;
2382 
2383  // If Root use can somehow reach N through a path that that doesn't contain
2384  // U then folding N would create a cycle. e.g. In the following
2385  // diagram, Root can reach N through X. If N is folded into Root, then
2386  // X is both a predecessor and a successor of U.
2387  //
2388  // [N*] //
2389  // ^ ^ //
2390  // / \ //
2391  // [U*] [X]? //
2392  // ^ ^ //
2393  // \ / //
2394  // \ / //
2395  // [Root*] //
2396  //
2397  // * indicates nodes to be folded together.
2398  //
2399  // If Root produces glue, then it gets (even more) interesting. Since it
2400  // will be "glued" together with its glue use in the scheduler, we need to
2401  // check if it might reach N.
2402  //
2403  // [N*] //
2404  // ^ ^ //
2405  // / \ //
2406  // [U*] [X]? //
2407  // ^ ^ //
2408  // \ \ //
2409  // \ | //
2410  // [Root*] | //
2411  // ^ | //
2412  // f | //
2413  // | / //
2414  // [Y] / //
2415  // ^ / //
2416  // f / //
2417  // | / //
2418  // [GU] //
2419  //
2420  // If GU (glue use) indirectly reaches N (the load), and Root folds N
2421  // (call it Fold), then X is a predecessor of GU and a successor of
2422  // Fold. But since Fold and GU are glued together, this will create
2423  // a cycle in the scheduling graph.
2424 
2425  // If the node has glue, walk down the graph to the "lowest" node in the
2426  // glueged set.
2427  EVT VT = Root->getValueType(Root->getNumValues()-1);
2428  while (VT == MVT::Glue) {
2429  SDNode *GU = findGlueUse(Root);
2430  if (!GU)
2431  break;
2432  Root = GU;
2433  VT = Root->getValueType(Root->getNumValues()-1);
2434 
2435  // If our query node has a glue result with a use, we've walked up it. If
2436  // the user (which has already been selected) has a chain or indirectly uses
2437  // the chain, HandleMergeInputChains will not consider it. Because of
2438  // this, we cannot ignore chains in this predicate.
2439  IgnoreChains = false;
2440  }
2441 
2442  return !findNonImmUse(Root, N.getNode(), U, IgnoreChains);
2443 }
2444 
2445 void SelectionDAGISel::Select_INLINEASM(SDNode *N, bool Branch) {
2446  SDLoc DL(N);
2447 
2448  std::vector<SDValue> Ops(N->op_begin(), N->op_end());
2450 
2451  const EVT VTs[] = {MVT::Other, MVT::Glue};
2452  SDValue New = CurDAG->getNode(Branch ? ISD::INLINEASM_BR : ISD::INLINEASM, DL, VTs, Ops);
2453  New->setNodeId(-1);
2454  ReplaceUses(N, New.getNode());
2455  CurDAG->RemoveDeadNode(N);
2456 }
2457 
2458 void SelectionDAGISel::Select_READ_REGISTER(SDNode *Op) {
2459  SDLoc dl(Op);
2461  const MDString *RegStr = dyn_cast<MDString>(MD->getMD()->getOperand(0));
2462  unsigned Reg =
2463  TLI->getRegisterByName(RegStr->getString().data(), Op->getValueType(0),
2464  *CurDAG);
2465  SDValue New = CurDAG->getCopyFromReg(
2466  Op->getOperand(0), dl, Reg, Op->getValueType(0));
2467  New->setNodeId(-1);
2468  ReplaceUses(Op, New.getNode());
2469  CurDAG->RemoveDeadNode(Op);
2470 }
2471 
2472 void SelectionDAGISel::Select_WRITE_REGISTER(SDNode *Op) {
2473  SDLoc dl(Op);
2475  const MDString *RegStr = dyn_cast<MDString>(MD->getMD()->getOperand(0));
2476  unsigned Reg = TLI->getRegisterByName(RegStr->getString().data(),
2477  Op->getOperand(2).getValueType(),
2478  *CurDAG);
2479  SDValue New = CurDAG->getCopyToReg(
2480  Op->getOperand(0), dl, Reg, Op->getOperand(2));
2481  New->setNodeId(-1);
2482  ReplaceUses(Op, New.getNode());
2483  CurDAG->RemoveDeadNode(Op);
2484 }
2485 
2486 void SelectionDAGISel::Select_UNDEF(SDNode *N) {
2487  CurDAG->SelectNodeTo(N, TargetOpcode::IMPLICIT_DEF, N->getValueType(0));
2488 }
2489 
2490 /// GetVBR - decode a vbr encoding whose top bit is set.
2491 LLVM_ATTRIBUTE_ALWAYS_INLINE static inline uint64_t
2492 GetVBR(uint64_t Val, const unsigned char *MatcherTable, unsigned &Idx) {
2493  assert(Val >= 128 && "Not a VBR");
2494  Val &= 127; // Remove first vbr bit.
2495 
2496  unsigned Shift = 7;
2497  uint64_t NextBits;
2498  do {
2499  NextBits = MatcherTable[Idx++];
2500  Val |= (NextBits&127) << Shift;
2501  Shift += 7;
2502  } while (NextBits & 128);
2503 
2504  return Val;
2505 }
2506 
2507 /// When a match is complete, this method updates uses of interior chain results
2508 /// to use the new results.
2509 void SelectionDAGISel::UpdateChains(
2510  SDNode *NodeToMatch, SDValue InputChain,
2511  SmallVectorImpl<SDNode *> &ChainNodesMatched, bool isMorphNodeTo) {
2512  SmallVector<SDNode*, 4> NowDeadNodes;
2513 
2514  // Now that all the normal results are replaced, we replace the chain and
2515  // glue results if present.
2516  if (!ChainNodesMatched.empty()) {
2517  assert(InputChain.getNode() &&
2518  "Matched input chains but didn't produce a chain");
2519  // Loop over all of the nodes we matched that produced a chain result.
2520  // Replace all the chain results with the final chain we ended up with.
2521  for (unsigned i = 0, e = ChainNodesMatched.size(); i != e; ++i) {
2522  SDNode *ChainNode = ChainNodesMatched[i];
2523  // If ChainNode is null, it's because we replaced it on a previous
2524  // iteration and we cleared it out of the map. Just skip it.
2525  if (!ChainNode)
2526  continue;
2527 
2528  assert(ChainNode->getOpcode() != ISD::DELETED_NODE &&
2529  "Deleted node left in chain");
2530 
2531  // Don't replace the results of the root node if we're doing a
2532  // MorphNodeTo.
2533  if (ChainNode == NodeToMatch && isMorphNodeTo)
2534  continue;
2535 
2536  SDValue ChainVal = SDValue(ChainNode, ChainNode->getNumValues()-1);
2537  if (ChainVal.getValueType() == MVT::Glue)
2538  ChainVal = ChainVal.getValue(ChainVal->getNumValues()-2);
2539  assert(ChainVal.getValueType() == MVT::Other && "Not a chain?");
2541  *CurDAG, [&](SDNode *N, SDNode *E) {
2542  std::replace(ChainNodesMatched.begin(), ChainNodesMatched.end(), N,
2543  static_cast<SDNode *>(nullptr));
2544  });
2545  if (ChainNode->getOpcode() != ISD::TokenFactor)
2546  ReplaceUses(ChainVal, InputChain);
2547 
2548  // If the node became dead and we haven't already seen it, delete it.
2549  if (ChainNode != NodeToMatch && ChainNode->use_empty() &&
2550  !std::count(NowDeadNodes.begin(), NowDeadNodes.end(), ChainNode))
2551  NowDeadNodes.push_back(ChainNode);
2552  }
2553  }
2554 
2555  if (!NowDeadNodes.empty())
2556  CurDAG->RemoveDeadNodes(NowDeadNodes);
2557 
2558  LLVM_DEBUG(dbgs() << "ISEL: Match complete!\n");
2559 }
2560 
2561 /// HandleMergeInputChains - This implements the OPC_EmitMergeInputChains
2562 /// operation for when the pattern matched at least one node with a chains. The
2563 /// input vector contains a list of all of the chained nodes that we match. We
2564 /// must determine if this is a valid thing to cover (i.e. matching it won't
2565 /// induce cycles in the DAG) and if so, creating a TokenFactor node. that will
2566 /// be used as the input node chain for the generated nodes.
2567 static SDValue
2569  SelectionDAG *CurDAG) {
2570 
2573  SmallVector<SDValue, 3> InputChains;
2574  unsigned int Max = 8192;
2575 
2576  // Quick exit on trivial merge.
2577  if (ChainNodesMatched.size() == 1)
2578  return ChainNodesMatched[0]->getOperand(0);
2579 
2580  // Add chains that aren't already added (internal). Peek through
2581  // token factors.
2582  std::function<void(const SDValue)> AddChains = [&](const SDValue V) {
2583  if (V.getValueType() != MVT::Other)
2584  return;
2585  if (V->getOpcode() == ISD::EntryToken)
2586  return;
2587  if (!Visited.insert(V.getNode()).second)
2588  return;
2589  if (V->getOpcode() == ISD::TokenFactor) {
2590  for (const SDValue &Op : V->op_values())
2591  AddChains(Op);
2592  } else
2593  InputChains.push_back(V);
2594  };
2595 
2596  for (auto *N : ChainNodesMatched) {
2597  Worklist.push_back(N);
2598  Visited.insert(N);
2599  }
2600 
2601  while (!Worklist.empty())
2602  AddChains(Worklist.pop_back_val()->getOperand(0));
2603 
2604  // Skip the search if there are no chain dependencies.
2605  if (InputChains.size() == 0)
2606  return CurDAG->getEntryNode();
2607 
2608  // If one of these chains is a successor of input, we must have a
2609  // node that is both the predecessor and successor of the
2610  // to-be-merged nodes. Fail.
2611  Visited.clear();
2612  for (SDValue V : InputChains)
2613  Worklist.push_back(V.getNode());
2614 
2615  for (auto *N : ChainNodesMatched)
2616  if (SDNode::hasPredecessorHelper(N, Visited, Worklist, Max, true))
2617  return SDValue();
2618 
2619  // Return merged chain.
2620  if (InputChains.size() == 1)
2621  return InputChains[0];
2622  return CurDAG->getNode(ISD::TokenFactor, SDLoc(ChainNodesMatched[0]),
2623  MVT::Other, InputChains);
2624 }
2625 
2626 /// MorphNode - Handle morphing a node in place for the selector.
2627 SDNode *SelectionDAGISel::
2628 MorphNode(SDNode *Node, unsigned TargetOpc, SDVTList VTList,
2629  ArrayRef<SDValue> Ops, unsigned EmitNodeInfo) {
2630  // It is possible we're using MorphNodeTo to replace a node with no
2631  // normal results with one that has a normal result (or we could be
2632  // adding a chain) and the input could have glue and chains as well.
2633  // In this case we need to shift the operands down.
2634  // FIXME: This is a horrible hack and broken in obscure cases, no worse
2635  // than the old isel though.
2636  int OldGlueResultNo = -1, OldChainResultNo = -1;
2637 
2638  unsigned NTMNumResults = Node->getNumValues();
2639  if (Node->getValueType(NTMNumResults-1) == MVT::Glue) {
2640  OldGlueResultNo = NTMNumResults-1;
2641  if (NTMNumResults != 1 &&
2642  Node->getValueType(NTMNumResults-2) == MVT::Other)
2643  OldChainResultNo = NTMNumResults-2;
2644  } else if (Node->getValueType(NTMNumResults-1) == MVT::Other)
2645  OldChainResultNo = NTMNumResults-1;
2646 
2647  // Call the underlying SelectionDAG routine to do the transmogrification. Note
2648  // that this deletes operands of the old node that become dead.
2649  SDNode *Res = CurDAG->MorphNodeTo(Node, ~TargetOpc, VTList, Ops);
2650 
2651  // MorphNodeTo can operate in two ways: if an existing node with the
2652  // specified operands exists, it can just return it. Otherwise, it
2653  // updates the node in place to have the requested operands.
2654  if (Res == Node) {
2655  // If we updated the node in place, reset the node ID. To the isel,
2656  // this should be just like a newly allocated machine node.
2657  Res->setNodeId(-1);
2658  }
2659 
2660  unsigned ResNumResults = Res->getNumValues();
2661  // Move the glue if needed.
2662  if ((EmitNodeInfo & OPFL_GlueOutput) && OldGlueResultNo != -1 &&
2663  (unsigned)OldGlueResultNo != ResNumResults-1)
2664  ReplaceUses(SDValue(Node, OldGlueResultNo),
2665  SDValue(Res, ResNumResults - 1));
2666 
2667  if ((EmitNodeInfo & OPFL_GlueOutput) != 0)
2668  --ResNumResults;
2669 
2670  // Move the chain reference if needed.
2671  if ((EmitNodeInfo & OPFL_Chain) && OldChainResultNo != -1 &&
2672  (unsigned)OldChainResultNo != ResNumResults-1)
2673  ReplaceUses(SDValue(Node, OldChainResultNo),
2674  SDValue(Res, ResNumResults - 1));
2675 
2676  // Otherwise, no replacement happened because the node already exists. Replace
2677  // Uses of the old node with the new one.
2678  if (Res != Node) {
2679  ReplaceNode(Node, Res);
2680  } else {
2682  }
2683 
2684  return Res;
2685 }
2686 
2687 /// CheckSame - Implements OP_CheckSame.
2688 LLVM_ATTRIBUTE_ALWAYS_INLINE static inline bool
2689 CheckSame(const unsigned char *MatcherTable, unsigned &MatcherIndex,
2690  SDValue N,
2691  const SmallVectorImpl<std::pair<SDValue, SDNode*>> &RecordedNodes) {
2692  // Accept if it is exactly the same as a previously recorded node.
2693  unsigned RecNo = MatcherTable[MatcherIndex++];
2694  assert(RecNo < RecordedNodes.size() && "Invalid CheckSame");
2695  return N == RecordedNodes[RecNo].first;
2696 }
2697 
2698 /// CheckChildSame - Implements OP_CheckChildXSame.
2699 LLVM_ATTRIBUTE_ALWAYS_INLINE static inline bool
2700 CheckChildSame(const unsigned char *MatcherTable, unsigned &MatcherIndex,
2701  SDValue N,
2702  const SmallVectorImpl<std::pair<SDValue, SDNode*>> &RecordedNodes,
2703  unsigned ChildNo) {
2704  if (ChildNo >= N.getNumOperands())
2705  return false; // Match fails if out of range child #.
2706  return ::CheckSame(MatcherTable, MatcherIndex, N.getOperand(ChildNo),
2707  RecordedNodes);
2708 }
2709 
2710 /// CheckPatternPredicate - Implements OP_CheckPatternPredicate.
2711 LLVM_ATTRIBUTE_ALWAYS_INLINE static inline bool
2712 CheckPatternPredicate(const unsigned char *MatcherTable, unsigned &MatcherIndex,
2713  const SelectionDAGISel &SDISel) {
2714  return SDISel.CheckPatternPredicate(MatcherTable[MatcherIndex++]);
2715 }
2716 
2717 /// CheckNodePredicate - Implements OP_CheckNodePredicate.
2718 LLVM_ATTRIBUTE_ALWAYS_INLINE static inline bool
2719 CheckNodePredicate(const unsigned char *MatcherTable, unsigned &MatcherIndex,
2720  const SelectionDAGISel &SDISel, SDNode *N) {
2721  return SDISel.CheckNodePredicate(N, MatcherTable[MatcherIndex++]);
2722 }
2723 
2724 LLVM_ATTRIBUTE_ALWAYS_INLINE static inline bool
2725 CheckOpcode(const unsigned char *MatcherTable, unsigned &MatcherIndex,
2726  SDNode *N) {
2727  uint16_t Opc = MatcherTable[MatcherIndex++];
2728  Opc |= (unsigned short)MatcherTable[MatcherIndex++] << 8;
2729  return N->getOpcode() == Opc;
2730 }
2731 
2732 LLVM_ATTRIBUTE_ALWAYS_INLINE static inline bool
2733 CheckType(const unsigned char *MatcherTable, unsigned &MatcherIndex, SDValue N,
2734  const TargetLowering *TLI, const DataLayout &DL) {
2735  MVT::SimpleValueType VT = (MVT::SimpleValueType)MatcherTable[MatcherIndex++];
2736  if (N.getValueType() == VT) return true;
2737 
2738  // Handle the case when VT is iPTR.
2739  return VT == MVT::iPTR && N.getValueType() == TLI->getPointerTy(DL);
2740 }
2741 
2742 LLVM_ATTRIBUTE_ALWAYS_INLINE static inline bool
2743 CheckChildType(const unsigned char *MatcherTable, unsigned &MatcherIndex,
2744  SDValue N, const TargetLowering *TLI, const DataLayout &DL,
2745  unsigned ChildNo) {
2746  if (ChildNo >= N.getNumOperands())
2747  return false; // Match fails if out of range child #.
2748  return ::CheckType(MatcherTable, MatcherIndex, N.getOperand(ChildNo), TLI,
2749  DL);
2750 }
2751 
2752 LLVM_ATTRIBUTE_ALWAYS_INLINE static inline bool
2753 CheckCondCode(const unsigned char *MatcherTable, unsigned &MatcherIndex,
2754  SDValue N) {
2755  return cast<CondCodeSDNode>(N)->get() ==
2756  (ISD::CondCode)MatcherTable[MatcherIndex++];
2757 }
2758 
2759 LLVM_ATTRIBUTE_ALWAYS_INLINE static inline bool
2760 CheckValueType(const unsigned char *MatcherTable, unsigned &MatcherIndex,
2761  SDValue N, const TargetLowering *TLI, const DataLayout &DL) {
2762  MVT::SimpleValueType VT = (MVT::SimpleValueType)MatcherTable[MatcherIndex++];
2763  if (cast<VTSDNode>(N)->getVT() == VT)
2764  return true;
2765 
2766  // Handle the case when VT is iPTR.
2767  return VT == MVT::iPTR && cast<VTSDNode>(N)->getVT() == TLI->getPointerTy(DL);
2768 }
2769 
2770 LLVM_ATTRIBUTE_ALWAYS_INLINE static inline bool
2771 CheckInteger(const unsigned char *MatcherTable, unsigned &MatcherIndex,
2772  SDValue N) {
2773  int64_t Val = MatcherTable[MatcherIndex++];
2774  if (Val & 128)
2775  Val = GetVBR(Val, MatcherTable, MatcherIndex);
2776 
2778  return C && C->getSExtValue() == Val;
2779 }
2780 
2781 LLVM_ATTRIBUTE_ALWAYS_INLINE static inline bool
2782 CheckChildInteger(const unsigned char *MatcherTable, unsigned &MatcherIndex,
2783  SDValue N, unsigned ChildNo) {
2784  if (ChildNo >= N.getNumOperands())
2785  return false; // Match fails if out of range child #.
2786  return ::CheckInteger(MatcherTable, MatcherIndex, N.getOperand(ChildNo));
2787 }
2788 
2789 LLVM_ATTRIBUTE_ALWAYS_INLINE static inline bool
2790 CheckAndImm(const unsigned char *MatcherTable, unsigned &MatcherIndex,
2791  SDValue N, const SelectionDAGISel &SDISel) {
2792  int64_t Val = MatcherTable[MatcherIndex++];
2793  if (Val & 128)
2794  Val = GetVBR(Val, MatcherTable, MatcherIndex);
2795 
2796  if (N->getOpcode() != ISD::AND) return false;
2797 
2799  return C && SDISel.CheckAndMask(N.getOperand(0), C, Val);
2800 }
2801 
2802 LLVM_ATTRIBUTE_ALWAYS_INLINE static inline bool
2803 CheckOrImm(const unsigned char *MatcherTable, unsigned &MatcherIndex,
2804  SDValue N, const SelectionDAGISel &SDISel) {
2805  int64_t Val = MatcherTable[MatcherIndex++];
2806  if (Val & 128)
2807  Val = GetVBR(Val, MatcherTable, MatcherIndex);
2808 
2809  if (N->getOpcode() != ISD::OR) return false;
2810 
2812  return C && SDISel.CheckOrMask(N.getOperand(0), C, Val);
2813 }
2814 
2815 /// IsPredicateKnownToFail - If we know how and can do so without pushing a
2816 /// scope, evaluate the current node. If the current predicate is known to
2817 /// fail, set Result=true and return anything. If the current predicate is
2818 /// known to pass, set Result=false and return the MatcherIndex to continue
2819 /// with. If the current predicate is unknown, set Result=false and return the
2820 /// MatcherIndex to continue with.
2821 static unsigned IsPredicateKnownToFail(const unsigned char *Table,
2822  unsigned Index, SDValue N,
2823  bool &Result,
2824  const SelectionDAGISel &SDISel,
2825  SmallVectorImpl<std::pair<SDValue, SDNode*>> &RecordedNodes) {
2826  switch (Table[Index++]) {
2827  default:
2828  Result = false;
2829  return Index-1; // Could not evaluate this predicate.
2831  Result = !::CheckSame(Table, Index, N, RecordedNodes);
2832  return Index;
2837  Result = !::CheckChildSame(Table, Index, N, RecordedNodes,
2838  Table[Index-1] - SelectionDAGISel::OPC_CheckChild0Same);
2839  return Index;
2841  Result = !::CheckPatternPredicate(Table, Index, SDISel);
2842  return Index;
2844  Result = !::CheckNodePredicate(Table, Index, SDISel, N.getNode());
2845  return Index;
2847  Result = !::CheckOpcode(Table, Index, N.getNode());
2848  return Index;
2850  Result = !::CheckType(Table, Index, N, SDISel.TLI,
2851  SDISel.CurDAG->getDataLayout());
2852  return Index;
2854  unsigned Res = Table[Index++];
2855  Result = !::CheckType(Table, Index, N.getValue(Res), SDISel.TLI,
2856  SDISel.CurDAG->getDataLayout());
2857  return Index;
2858  }
2867  Result = !::CheckChildType(
2868  Table, Index, N, SDISel.TLI, SDISel.CurDAG->getDataLayout(),
2869  Table[Index - 1] - SelectionDAGISel::OPC_CheckChild0Type);
2870  return Index;
2872  Result = !::CheckCondCode(Table, Index, N);
2873  return Index;
2875  Result = !::CheckValueType(Table, Index, N, SDISel.TLI,
2876  SDISel.CurDAG->getDataLayout());
2877  return Index;
2879  Result = !::CheckInteger(Table, Index, N);
2880  return Index;
2886  Result = !::CheckChildInteger(Table, Index, N,
2887  Table[Index-1] - SelectionDAGISel::OPC_CheckChild0Integer);
2888  return Index;
2890  Result = !::CheckAndImm(Table, Index, N, SDISel);
2891  return Index;
2893  Result = !::CheckOrImm(Table, Index, N, SDISel);
2894  return Index;
2895  }
2896 }
2897 
2898 namespace {
2899 
2900 struct MatchScope {
2901  /// FailIndex - If this match fails, this is the index to continue with.
2902  unsigned FailIndex;
2903 
2904  /// NodeStack - The node stack when the scope was formed.
2905  SmallVector<SDValue, 4> NodeStack;
2906 
2907  /// NumRecordedNodes - The number of recorded nodes when the scope was formed.
2908  unsigned NumRecordedNodes;
2909 
2910  /// NumMatchedMemRefs - The number of matched memref entries.
2911  unsigned NumMatchedMemRefs;
2912 
2913  /// InputChain/InputGlue - The current chain/glue
2914  SDValue InputChain, InputGlue;
2915 
2916  /// HasChainNodesMatched - True if the ChainNodesMatched list is non-empty.
2917  bool HasChainNodesMatched;
2918 };
2919 
2920 /// \A DAG update listener to keep the matching state
2921 /// (i.e. RecordedNodes and MatchScope) uptodate if the target is allowed to
2922 /// change the DAG while matching. X86 addressing mode matcher is an example
2923 /// for this.
2924 class MatchStateUpdater : public SelectionDAG::DAGUpdateListener
2925 {
2926  SDNode **NodeToMatch;
2928  SmallVectorImpl<MatchScope> &MatchScopes;
2929 
2930 public:
2931  MatchStateUpdater(SelectionDAG &DAG, SDNode **NodeToMatch,
2932  SmallVectorImpl<std::pair<SDValue, SDNode *>> &RN,
2934  : SelectionDAG::DAGUpdateListener(DAG), NodeToMatch(NodeToMatch),
2935  RecordedNodes(RN), MatchScopes(MS) {}
2936 
2937  void NodeDeleted(SDNode *N, SDNode *E) override {
2938  // Some early-returns here to avoid the search if we deleted the node or
2939  // if the update comes from MorphNodeTo (MorphNodeTo is the last thing we
2940  // do, so it's unnecessary to update matching state at that point).
2941  // Neither of these can occur currently because we only install this
2942  // update listener during matching a complex patterns.
2943  if (!E || E->isMachineOpcode())
2944  return;
2945  // Check if NodeToMatch was updated.
2946  if (N == *NodeToMatch)
2947  *NodeToMatch = E;
2948  // Performing linear search here does not matter because we almost never
2949  // run this code. You'd have to have a CSE during complex pattern
2950  // matching.
2951  for (auto &I : RecordedNodes)
2952  if (I.first.getNode() == N)
2953  I.first.setNode(E);
2954 
2955  for (auto &I : MatchScopes)
2956  for (auto &J : I.NodeStack)
2957  if (J.getNode() == N)
2958  J.setNode(E);
2959  }
2960 };
2961 
2962 } // end anonymous namespace
2963 
2965  const unsigned char *MatcherTable,
2966  unsigned TableSize) {
2967  // FIXME: Should these even be selected? Handle these cases in the caller?
2968  switch (NodeToMatch->getOpcode()) {
2969  default:
2970  break;
2971  case ISD::EntryToken: // These nodes remain the same.
2972  case ISD::BasicBlock:
2973  case ISD::Register:
2974  case ISD::RegisterMask:
2975  case ISD::HANDLENODE:
2976  case ISD::MDNODE_SDNODE:
2977  case ISD::TargetConstant:
2978  case ISD::TargetConstantFP:
2980  case ISD::TargetFrameIndex:
2982  case ISD::MCSymbol:
2984  case ISD::TargetJumpTable:
2987  case ISD::TokenFactor:
2988  case ISD::CopyFromReg:
2989  case ISD::CopyToReg:
2990  case ISD::EH_LABEL:
2991  case ISD::ANNOTATION_LABEL:
2992  case ISD::LIFETIME_START:
2993  case ISD::LIFETIME_END:
2994  NodeToMatch->setNodeId(-1); // Mark selected.
2995  return;
2996  case ISD::AssertSext:
2997  case ISD::AssertZext:
2998  ReplaceUses(SDValue(NodeToMatch, 0), NodeToMatch->getOperand(0));
2999  CurDAG->RemoveDeadNode(NodeToMatch);
3000  return;
3001  case ISD::INLINEASM:
3002  case ISD::INLINEASM_BR:
3003  Select_INLINEASM(NodeToMatch,
3004  NodeToMatch->getOpcode() == ISD::INLINEASM_BR);
3005  return;
3006  case ISD::READ_REGISTER:
3007  Select_READ_REGISTER(NodeToMatch);
3008  return;
3009  case ISD::WRITE_REGISTER:
3010  Select_WRITE_REGISTER(NodeToMatch);
3011  return;
3012  case ISD::UNDEF:
3013  Select_UNDEF(NodeToMatch);
3014  return;
3015  }
3016 
3017  assert(!NodeToMatch->isMachineOpcode() && "Node already selected!");
3018 
3019  // Set up the node stack with NodeToMatch as the only node on the stack.
3020  SmallVector<SDValue, 8> NodeStack;
3021  SDValue N = SDValue(NodeToMatch, 0);
3022  NodeStack.push_back(N);
3023 
3024  // MatchScopes - Scopes used when matching, if a match failure happens, this
3025  // indicates where to continue checking.
3026  SmallVector<MatchScope, 8> MatchScopes;
3027 
3028  // RecordedNodes - This is the set of nodes that have been recorded by the
3029  // state machine. The second value is the parent of the node, or null if the
3030  // root is recorded.
3031  SmallVector<std::pair<SDValue, SDNode*>, 8> RecordedNodes;
3032 
3033  // MatchedMemRefs - This is the set of MemRef's we've seen in the input
3034  // pattern.
3035  SmallVector<MachineMemOperand*, 2> MatchedMemRefs;
3036 
3037  // These are the current input chain and glue for use when generating nodes.
3038  // Various Emit operations change these. For example, emitting a copytoreg
3039  // uses and updates these.
3040  SDValue InputChain, InputGlue;
3041 
3042  // ChainNodesMatched - If a pattern matches nodes that have input/output
3043  // chains, the OPC_EmitMergeInputChains operation is emitted which indicates
3044  // which ones they are. The result is captured into this list so that we can
3045  // update the chain results when the pattern is complete.
3046  SmallVector<SDNode*, 3> ChainNodesMatched;
3047 
3048  LLVM_DEBUG(dbgs() << "ISEL: Starting pattern match\n");
3049 
3050  // Determine where to start the interpreter. Normally we start at opcode #0,
3051  // but if the state machine starts with an OPC_SwitchOpcode, then we
3052  // accelerate the first lookup (which is guaranteed to be hot) with the
3053  // OpcodeOffset table.
3054  unsigned MatcherIndex = 0;
3055 
3056  if (!OpcodeOffset.empty()) {
3057  // Already computed the OpcodeOffset table, just index into it.
3058  if (N.getOpcode() < OpcodeOffset.size())
3059  MatcherIndex = OpcodeOffset[N.getOpcode()];
3060  LLVM_DEBUG(dbgs() << " Initial Opcode index to " << MatcherIndex << "\n");
3061 
3062  } else if (MatcherTable[0] == OPC_SwitchOpcode) {
3063  // Otherwise, the table isn't computed, but the state machine does start
3064  // with an OPC_SwitchOpcode instruction. Populate the table now, since this
3065  // is the first time we're selecting an instruction.
3066  unsigned Idx = 1;
3067  while (true) {
3068  // Get the size of this case.
3069  unsigned CaseSize = MatcherTable[Idx++];
3070  if (CaseSize & 128)
3071  CaseSize = GetVBR(CaseSize, MatcherTable, Idx);
3072  if (CaseSize == 0) break;
3073 
3074  // Get the opcode, add the index to the table.
3075  uint16_t Opc = MatcherTable[Idx++];
3076  Opc |= (unsigned short)MatcherTable[Idx++] << 8;
3077  if (Opc >= OpcodeOffset.size())
3078  OpcodeOffset.resize((Opc+1)*2);
3079  OpcodeOffset[Opc] = Idx;
3080  Idx += CaseSize;
3081  }
3082 
3083  // Okay, do the lookup for the first opcode.
3084  if (N.getOpcode() < OpcodeOffset.size())
3085  MatcherIndex = OpcodeOffset[N.getOpcode()];
3086  }
3087 
3088  while (true) {
3089  assert(MatcherIndex < TableSize && "Invalid index");
3090 #ifndef NDEBUG
3091  unsigned CurrentOpcodeIndex = MatcherIndex;
3092 #endif
3093  BuiltinOpcodes Opcode = (BuiltinOpcodes)MatcherTable[MatcherIndex++];
3094  switch (Opcode) {
3095  case OPC_Scope: {
3096  // Okay, the semantics of this operation are that we should push a scope
3097  // then evaluate the first child. However, pushing a scope only to have
3098  // the first check fail (which then pops it) is inefficient. If we can
3099  // determine immediately that the first check (or first several) will
3100  // immediately fail, don't even bother pushing a scope for them.
3101  unsigned FailIndex;
3102 
3103  while (true) {
3104  unsigned NumToSkip = MatcherTable[MatcherIndex++];
3105  if (NumToSkip & 128)
3106  NumToSkip = GetVBR(NumToSkip, MatcherTable, MatcherIndex);
3107  // Found the end of the scope with no match.
3108  if (NumToSkip == 0) {
3109  FailIndex = 0;
3110  break;
3111  }
3112 
3113  FailIndex = MatcherIndex+NumToSkip;
3114 
3115  unsigned MatcherIndexOfPredicate = MatcherIndex;
3116  (void)MatcherIndexOfPredicate; // silence warning.
3117 
3118  // If we can't evaluate this predicate without pushing a scope (e.g. if
3119  // it is a 'MoveParent') or if the predicate succeeds on this node, we
3120  // push the scope and evaluate the full predicate chain.
3121  bool Result;
3122  MatcherIndex = IsPredicateKnownToFail(MatcherTable, MatcherIndex, N,
3123  Result, *this, RecordedNodes);
3124  if (!Result)
3125  break;
3126 
3127  LLVM_DEBUG(
3128  dbgs() << " Skipped scope entry (due to false predicate) at "
3129  << "index " << MatcherIndexOfPredicate << ", continuing at "
3130  << FailIndex << "\n");
3131  ++NumDAGIselRetries;
3132 
3133  // Otherwise, we know that this case of the Scope is guaranteed to fail,
3134  // move to the next case.
3135  MatcherIndex = FailIndex;
3136  }
3137 
3138  // If the whole scope failed to match, bail.
3139  if (FailIndex == 0) break;
3140 
3141  // Push a MatchScope which indicates where to go if the first child fails
3142  // to match.
3143  MatchScope NewEntry;
3144  NewEntry.FailIndex = FailIndex;
3145  NewEntry.NodeStack.append(NodeStack.begin(), NodeStack.end());
3146  NewEntry.NumRecordedNodes = RecordedNodes.size();
3147  NewEntry.NumMatchedMemRefs = MatchedMemRefs.size();
3148  NewEntry.InputChain = InputChain;
3149  NewEntry.InputGlue = InputGlue;
3150  NewEntry.HasChainNodesMatched = !ChainNodesMatched.empty();
3151  MatchScopes.push_back(NewEntry);
3152  continue;
3153  }
3154  case OPC_RecordNode: {
3155  // Remember this node, it may end up being an operand in the pattern.
3156  SDNode *Parent = nullptr;
3157  if (NodeStack.size() > 1)
3158  Parent = NodeStack[NodeStack.size()-2].getNode();
3159  RecordedNodes.push_back(std::make_pair(N, Parent));
3160  continue;
3161  }
3162 
3166  case OPC_RecordChild6: case OPC_RecordChild7: {
3167  unsigned ChildNo = Opcode-OPC_RecordChild0;
3168  if (ChildNo >= N.getNumOperands())
3169  break; // Match fails if out of range child #.
3170 
3171  RecordedNodes.push_back(std::make_pair(N->getOperand(ChildNo),
3172  N.getNode()));
3173  continue;
3174  }
3175  case OPC_RecordMemRef:
3176  if (auto *MN = dyn_cast<MemSDNode>(N))
3177  MatchedMemRefs.push_back(MN->getMemOperand());
3178  else {
3179  LLVM_DEBUG(dbgs() << "Expected MemSDNode "; N->dump(CurDAG);
3180  dbgs() << '\n');
3181  }
3182 
3183  continue;
3184 
3185  case OPC_CaptureGlueInput:
3186  // If the current node has an input glue, capture it in InputGlue.
3187  if (N->getNumOperands() != 0 &&
3188  N->getOperand(N->getNumOperands()-1).getValueType() == MVT::Glue)
3189  InputGlue = N->getOperand(N->getNumOperands()-1);
3190  continue;
3191 
3192  case OPC_MoveChild: {
3193  unsigned ChildNo = MatcherTable[MatcherIndex++];
3194  if (ChildNo >= N.getNumOperands())
3195  break; // Match fails if out of range child #.
3196  N = N.getOperand(ChildNo);
3197  NodeStack.push_back(N);
3198  continue;
3199  }
3200 
3201  case OPC_MoveChild0: case OPC_MoveChild1:
3202  case OPC_MoveChild2: case OPC_MoveChild3:
3203  case OPC_MoveChild4: case OPC_MoveChild5:
3204  case OPC_MoveChild6: case OPC_MoveChild7: {
3205  unsigned ChildNo = Opcode-OPC_MoveChild0;
3206  if (ChildNo >= N.getNumOperands())
3207  break; // Match fails if out of range child #.
3208  N = N.getOperand(ChildNo);
3209  NodeStack.push_back(N);
3210  continue;
3211  }
3212 
3213  case OPC_MoveParent:
3214  // Pop the current node off the NodeStack.
3215  NodeStack.pop_back();
3216  assert(!NodeStack.empty() && "Node stack imbalance!");
3217  N = NodeStack.back();
3218  continue;
3219 
3220  case OPC_CheckSame:
3221  if (!::CheckSame(MatcherTable, MatcherIndex, N, RecordedNodes)) break;
3222  continue;
3223 
3226  if (!::CheckChildSame(MatcherTable, MatcherIndex, N, RecordedNodes,
3227  Opcode-OPC_CheckChild0Same))
3228  break;
3229  continue;
3230 
3232  if (!::CheckPatternPredicate(MatcherTable, MatcherIndex, *this)) break;
3233  continue;
3234  case OPC_CheckPredicate:
3235  if (!::CheckNodePredicate(MatcherTable, MatcherIndex, *this,
3236  N.getNode()))
3237  break;
3238  continue;
3240  unsigned OpNum = MatcherTable[MatcherIndex++];
3241  SmallVector<SDValue, 8> Operands;
3242 
3243  for (unsigned i = 0; i < OpNum; ++i)
3244  Operands.push_back(RecordedNodes[MatcherTable[MatcherIndex++]].first);
3245 
3246  unsigned PredNo = MatcherTable[MatcherIndex++];
3247  if (!CheckNodePredicateWithOperands(N.getNode(), PredNo, Operands))
3248  break;
3249  continue;
3250  }
3251  case OPC_CheckComplexPat: {
3252  unsigned CPNum = MatcherTable[MatcherIndex++];
3253  unsigned RecNo = MatcherTable[MatcherIndex++];
3254  assert(RecNo < RecordedNodes.size() && "Invalid CheckComplexPat");
3255 
3256  // If target can modify DAG during matching, keep the matching state
3257  // consistent.
3258  std::unique_ptr<MatchStateUpdater> MSU;
3260  MSU.reset(new MatchStateUpdater(*CurDAG, &NodeToMatch, RecordedNodes,
3261  MatchScopes));
3262 
3263  if (!CheckComplexPattern(NodeToMatch, RecordedNodes[RecNo].second,
3264  RecordedNodes[RecNo].first, CPNum,
3265  RecordedNodes))
3266  break;
3267  continue;
3268  }
3269  case OPC_CheckOpcode:
3270  if (!::CheckOpcode(MatcherTable, MatcherIndex, N.getNode())) break;
3271  continue;
3272 
3273  case OPC_CheckType:
3274  if (!::CheckType(MatcherTable, MatcherIndex, N, TLI,
3275  CurDAG->getDataLayout()))
3276  break;
3277  continue;
3278 
3279  case OPC_CheckTypeRes: {
3280  unsigned Res = MatcherTable[MatcherIndex++];
3281  if (!::CheckType(MatcherTable, MatcherIndex, N.getValue(Res), TLI,
3282  CurDAG->getDataLayout()))
3283  break;
3284  continue;
3285  }
3286 
3287  case OPC_SwitchOpcode: {
3288  unsigned CurNodeOpcode = N.getOpcode();
3289  unsigned SwitchStart = MatcherIndex-1; (void)SwitchStart;
3290  unsigned CaseSize;
3291  while (true) {
3292  // Get the size of this case.
3293  CaseSize = MatcherTable[MatcherIndex++];
3294  if (CaseSize & 128)
3295  CaseSize = GetVBR(CaseSize, MatcherTable, MatcherIndex);
3296  if (CaseSize == 0) break;
3297 
3298  uint16_t Opc = MatcherTable[MatcherIndex++];
3299  Opc |= (unsigned short)MatcherTable[MatcherIndex++] << 8;
3300 
3301  // If the opcode matches, then we will execute this case.
3302  if (CurNodeOpcode == Opc)
3303  break;
3304 
3305  // Otherwise, skip over this case.
3306  MatcherIndex += CaseSize;
3307  }
3308 
3309  // If no cases matched, bail out.
3310  if (CaseSize == 0) break;
3311 
3312  // Otherwise, execute the case we found.
3313  LLVM_DEBUG(dbgs() << " OpcodeSwitch from " << SwitchStart << " to "
3314  << MatcherIndex << "\n");
3315  continue;
3316  }
3317 
3318  case OPC_SwitchType: {
3319  MVT CurNodeVT = N.getSimpleValueType();
3320  unsigned SwitchStart = MatcherIndex-1; (void)SwitchStart;
3321  unsigned CaseSize;
3322  while (true) {
3323  // Get the size of this case.
3324  CaseSize = MatcherTable[MatcherIndex++];
3325  if (CaseSize & 128)
3326  CaseSize = GetVBR(CaseSize, MatcherTable, MatcherIndex);
3327  if (CaseSize == 0) break;
3328 
3329  MVT CaseVT = (MVT::SimpleValueType)MatcherTable[MatcherIndex++];
3330  if (CaseVT == MVT::iPTR)
3331  CaseVT = TLI->getPointerTy(CurDAG->getDataLayout());
3332 
3333  // If the VT matches, then we will execute this case.
3334  if (CurNodeVT == CaseVT)
3335  break;
3336 
3337  // Otherwise, skip over this case.
3338  MatcherIndex += CaseSize;
3339  }
3340 
3341  // If no cases matched, bail out.
3342  if (CaseSize == 0) break;
3343 
3344  // Otherwise, execute the case we found.
3345  LLVM_DEBUG(dbgs() << " TypeSwitch[" << EVT(CurNodeVT).getEVTString()
3346  << "] from " << SwitchStart << " to " << MatcherIndex
3347  << '\n');
3348  continue;
3349  }
3354  if (!::CheckChildType(MatcherTable, MatcherIndex, N, TLI,
3355  CurDAG->getDataLayout(),
3356  Opcode - OPC_CheckChild0Type))
3357  break;
3358  continue;
3359  case OPC_CheckCondCode:
3360  if (!::CheckCondCode(MatcherTable, MatcherIndex, N)) break;
3361  continue;
3362  case OPC_CheckValueType:
3363  if (!::CheckValueType(MatcherTable, MatcherIndex, N, TLI,
3364  CurDAG->getDataLayout()))
3365  break;
3366  continue;
3367  case OPC_CheckInteger:
3368  if (!::CheckInteger(MatcherTable, MatcherIndex, N)) break;
3369  continue;
3373  if (!::CheckChildInteger(MatcherTable, MatcherIndex, N,
3374  Opcode-OPC_CheckChild0Integer)) break;
3375  continue;
3376  case OPC_CheckAndImm:
3377  if (!::CheckAndImm(MatcherTable, MatcherIndex, N, *this)) break;
3378  continue;
3379  case OPC_CheckOrImm:
3380  if (!::CheckOrImm(MatcherTable, MatcherIndex, N, *this)) break;
3381  continue;
3382 
3384  assert(NodeStack.size() != 1 && "No parent node");
3385  // Verify that all intermediate nodes between the root and this one have
3386  // a single use.
3387  bool HasMultipleUses = false;
3388  for (unsigned i = 1, e = NodeStack.size()-1; i != e; ++i)
3389  if (!NodeStack[i].getNode()->hasOneUse()) {
3390  HasMultipleUses = true;
3391  break;
3392  }
3393  if (HasMultipleUses) break;
3394 
3395  // Check to see that the target thinks this is profitable to fold and that
3396  // we can fold it without inducing cycles in the graph.
3397  if (!IsProfitableToFold(N, NodeStack[NodeStack.size()-2].getNode(),
3398  NodeToMatch) ||
3399  !IsLegalToFold(N, NodeStack[NodeStack.size()-2].getNode(),
3400  NodeToMatch, OptLevel,
3401  true/*We validate our own chains*/))
3402  break;
3403 
3404  continue;
3405  }
3406  case OPC_EmitInteger: {
3408  (MVT::SimpleValueType)MatcherTable[MatcherIndex++];
3409  int64_t Val = MatcherTable[MatcherIndex++];
3410  if (Val & 128)
3411  Val = GetVBR(Val, MatcherTable, MatcherIndex);
3412  RecordedNodes.push_back(std::pair<SDValue, SDNode*>(
3413  CurDAG->getTargetConstant(Val, SDLoc(NodeToMatch),
3414  VT), nullptr));
3415  continue;
3416  }
3417  case OPC_EmitRegister: {
3419  (MVT::SimpleValueType)MatcherTable[MatcherIndex++];
3420  unsigned RegNo = MatcherTable[MatcherIndex++];
3421  RecordedNodes.push_back(std::pair<SDValue, SDNode*>(
3422  CurDAG->getRegister(RegNo, VT), nullptr));
3423  continue;
3424  }
3425  case OPC_EmitRegister2: {
3426  // For targets w/ more than 256 register names, the register enum
3427  // values are stored in two bytes in the matcher table (just like
3428  // opcodes).
3430  (MVT::SimpleValueType)MatcherTable[MatcherIndex++];
3431  unsigned RegNo = MatcherTable[MatcherIndex++];
3432  RegNo |= MatcherTable[MatcherIndex++] << 8;
3433  RecordedNodes.push_back(std::pair<SDValue, SDNode*>(
3434  CurDAG->getRegister(RegNo, VT), nullptr));
3435  continue;
3436  }
3437 
3438  case OPC_EmitConvertToTarget: {
3439  // Convert from IMM/FPIMM to target version.
3440  unsigned RecNo = MatcherTable[MatcherIndex++];
3441  assert(RecNo < RecordedNodes.size() && "Invalid EmitConvertToTarget");
3442  SDValue Imm = RecordedNodes[RecNo].first;
3443 
3444  if (Imm->getOpcode() == ISD::Constant) {
3445  const ConstantInt *Val=cast<ConstantSDNode>(Imm)->getConstantIntValue();
3446  Imm = CurDAG->getTargetConstant(*Val, SDLoc(NodeToMatch),
3447  Imm.getValueType());
3448  } else if (Imm->getOpcode() == ISD::ConstantFP) {
3449  const ConstantFP *Val=cast<ConstantFPSDNode>(Imm)->getConstantFPValue();
3450  Imm = CurDAG->getTargetConstantFP(*Val, SDLoc(NodeToMatch),
3451  Imm.getValueType());
3452  }
3453 
3454  RecordedNodes.push_back(std::make_pair(Imm, RecordedNodes[RecNo].second));
3455  continue;
3456  }
3457 
3458  case OPC_EmitMergeInputChains1_0: // OPC_EmitMergeInputChains, 1, 0
3459  case OPC_EmitMergeInputChains1_1: // OPC_EmitMergeInputChains, 1, 1
3460  case OPC_EmitMergeInputChains1_2: { // OPC_EmitMergeInputChains, 1, 2
3461  // These are space-optimized forms of OPC_EmitMergeInputChains.
3462  assert(!InputChain.getNode() &&
3463  "EmitMergeInputChains should be the first chain producing node");
3464  assert(ChainNodesMatched.empty() &&
3465  "Should only have one EmitMergeInputChains per match");
3466 
3467  // Read all of the chained nodes.
3468  unsigned RecNo = Opcode - OPC_EmitMergeInputChains1_0;
3469  assert(RecNo < RecordedNodes.size() && "Invalid EmitMergeInputChains");
3470  ChainNodesMatched.push_back(RecordedNodes[RecNo].first.getNode());
3471 
3472  // FIXME: What if other value results of the node have uses not matched
3473  // by this pattern?
3474  if (ChainNodesMatched.back() != NodeToMatch &&
3475  !RecordedNodes[RecNo].first.hasOneUse()) {
3476  ChainNodesMatched.clear();
3477  break;
3478  }
3479 
3480  // Merge the input chains if they are not intra-pattern references.
3481  InputChain = HandleMergeInputChains(ChainNodesMatched, CurDAG);
3482 
3483  if (!InputChain.getNode())
3484  break; // Failed to merge.
3485  continue;
3486  }
3487 
3488  case OPC_EmitMergeInputChains: {
3489  assert(!InputChain.getNode() &&
3490  "EmitMergeInputChains should be the first chain producing node");
3491  // This node gets a list of nodes we matched in the input that have
3492  // chains. We want to token factor all of the input chains to these nodes
3493  // together. However, if any of the input chains is actually one of the
3494  // nodes matched in this pattern, then we have an intra-match reference.
3495  // Ignore these because the newly token factored chain should not refer to
3496  // the old nodes.
3497  unsigned NumChains = MatcherTable[MatcherIndex++];
3498  assert(NumChains != 0 && "Can't TF zero chains");
3499 
3500  assert(ChainNodesMatched.empty() &&
3501  "Should only have one EmitMergeInputChains per match");
3502 
3503  // Read all of the chained nodes.
3504  for (unsigned i = 0; i != NumChains; ++i) {
3505  unsigned RecNo = MatcherTable[MatcherIndex++];
3506  assert(RecNo < RecordedNodes.size() && "Invalid EmitMergeInputChains");
3507  ChainNodesMatched.push_back(RecordedNodes[RecNo].first.getNode());
3508 
3509  // FIXME: What if other value results of the node have uses not matched
3510  // by this pattern?
3511  if (ChainNodesMatched.back() != NodeToMatch &&
3512  !RecordedNodes[RecNo].first.hasOneUse()) {
3513  ChainNodesMatched.clear();
3514  break;
3515  }
3516  }
3517 
3518  // If the inner loop broke out, the match fails.
3519  if (ChainNodesMatched.empty())
3520  break;
3521 
3522  // Merge the input chains if they are not intra-pattern references.
3523  InputChain = HandleMergeInputChains(ChainNodesMatched, CurDAG);
3524 
3525  if (!InputChain.getNode())
3526  break; // Failed to merge.
3527 
3528  continue;
3529  }
3530 
3531  case OPC_EmitCopyToReg: {
3532  unsigned RecNo = MatcherTable[MatcherIndex++];
3533  assert(RecNo < RecordedNodes.size() && "Invalid EmitCopyToReg");
3534  unsigned DestPhysReg = MatcherTable[MatcherIndex++];
3535 
3536  if (!InputChain.getNode())
3537  InputChain = CurDAG->getEntryNode();
3538 
3539  InputChain = CurDAG->getCopyToReg(InputChain, SDLoc(NodeToMatch),
3540  DestPhysReg, RecordedNodes[RecNo].first,
3541  InputGlue);
3542 
3543  InputGlue = InputChain.getValue(1);
3544  continue;
3545  }
3546 
3547  case OPC_EmitNodeXForm: {
3548  unsigned XFormNo = MatcherTable[MatcherIndex++];
3549  unsigned RecNo = MatcherTable[MatcherIndex++];
3550  assert(RecNo < RecordedNodes.size() && "Invalid EmitNodeXForm");
3551  SDValue Res = RunSDNodeXForm(RecordedNodes[RecNo].first, XFormNo);
3552  RecordedNodes.push_back(std::pair<SDValue,SDNode*>(Res, nullptr));
3553  continue;
3554  }
3555  case OPC_Coverage: {
3556  // This is emitted right before MorphNode/EmitNode.
3557  // So it should be safe to assume that this node has been selected
3558  unsigned index = MatcherTable[MatcherIndex++];
3559  index |= (MatcherTable[MatcherIndex++] << 8);
3560  dbgs() << "COVERED: " << getPatternForIndex(index) << "\n";
3561  dbgs() << "INCLUDED: " << getIncludePathForIndex(index) << "\n";
3562  continue;
3563  }
3564 
3565  case OPC_EmitNode: case OPC_MorphNodeTo:
3566  case OPC_EmitNode0: case OPC_EmitNode1: case OPC_EmitNode2:
3568  uint16_t TargetOpc = MatcherTable[MatcherIndex++];
3569  TargetOpc |= (unsigned short)MatcherTable[MatcherIndex++] << 8;
3570  unsigned EmitNodeInfo = MatcherTable[MatcherIndex++];
3571  // Get the result VT list.
3572  unsigned NumVTs;
3573  // If this is one of the compressed forms, get the number of VTs based
3574  // on the Opcode. Otherwise read the next byte from the table.
3575  if (Opcode >= OPC_MorphNodeTo0 && Opcode <= OPC_MorphNodeTo2)
3576  NumVTs = Opcode - OPC_MorphNodeTo0;
3577  else if (Opcode >= OPC_EmitNode0 && Opcode <= OPC_EmitNode2)
3578  NumVTs = Opcode - OPC_EmitNode0;
3579  else
3580  NumVTs = MatcherTable[MatcherIndex++];
3581  SmallVector<EVT, 4> VTs;
3582  for (unsigned i = 0; i != NumVTs; ++i) {
3584  (MVT::SimpleValueType)MatcherTable[MatcherIndex++];
3585  if (VT == MVT::iPTR)
3586  VT = TLI->getPointerTy(CurDAG->getDataLayout()).SimpleTy;
3587  VTs.push_back(VT);
3588  }
3589 
3590  if (EmitNodeInfo & OPFL_Chain)
3591  VTs.push_back(MVT::Other);
3592  if (EmitNodeInfo & OPFL_GlueOutput)
3593  VTs.push_back(MVT::Glue);
3594 
3595  // This is hot code, so optimize the two most common cases of 1 and 2
3596  // results.
3597  SDVTList VTList;
3598  if (VTs.size() == 1)
3599  VTList = CurDAG->getVTList(VTs[0]);
3600  else if (VTs.size() == 2)
3601  VTList = CurDAG->getVTList(VTs[0], VTs[1]);
3602  else
3603  VTList = CurDAG->getVTList(VTs);
3604 
3605  // Get the operand list.
3606  unsigned NumOps = MatcherTable[MatcherIndex++];
3608  for (unsigned i = 0; i != NumOps; ++i) {
3609  unsigned RecNo = MatcherTable[MatcherIndex++];
3610  if (RecNo & 128)
3611  RecNo = GetVBR(RecNo, MatcherTable, MatcherIndex);
3612 
3613  assert(RecNo < RecordedNodes.size() && "Invalid EmitNode");
3614  Ops.push_back(RecordedNodes[RecNo].first);
3615  }
3616 
3617  // If there are variadic operands to add, handle them now.
3618  if (EmitNodeInfo & OPFL_VariadicInfo) {
3619  // Determine the start index to copy from.
3620  unsigned FirstOpToCopy = getNumFixedFromVariadicInfo(EmitNodeInfo);
3621  FirstOpToCopy += (EmitNodeInfo & OPFL_Chain) ? 1 : 0;
3622  assert(NodeToMatch->getNumOperands() >= FirstOpToCopy &&
3623  "Invalid variadic node");
3624  // Copy all of the variadic operands, not including a potential glue
3625  // input.
3626  for (unsigned i = FirstOpToCopy, e = NodeToMatch->getNumOperands();
3627  i != e; ++i) {
3628  SDValue V = NodeToMatch->getOperand(i);
3629  if (V.getValueType() == MVT::Glue) break;
3630  Ops.push_back(V);
3631  }
3632  }
3633 
3634  // If this has chain/glue inputs, add them.
3635  if (EmitNodeInfo & OPFL_Chain)
3636  Ops.push_back(InputChain);
3637  if ((EmitNodeInfo & OPFL_GlueInput) && InputGlue.getNode() != nullptr)
3638  Ops.push_back(InputGlue);
3639 
3640  // Create the node.
3641  MachineSDNode *Res = nullptr;
3642  bool IsMorphNodeTo = Opcode == OPC_MorphNodeTo ||
3643  (Opcode >= OPC_MorphNodeTo0 && Opcode <= OPC_MorphNodeTo2);
3644  if (!IsMorphNodeTo) {
3645  // If this is a normal EmitNode command, just create the new node and
3646  // add the results to the RecordedNodes list.
3647  Res = CurDAG->getMachineNode(TargetOpc, SDLoc(NodeToMatch),
3648  VTList, Ops);
3649 
3650  // Add all the non-glue/non-chain results to the RecordedNodes list.
3651  for (unsigned i = 0, e = VTs.size(); i != e; ++i) {
3652  if (VTs[i] == MVT::Other || VTs[i] == MVT::Glue) break;
3653  RecordedNodes.push_back(std::pair<SDValue,SDNode*>(SDValue(Res, i),
3654  nullptr));
3655  }
3656  } else {
3657  assert(NodeToMatch->getOpcode() != ISD::DELETED_NODE &&
3658  "NodeToMatch was removed partway through selection");
3660  SDNode *E) {
3661  CurDAG->salvageDebugInfo(*N);
3662  auto &Chain = ChainNodesMatched;
3663  assert((!E || !is_contained(Chain, N)) &&
3664  "Chain node replaced during MorphNode");
3665  Chain.erase(std::remove(Chain.begin(), Chain.end(), N), Chain.end());
3666  });
3667  Res = cast<MachineSDNode>(MorphNode(NodeToMatch, TargetOpc, VTList,
3668  Ops, EmitNodeInfo));
3669  }
3670 
3671  // If the node had chain/glue results, update our notion of the current
3672  // chain and glue.
3673  if (EmitNodeInfo & OPFL_GlueOutput) {
3674  InputGlue = SDValue(Res, VTs.size()-1);
3675  if (EmitNodeInfo & OPFL_Chain)
3676  InputChain = SDValue(Res, VTs.size()-2);
3677  } else if (EmitNodeInfo & OPFL_Chain)
3678  InputChain = SDValue(Res, VTs.size()-1);
3679 
3680  // If the OPFL_MemRefs glue is set on this node, slap all of the
3681  // accumulated memrefs onto it.
3682  //
3683  // FIXME: This is vastly incorrect for patterns with multiple outputs
3684  // instructions that access memory and for ComplexPatterns that match
3685  // loads.
3686  if (EmitNodeInfo & OPFL_MemRefs) {
3687  // Only attach load or store memory operands if the generated
3688  // instruction may load or store.
3689  const MCInstrDesc &MCID = TII->get(TargetOpc);
3690  bool mayLoad = MCID.mayLoad();
3691  bool mayStore = MCID.mayStore();
3692 
3693  // We expect to have relatively few of these so just filter them into a
3694  // temporary buffer so that we can easily add them to the instruction.
3695  SmallVector<MachineMemOperand *, 4> FilteredMemRefs;
3696  for (MachineMemOperand *MMO : MatchedMemRefs) {
3697  if (MMO->isLoad()) {
3698  if (mayLoad)
3699  FilteredMemRefs.push_back(MMO);
3700  } else if (MMO->isStore()) {
3701  if (mayStore)
3702  FilteredMemRefs.push_back(MMO);
3703  } else {
3704  FilteredMemRefs.push_back(MMO);
3705  }
3706  }
3707 
3708  CurDAG->setNodeMemRefs(Res, FilteredMemRefs);
3709  }
3710 
3711  LLVM_DEBUG(if (!MatchedMemRefs.empty() && Res->memoperands_empty()) dbgs()
3712  << " Dropping mem operands\n";
3713  dbgs() << " " << (IsMorphNodeTo ? "Morphed" : "Created")
3714  << " node: ";
3715  Res->dump(CurDAG););
3716 
3717  // If this was a MorphNodeTo then we're completely done!
3718  if (IsMorphNodeTo) {
3719  // Update chain uses.
3720  UpdateChains(Res, InputChain, ChainNodesMatched, true);
3721  return;
3722  }
3723  continue;
3724  }
3725 
3726  case OPC_CompleteMatch: {
3727  // The match has been completed, and any new nodes (if any) have been
3728  // created. Patch up references to the matched dag to use the newly
3729  // created nodes.
3730  unsigned NumResults = MatcherTable[MatcherIndex++];
3731 
3732  for (unsigned i = 0; i != NumResults; ++i) {
3733  unsigned ResSlot = MatcherTable[MatcherIndex++];
3734  if (ResSlot & 128)
3735  ResSlot = GetVBR(ResSlot, MatcherTable, MatcherIndex);
3736 
3737  assert(ResSlot < RecordedNodes.size() && "Invalid CompleteMatch");
3738  SDValue Res = RecordedNodes[ResSlot].first;
3739 
3740  assert(i < NodeToMatch->getNumValues() &&
3741  NodeToMatch->getValueType(i) != MVT::Other &&
3742  NodeToMatch->getValueType(i) != MVT::Glue &&
3743  "Invalid number of results to complete!");
3744  assert((NodeToMatch->getValueType(i) == Res.getValueType() ||
3745  NodeToMatch->getValueType(i) == MVT::iPTR ||
3746  Res.getValueType() == MVT::iPTR ||
3747  NodeToMatch->getValueType(i).getSizeInBits() ==
3748  Res.getValueSizeInBits()) &&
3749  "invalid replacement");
3750  ReplaceUses(SDValue(NodeToMatch, i), Res);
3751  }
3752 
3753  // Update chain uses.
3754  UpdateChains(NodeToMatch, InputChain, ChainNodesMatched, false);
3755 
3756  // If the root node defines glue, we need to update it to the glue result.
3757  // TODO: This never happens in our tests and I think it can be removed /
3758  // replaced with an assert, but if we do it this the way the change is
3759  // NFC.
3760  if (NodeToMatch->getValueType(NodeToMatch->getNumValues() - 1) ==
3761  MVT::Glue &&
3762  InputGlue.getNode())
3763  ReplaceUses(SDValue(NodeToMatch, NodeToMatch->getNumValues() - 1),
3764  InputGlue);
3765 
3766  assert(NodeToMatch->use_empty() &&
3767  "Didn't replace all uses of the node?");
3768  CurDAG->RemoveDeadNode(NodeToMatch);
3769 
3770  return;
3771  }
3772  }
3773 
3774  // If the code reached this point, then the match failed. See if there is
3775  // another child to try in the current 'Scope', otherwise pop it until we
3776  // find a case to check.
3777  LLVM_DEBUG(dbgs() << " Match failed at index " << CurrentOpcodeIndex
3778  << "\n");
3779  ++NumDAGIselRetries;
3780  while (true) {
3781  if (MatchScopes.empty()) {
3782  CannotYetSelect(NodeToMatch);
3783  return;
3784  }
3785 
3786  // Restore the interpreter state back to the point where the scope was
3787  // formed.
3788  MatchScope &LastScope = MatchScopes.back();
3789  RecordedNodes.resize(LastScope.NumRecordedNodes);
3790  NodeStack.clear();
3791  NodeStack.append(LastScope.NodeStack.begin(), LastScope.NodeStack.end());
3792  N = NodeStack.back();
3793 
3794  if (LastScope.NumMatchedMemRefs != MatchedMemRefs.size())
3795  MatchedMemRefs.resize(LastScope.NumMatchedMemRefs);
3796  MatcherIndex = LastScope.FailIndex;
3797 
3798  LLVM_DEBUG(dbgs() << " Continuing at " << MatcherIndex << "\n");
3799 
3800  InputChain = LastScope.InputChain;
3801  InputGlue = LastScope.InputGlue;
3802  if (!LastScope.HasChainNodesMatched)
3803  ChainNodesMatched.clear();
3804 
3805  // Check to see what the offset is at the new MatcherIndex. If it is zero
3806  // we have reached the end of this scope, otherwise we have another child
3807  // in the current scope to try.
3808  unsigned NumToSkip = MatcherTable[MatcherIndex++];
3809  if (NumToSkip & 128)
3810  NumToSkip = GetVBR(NumToSkip, MatcherTable, MatcherIndex);
3811 
3812  // If we have another child in this scope to match, update FailIndex and
3813  // try it.
3814  if (NumToSkip != 0) {
3815  LastScope.FailIndex = MatcherIndex+NumToSkip;
3816  break;
3817  }
3818 
3819  // End of this scope, pop it and try the next child in the containing
3820  // scope.
3821  MatchScopes.pop_back();
3822  }
3823  }
3824 }
3825 
3827  assert(N->getOpcode() == ISD::OR && "Unexpected opcode");
3828  auto *C = dyn_cast<ConstantSDNode>(N->getOperand(1));
3829  if (!C)
3830  return false;
3831 
3832  // Detect when "or" is used to add an offset to a stack object.
3833  if (auto *FN = dyn_cast<FrameIndexSDNode>(N->getOperand(0))) {
3834  MachineFrameInfo &MFI = MF->getFrameInfo();
3835  unsigned A = MFI.getObjectAlignment(FN->getIndex());
3836  assert(isPowerOf2_32(A) && "Unexpected alignment");
3837  int32_t Off = C->getSExtValue();
3838  // If the alleged offset fits in the zero bits guaranteed by
3839  // the alignment, then this or is really an add.
3840  return (Off >= 0) && (((A - 1) & Off) == unsigned(Off));
3841  }
3842  return false;
3843 }
3844 
3845 void SelectionDAGISel::CannotYetSelect(SDNode *N) {
3846  std::string msg;
3847  raw_string_ostream Msg(msg);
3848  Msg << "Cannot select: ";
3849 
3850  if (N->getOpcode() != ISD::INTRINSIC_W_CHAIN &&
3852  N->getOpcode() != ISD::INTRINSIC_VOID) {
3853  N->printrFull(Msg, CurDAG);
3854  Msg << "\nIn function: " << MF->getName();
3855  } else {
3856  bool HasInputChain = N->getOperand(0).getValueType() == MVT::Other;
3857  unsigned iid =
3858  cast<ConstantSDNode>(N->getOperand(HasInputChain))->getZExtValue();
3859  if (iid < Intrinsic::num_intrinsics)
3860  Msg << "intrinsic %" << Intrinsic::getName((Intrinsic::ID)iid, None);
3861  else if (const TargetIntrinsicInfo *TII = TM.getIntrinsicInfo())
3862  Msg << "target intrinsic %" << TII->getName(iid);
3863  else
3864  Msg << "unknown intrinsic #" << iid;
3865  }
3866  report_fatal_error(Msg.str());
3867 }
3868 
3869 char SelectionDAGISel::ID = 0;
ANNOTATION_LABEL - Represents a mid basic block label used by annotations.
Definition: ISDOpcodes.h:682
SDNode * MorphNodeTo(SDNode *N, unsigned Opc, SDVTList VTs, ArrayRef< SDValue > Ops)
This mutates the specified node to have the specified return type, opcode, and operands.
uint64_t CallInst * C
Return a value (possibly void), from a function.
std::vector< BitTestBlock > BitTestCases
Vector of BitTestBlock structures used to communicate SwitchInst code generation information.
Diagnostic information for ISel fallback path.
SDNode * SelectNodeTo(SDNode *N, unsigned MachineOpc, EVT VT)
These are used for target selectors to mutate the specified node to have the specified return type...
SelectionDAGBuilder * SDB
static LLVM_ATTRIBUTE_ALWAYS_INLINE bool CheckAndImm(const unsigned char *MatcherTable, unsigned &MatcherIndex, SDValue N, const SelectionDAGISel &SDISel)
A parsed version of the target data layout string in and methods for querying it. ...
Definition: DataLayout.h:110
mop_iterator operands_end()
Definition: MachineInstr.h:453
EVT getValueType() const
Return the ValueType of the referenced return value.
static SDNode * findGlueUse(SDNode *N)
findGlueUse - Return use of MVT::Glue value produced by the specified SDNode.
void EmitLiveInCopies(MachineBasicBlock *EntryMBB, const TargetRegisterInfo &TRI, const TargetInstrInfo &TII)
EmitLiveInCopies - Emit copies to initialize livein virtual registers into the given entry block...
static LLVM_ATTRIBUTE_ALWAYS_INLINE bool CheckCondCode(const unsigned char *MatcherTable, unsigned &MatcherIndex, SDValue N)
AnalysisUsage & addPreserved()
Add the specified Pass class to the set of analyses preserved by this pass.
bool hasPostISelHook(QueryType Type=IgnoreBundle) const
Return true if this instruction requires adjustment after instruction selection by calling a target h...
Definition: MachineInstr.h:885
Diagnostic information for missed-optimization remarks.
static PassRegistry * getPassRegistry()
getPassRegistry - Access the global registry object, which is automatically initialized at applicatio...
uint64_t getZExtValue() const
Get zero extended value.
Definition: APInt.h:1562
GCNRegPressure max(const GCNRegPressure &P1, const GCNRegPressure &P2)
unsigned getOpcode() const
Return the SelectionDAG opcode value for this node.
static int getNumFixedFromVariadicInfo(unsigned Flags)
getNumFixedFromVariadicInfo - Transform an EmitNode flags word into the number of fixed arity values ...
This class represents an incoming formal argument to a Function.
Definition: Argument.h:29
bool LegalizeTypes()
This transforms the SelectionDAG into a SelectionDAG that only uses types natively supported by the t...
DiagnosticInfoOptimizationBase::Argument NV
GCFunctionInfo * GFI
virtual bool IsProfitableToFold(SDValue N, SDNode *U, SDNode *Root) const
IsProfitableToFold - Returns true if it&#39;s profitable to fold the specific operand node N of U during ...
static LLVM_ATTRIBUTE_ALWAYS_INLINE bool CheckValueType(const unsigned char *MatcherTable, unsigned &MatcherIndex, SDValue N, const TargetLowering *TLI, const DataLayout &DL)
DELETED_NODE - This is an illegal value that is used to catch errors.
Definition: ISDOpcodes.h:41
MDNODE_SDNODE - This is a node that holdes an MDNode*, which is used to reference metadata in the IR...
Definition: ISDOpcodes.h:740
static unsigned virtReg2Index(unsigned Reg)
Convert a virtual register number to a 0-based index.
LLVM_ATTRIBUTE_NORETURN void report_fatal_error(Error Err, bool gen_crash_diag=true)
Report a serious error, calling any installed error handler.
Definition: Error.cpp:139
This class represents lattice values for constants.
Definition: AllocatorList.h:23
Various leaf nodes.
Definition: ISDOpcodes.h:59
MCSymbol * addLandingPad(MachineBasicBlock *LandingPad)
Add a new panding pad, and extract the exception handling information from the landingpad instruction...
static cl::opt< bool > ViewISelDAGs("view-isel-dags", cl::Hidden, cl::desc("Pop up a window to show isel dags as they are selected"))
MCSymbol - Instances of this class represent a symbol name in the MC file, and MCSymbols are created ...
Definition: MCSymbol.h:41
unsigned getCatchPadExceptionPointerVReg(const Value *CPI, const TargetRegisterClass *RC)
DenseMap< MachineBasicBlock *, SmallVector< unsigned, 4 > > LPadToCallSiteMap
Map a landing pad to the call site indexes.
unsigned EnableFastISel
EnableFastISel - This flag enables fast-path instruction selection which trades away generated code q...
static void propagateSwiftErrorVRegs(FunctionLoweringInfo *FuncInfo)
Propagate swifterror values through the machine function CFG.
virtual const TargetRegisterInfo * getRegisterInfo() const
getRegisterInfo - If register information is available, return it.
void ReplaceUses(SDValue F, SDValue T)
ReplaceUses - replace all uses of the old node F with the use of the new node T.
std::error_code remove(const Twine &path, bool IgnoreNonExisting=true)
Remove path.
Sched::Preference getSchedulingPreference() const
Return target scheduling preference.
bool CheckAndMask(SDValue LHS, ConstantSDNode *RHS, int64_t DesiredMaskS) const
CheckAndMask - The isel is trying to match something like (and X, 255).
const MachineFunctionProperties & getProperties() const
Get the function properties.
virtual unsigned getRegisterByName(const char *RegName, EVT VT, SelectionDAG &DAG) const
Return the register ID of the name passed in.
SelectionDAGBuilder - This is the common target-independent lowering implementation that is parameter...
EVT getValueType(unsigned ResNo) const
Return the type of a specified result.
const DebugLoc & getDebugLoc() const
Returns the debug location id of this MachineInstr.
Definition: MachineInstr.h:382
virtual bool CheckNodePredicateWithOperands(SDNode *N, unsigned PredNo, const SmallVectorImpl< SDValue > &Operands) const
CheckNodePredicateWithOperands - This function is generated by tblgen in the target.
iterator getFirstTerminator()
Returns an iterator to the first terminator instruction of this basic block.
static unsigned getFlagWord(unsigned Kind, unsigned NumOps)
Definition: InlineAsm.h:268
Describe properties that are true of each instruction in the target description file.
Definition: MCInstrDesc.h:163
void initializeTargetLibraryInfoWrapperPassPass(PassRegistry &)
Clients of various APIs that cause global effects on the DAG can optionally implement this interface...
Definition: SelectionDAG.h:279
unsigned getReg() const
getReg - Returns the register number.
friend struct DAGUpdateListener
DAGUpdateListener is a friend so it can manipulate the listener stack.
Definition: SelectionDAG.h:324
static bool isVirtualRegister(unsigned Reg)
Return true if the specified register number is in the virtual register namespace.
unsigned Reg
bool LegalizeVectors()
This transforms the SelectionDAG into a SelectionDAG that only uses vector math operations supported ...
This file contains the declarations for metadata subclasses.
virtual const TargetLowering * getTargetLowering() const
bool mayWriteToMemory() const
Return true if this instruction may modify memory.
bool isSubsetOf(const APInt &RHS) const
This operation checks that all bits set in this APInt are also set in RHS.
Definition: APInt.h:1328
void copyToMachineFrameInfo(MachineFrameInfo &MFI) const
iterator insertAfter(iterator I, MachineInstr *MI)
Insert MI into the instruction list after I.
unsigned getResNo() const
Convenience function for get().getResNo().
void visitJumpTableHeader(JumpTable &JT, JumpTableHeader &JTH, MachineBasicBlock *SwitchBB)
visitJumpTableHeader - This function emits necessary code to produce index in the JumpTable from swit...
bool NewNodesMustHaveLegalTypes
When true, additional steps are taken to ensure that getConstant() and similar functions return DAG n...
Definition: SelectionDAG.h:320
TargetGlobalAddress - Like GlobalAddress, but the DAG does no folding or anything else with this node...
Definition: ISDOpcodes.h:130
bool isTerminator() const
Definition: Instruction.h:128
unsigned second
arg_iterator arg_end()
Definition: Function.h:679
virtual const TargetRegisterClass * getRegClassFor(MVT VT) const
Return the register class that should be used for the specified value type.
STATISTIC(NumFunctions, "Total number of functions")
static bool findNonImmUse(SDNode *Root, SDNode *Def, SDNode *ImmedUse, bool IgnoreChains)
findNonImmUse - Return true if "Def" is a predecessor of "Root" via a path beyond "ImmedUse"...
unsigned const TargetRegisterInfo * TRI
A debug info location.
Definition: DebugLoc.h:33
Metadata node.
Definition: Metadata.h:863
bool isInteger() const
Return true if this is an integer or a vector integer type.
Definition: ValueTypes.h:140
F(f)
const MDOperand & getOperand(unsigned I) const
Definition: Metadata.h:1068
An instruction for reading from memory.
Definition: Instructions.h:167
RegisterPassParser class - Handle the addition of new machine passes.
void setNodeId(int Id)
Set unique node id.
SDNode * getNode() const
get the SDNode which holds the desired result
static MachineBasicBlock::iterator FindSplitPointForStackProtector(MachineBasicBlock *BB)
Find the split point at which to splice the end of BB into its success stack protector check machine ...
const Instruction * getTerminator() const LLVM_READONLY
Returns the terminator instruction if the block is well formed or null if the block is not well forme...
Definition: BasicBlock.cpp:137
SDValue getNode(unsigned Opcode, const SDLoc &DL, EVT VT, ArrayRef< SDUse > Ops)
Gets or creates the specified node.
const SDValue & setRoot(SDValue N)
Set the current root tag of the SelectionDAG.
Definition: SelectionDAG.h:465
bool mayLoad() const
Return true if this instruction could possibly read memory.
Definition: MCInstrDesc.h:398
bool isPHI() const
void viewGraph(const std::string &Title)
Pop up a GraphViz/gv window with the DAG rendered using &#39;dot&#39;.
static LLVM_ATTRIBUTE_ALWAYS_INLINE bool CheckChildType(const unsigned char *MatcherTable, unsigned &MatcherIndex, SDValue N, const TargetLowering *TLI, const DataLayout &DL, unsigned ChildNo)
unsigned getValueSizeInBits() const
Returns the size of the value in bits.
EntryToken - This is the marker used to indicate the start of a region.
Definition: ISDOpcodes.h:44
void init(MachineFunction &NewMF, OptimizationRemarkEmitter &NewORE, Pass *PassPtr, const TargetLibraryInfo *LibraryInfo, LegacyDivergenceAnalysis *Divergence)
Prepare this SelectionDAG to process code in the given MachineFunction.
void setCurrentSwiftErrorVReg(const MachineBasicBlock *MBB, const Value *, unsigned)
Set the swifterror virtual register in the SwiftErrorVRegDefMap for this basic block.
static bool isUseOperandTiedToDef(unsigned Flag, unsigned &Idx)
isUseOperandTiedToDef - Return true if the flag of the inline asm operand indicates it is an use oper...
Definition: InlineAsm.h:341
bool isReturn() const
Return true if the instruction is a return.
Definition: MCInstrDesc.h:245
bool hasOneUse() const
Return true if there is exactly one node using value ResNo of Node.
MachineFunction * MF
ScheduleDAGSDNodes * createHybridListDAGScheduler(SelectionDAGISel *IS, CodeGenOpt::Level)
createHybridListDAGScheduler - This creates a bottom up register pressure aware list scheduler that m...
static LLVM_ATTRIBUTE_ALWAYS_INLINE bool CheckChildSame(const unsigned char *MatcherTable, unsigned &MatcherIndex, SDValue N, const SmallVectorImpl< std::pair< SDValue, SDNode *>> &RecordedNodes, unsigned ChildNo)
CheckChildSame - Implements OP_CheckChildXSame.
const TargetLibraryInfo * LibInfo
void set(const Function &Fn, MachineFunction &MF, SelectionDAG *DAG)
set - Initialize this FunctionLoweringInfo with the given Function and its associated MachineFunction...
static void setupSwiftErrorVals(const Function &Fn, const TargetLowering *TLI, FunctionLoweringInfo *FuncInfo)
Set up SwiftErrorVals by going through the function.
static DIExpression * prepend(const DIExpression *Expr, bool DerefBefore, int64_t Offset=0, bool DerefAfter=false, bool StackValue=false)
Prepend DIExpr with a deref and offset operation and optionally turn it into a stack value...
RESULT,OUTCHAIN = INTRINSIC_W_CHAIN(INCHAIN, INTRINSICID, arg1, ...) This node represents a target in...
Definition: ISDOpcodes.h:158
static bool hasPredecessorHelper(const SDNode *N, SmallPtrSetImpl< const SDNode *> &Visited, SmallVectorImpl< const SDNode *> &Worklist, unsigned int MaxSteps=0, bool TopologicalPrune=false)
Returns true if N is a predecessor of any node in Worklist.
bool selectInstruction(const Instruction *I)
Do "fast" instruction selection for the given LLVM IR instruction and append the generated machine in...
Definition: FastISel.cpp:1591
static bool isFoldedOrDeadInstruction(const Instruction *I, FunctionLoweringInfo *FuncInfo)
isFoldedOrDeadInstruction - Return true if the specified instruction is side-effect free and is eithe...
StackProtectorDescriptor SPDescriptor
A StackProtectorDescriptor structure used to communicate stack protector information in between Selec...
std::pair< unsigned, bool > getOrCreateSwiftErrorVRegDefAt(const Instruction *)
Get or create the swifterror value virtual register for a def of a swifterror by an instruction...
std::pair< iterator, bool > insert(const std::pair< KeyT, ValueT > &KV)
Definition: DenseMap.h:221
static LLVM_ATTRIBUTE_ALWAYS_INLINE bool CheckSame(const unsigned char *MatcherTable, unsigned &MatcherIndex, SDValue N, const SmallVectorImpl< std::pair< SDValue, SDNode *>> &RecordedNodes)
CheckSame - Implements OP_CheckSame.
void visitSPDescriptorParent(StackProtectorDescriptor &SPD, MachineBasicBlock *ParentBB)
Codegen a new tail for a stack protector check ParentMBB which has had its tail spliced into a stack ...
SDValue getRoot()
Return the current virtual root of the Selection DAG, flushing any PendingLoad items.
void clear()
Clear state and free memory necessary to make this SelectionDAG ready to process a new block...
AnalysisUsage & addRequired()
void setLastLocalValue(MachineInstr *I)
Update the position of the last instruction emitted for materializing constants for use in the curren...
Definition: FastISel.h:241
A description of a memory reference used in the backend.
void resetTargetOptions(const Function &F) const
Reset the target options based on the function&#39;s attributes.
bool hasBranchDivergence() const
Return true if branch divergence exists.
StringRef getName(ID id)
Return the LLVM name for an intrinsic, such as "llvm.ppc.altivec.lvx".
Definition: Function.cpp:625
void visitSwitchCase(CaseBlock &CB, MachineBasicBlock *SwitchBB)
visitSwitchCase - Emits the necessary code to represent a single node in the binary search tree resul...
Option class for critical edge splitting.
MachineFunctionPass - This class adapts the FunctionPass interface to allow convenient creation of pa...
Printable printMBBReference(const MachineBasicBlock &MBB)
Prints a machine basic block reference.
virtual bool ComplexPatternFuncMutatesDAG() const
Return true if complex patterns for this target can mutate the DAG.
static bool IsLegalToFold(SDValue N, SDNode *U, SDNode *Root, CodeGenOpt::Level OptLevel, bool IgnoreChains=false)
IsLegalToFold - Returns true if the specific operand node N of U can be folded during instruction sel...
This class is basically a combination of TimeRegion and Timer.
Definition: Timer.h:160
A Use represents the edge between a Value definition and its users.
Definition: Use.h:55
void resolveOrClearDbgInfo()
Evict any dangling debug information, attempting to salvage it first.
const Value * SwiftErrorArg
The swifterror argument of the current function.
bool isKnownWindowsMSVCEnvironment() const
Checks if the environment is MSVC.
Definition: Triple.h:532
MachineSDNode * getMachineNode(unsigned Opcode, const SDLoc &dl, EVT VT)
These are used for target selectors to create a new node with specified return type(s), MachineInstr opcode, and operands.
void visitJumpTable(JumpTable &JT)
visitJumpTable - Emit JumpTable node in the current MBB
DenseMap< const Value *, unsigned > ValueMap
ValueMap - Since we emit code for the function a basic block at a time, we must remember which virtua...
const TargetLowering * TLI
#define LLVM_ATTRIBUTE_ALWAYS_INLINE
LLVM_ATTRIBUTE_ALWAYS_INLINE - On compilers where we have a directive to do so, mark a method "always...
Definition: Compiler.h:213
This class consists of common code factored out of the SmallVector class to reduce code duplication b...
Definition: APFloat.h:41
CopyToReg - This node has three operands: a chain, a register number to set to this value...
Definition: ISDOpcodes.h:169
const MDNode * getMD() const
virtual bool enableMachineSchedDefaultSched() const
True if the machine scheduler should disable the TLI preference for preRA scheduling with the source ...
op_iterator op_end() const
virtual void Select(SDNode *N)=0
Main hook for targets to transform nodes into machine nodes.
ScheduleDAGSDNodes *(*)(SelectionDAGISel *, CodeGenOpt::Level) FunctionPassCtor
The MachineFrameInfo class represents an abstract stack frame until prolog/epilog code is inserted...
SDValue getEntryNode() const
Return the token chain corresponding to the entry of the function.
Definition: SelectionDAG.h:459
static void EnforceNodeIdInvariant(SDNode *N)
const DataLayout & getDataLayout() const
Definition: SelectionDAG.h:400
An analysis pass which caches information about the entire Module.
Definition: GCMetadata.h:152
SDVTList getVTList(EVT VT)
Return an SDVTList that represents the list of values specified.
static LLVM_ATTRIBUTE_ALWAYS_INLINE bool CheckOpcode(const unsigned char *MatcherTable, unsigned &MatcherIndex, SDNode *N)
void setFunctionLoweringInfo(FunctionLoweringInfo *FuncInfo)
Definition: SelectionDAG.h:389
virtual unsigned getFrameRegister(const MachineFunction &MF) const =0
Debug information queries.
This class defines information used to lower LLVM code to legal SelectionDAG operators that the targe...
This file implements a class to represent arbitrary precision integral constant values and operations...
INLINEASM - Represents an inline asm block.
Definition: ISDOpcodes.h:668
This represents a list of ValueType&#39;s that has been intern&#39;d by a SelectionDAG.
unsigned DAGSize
DAGSize - Size of DAG being instruction selected.
void initializeAAResultsWrapperPassPass(PassRegistry &)
MachineInstr * getVRegDef(unsigned Reg) const
getVRegDef - Return the machine instr that defines the specified virtual register or null if none is ...
bool runOnMachineFunction(MachineFunction &MF) override
runOnMachineFunction - This method must be overloaded to perform the desired machine code transformat...
defusechain_iterator - This class provides iterator support for machine operands in the function that...
unsigned AssignTopologicalOrder()
Topological-sort the AllNodes list and a assign a unique node id for each node in the DAG based on th...
int getArgumentFrameIndex(const Argument *A)
getArgumentFrameIndex - Get frame index for the byval argument.
void getAnalysisUsage(AnalysisUsage &AU) const override
getAnalysisUsage - Subclasses that override getAnalysisUsage must call this.
int64_t getSExtValue() const
Key
PAL metadata keys.
virtual StringRef getPatternForIndex(unsigned index)
getPatternForIndex - Patterns selected by tablegen during ISEL
static bool hasExceptionPointerOrCodeUser(const CatchPadInst *CPI)
static void InvalidateNodeId(SDNode *N)
This is a fast-path instruction selection class that generates poor code and doesn&#39;t support illegal ...
Definition: FastISel.h:66
A constant value that is initialized with an expression using other constant values.
Definition: Constants.h:888
unsigned getSizeInBits() const
Return the size of the specified value type in bits.
Definition: ValueTypes.h:291
Type * getType() const
All values are typed, get the type of this value.
Definition: Value.h:244
bool getBoolValue() const
Convert APInt to a boolean value.
Definition: APInt.h:477
bool canTrap() const
Return true if evaluation of this constant could trap.
Definition: Constants.cpp:432
virtual bool supportSwiftError() const
Return true if the target supports swifterror attribute.
bool isSwiftError() const
Return true if this value is a swifterror value.
Definition: Value.cpp:726
bool isStatepoint(const CallBase *Call)
Definition: Statepoint.cpp:20
SDNode * mutateStrictFPToFP(SDNode *Node)
Mutate the specified strict FP node to its non-strict equivalent, unlinking the node from its chain a...
#define T
ScheduleDAGSDNodes * createBURRListDAGScheduler(SelectionDAGISel *IS, CodeGenOpt::Level OptLevel)
createBURRListDAGScheduler - This creates a bottom up register usage reduction list scheduler...
ArrayRef - Represent a constant reference to an array (0 or more elements consecutively in memory)...
Definition: APInt.h:32
BasicBlock * SplitCriticalEdge(Instruction *TI, unsigned SuccNum, const CriticalEdgeSplittingOptions &Options=CriticalEdgeSplittingOptions())
If this edge is a critical edge, insert a new node to split the critical edge.
static unsigned IsPredicateKnownToFail(const unsigned char *Table, unsigned Index, SDValue N, bool &Result, const SelectionDAGISel &SDISel, SmallVectorImpl< std::pair< SDValue, SDNode *>> &RecordedNodes)
IsPredicateKnownToFail - If we know how and can do so without pushing a scope, evaluate the current n...
AttributeList getAttributes() const
Return the attribute list for this Function.
Definition: Function.h:223
An instruction for storing to memory.
Definition: Instructions.h:320
CondCode
ISD::CondCode enum - These are ordered carefully to make the bitfields below work out...
Definition: ISDOpcodes.h:965
int getNumber() const
MachineBasicBlocks are uniquely numbered at the function level, unless they&#39;re not in a MachineFuncti...
op_iterator op_begin() const
MachinePassRegistry - Track the registration of machine passes.
void init(GCFunctionInfo *gfi, AliasAnalysis *AA, const TargetLibraryInfo *li)
virtual const TargetInstrInfo * getInstrInfo() const
TargetConstant* - Like Constant*, but the DAG does not do any folding, simplification, or lowering of the constant.
Definition: ISDOpcodes.h:124
SDValue getTargetConstant(uint64_t Val, const SDLoc &DL, EVT VT, bool isOpaque=false)
Definition: SelectionDAG.h:575
static MachinePassRegistry< FunctionPassCtor > Registry
RegisterScheduler class - Track the registration of instruction schedulers.
static LLVM_ATTRIBUTE_ALWAYS_INLINE uint64_t GetVBR(uint64_t Val, const unsigned char *MatcherTable, unsigned &Idx)
GetVBR - decode a vbr encoding whose top bit is set.
instr_iterator insert(instr_iterator I, MachineInstr *M)
Insert MI into the instruction list before I, possibly inside a bundle.
Concrete subclass of DominatorTreeBase that is used to compute a normal dominator tree...
Definition: Dominators.h:144
RESULT = INTRINSIC_WO_CHAIN(INTRINSICID, arg1, arg2, ...) This node represents a target intrinsic fun...
Definition: ISDOpcodes.h:150
SDValue getTargetConstantFP(double Val, const SDLoc &DL, EVT VT)
Definition: SelectionDAG.h:607
Legacy analysis pass which computes BranchProbabilityInfo.
unsigned getNumSuccessors() const
Return the number of successors that this instruction has.
const DataLayout & getDataLayout() const
Return the DataLayout attached to the Module associated to this MF.
auto count(R &&Range, const E &Element) -> typename std::iterator_traits< decltype(adl_begin(Range))>::difference_type
Wrapper function around std::count to count the number of times an element Element occurs in the give...
Definition: STLExtras.h:1251
void printrFull(raw_ostream &O, const SelectionDAG *G=nullptr) const
Print a SelectionDAG node and all children down to the leaves.
StringRef getName() const
getName - Return the name of the corresponding LLVM function.
static cl::opt< bool > EnableFastISelFallbackReport("fast-isel-report-on-fallback", cl::Hidden, cl::desc("Emit a diagnostic when \ast\instruction selection " "falls back to SelectionDAG."))
UNDEF - An undefined node.
Definition: ISDOpcodes.h:177
static cl::opt< bool > ViewDAGCombineLT("view-dag-combine-lt-dags", cl::Hidden, cl::desc("Pop up a window to show dags before the post legalize types" " dag combine pass"))
uint16_t MCPhysReg
An unsigned integer type large enough to represent all physical registers, but not necessarily virtua...
void finishBasicBlock()
Flush the local value map and sink local values if possible.
Definition: FastISel.cpp:144
TargetInstrInfo - Interface to description of machine instruction set.
This corresponds to the llvm.lifetime.
Definition: ISDOpcodes.h:850
MachineRegisterInfo * RegInfo
void clear()
clear - Clear out all the function-specific state.
iterator find(const_arg_type_t< KeyT > Val)
Definition: DenseMap.h:176
unsigned ComputeNumSignBits(SDValue Op, unsigned Depth=0) const
Return the number of times the sign bit of the register is replicated into the other bits...
bool isVoidTy() const
Return true if this is &#39;void&#39;.
Definition: Type.h:140
const BasicBlock & getEntryBlock() const
Definition: Function.h:639
unsigned getNumValues() const
Return the number of values defined/returned by this operator.
bool hasAttrSomewhere(Attribute::AttrKind Kind, unsigned *Index=nullptr) const
Return true if the specified attribute is set for at least one parameter or for the return value...
static SDValue HandleMergeInputChains(SmallVectorImpl< SDNode *> &ChainNodesMatched, SelectionDAG *CurDAG)
HandleMergeInputChains - This implements the OPC_EmitMergeInputChains operation for when the pattern ...
MachineInstrBuilder BuildMI(MachineFunction &MF, const DebugLoc &DL, const MCInstrDesc &MCID)
Builder interface. Specify how to create the initial instruction itself.
unsigned getObjectAlignment(int ObjectIdx) const
Return the alignment of the specified stack object.
CriticalEdgeSplittingOptions & setMergeIdenticalEdges()
static void computeUsesMSVCFloatingPoint(const Triple &TT, const Function &F, MachineModuleInfo &MMI)
virtual bool CheckPatternPredicate(unsigned PredNo) const
CheckPatternPredicate - This function is generated by tblgen in the target.
initializer< Ty > init(const Ty &Val)
Definition: CommandLine.h:422
SmallPtrSet< const Instruction *, 4 > ElidedArgCopyInstrs
void initializeBranchProbabilityInfoWrapperPassPass(PassRegistry &)
OUTCHAIN = INTRINSIC_VOID(INCHAIN, INTRINSICID, arg1, arg2, ...) This node represents a target intrin...
Definition: ISDOpcodes.h:165
CodeGenOpt::Level OptLevel
void addLiveIn(MCPhysReg PhysReg, LaneBitmask LaneMask=LaneBitmask::getAll())
Adds the specified register as a live in.
ScheduleDAGSDNodes - A ScheduleDAG for scheduling SDNode-based DAGs.
const Instruction * getFirstNonPHI() const
Returns a pointer to the first instruction in this block that is not a PHINode instruction.
Definition: BasicBlock.cpp:189
READ_REGISTER, WRITE_REGISTER - This node represents llvm.register on the DAG, which implements the n...
Definition: ISDOpcodes.h:84
Wrapper pass for TargetTransformInfo.
std::vector< std::pair< MachineInstr *, unsigned > > PHINodesToUpdate
PHINodesToUpdate - A list of phi instructions whose operand list will be updated after processing the...
unsigned const MachineRegisterInfo * MRI
ScheduleDAGSDNodes * createVLIWDAGScheduler(SelectionDAGISel *IS, CodeGenOpt::Level OptLevel)
createVLIWDAGScheduler - Scheduler for VLIW targets.
StringRef getName() const
Return the name of the corresponding LLVM basic block, or an empty string.
bool HasTailCall
This is set to true if a call in the current block has been translated as a tail call.
MVT getPointerTy(const DataLayout &DL, uint32_t AS=0) const
Return the pointer type for the given address space, defaults to the pointer type from the data layou...
static void preassignSwiftErrorRegs(const TargetLowering *TLI, FunctionLoweringInfo *FuncInfo, BasicBlock::const_iterator Begin, BasicBlock::const_iterator End)
void Legalize()
This transforms the SelectionDAG into a SelectionDAG that is compatible with the target instruction s...
virtual unsigned getExceptionPointerRegister(const Constant *PersonalityFn) const
If a physical register, this returns the register that receives the exception address on entry to an ...
ScheduleDAGSDNodes * createDefaultScheduler(SelectionDAGISel *IS, CodeGenOpt::Level OptLevel)
createDefaultScheduler - This creates an instruction scheduler appropriate for the target...
use_iterator use_begin() const
Provide iteration support to walk over all uses of an SDNode.
constexpr bool isPowerOf2_32(uint32_t Value)
Return true if the argument is a power of two > 0.
Definition: MathExtras.h:428
Machine Value Type.
LLVM Basic Block Representation.
Definition: BasicBlock.h:57
const TargetSubtargetInfo & getSubtarget() const
getSubtarget - Return the subtarget for which this machine code is being compiled.
void getAnalysisUsage(AnalysisUsage &AU) const override
getAnalysisUsage - Subclasses that override getAnalysisUsage must call this.
DISubprogram * getSubprogram() const
Get the attached subprogram.
Definition: Metadata.cpp:1507
Value * getAddress() const
MachineFrameInfo & getFrameInfo()
getFrameInfo - Return the frame info object for the current function.
std::pair< unsigned, bool > getOrCreateSwiftErrorVRegUseAt(const Instruction *, const MachineBasicBlock *, const Value *)
MachineInstrBuilder & UseMI
static GCRegistry::Add< CoreCLRGC > E("coreclr", "CoreCLR-compatible GC")
This is an important base class in LLVM.
Definition: Constant.h:41
void removeDeadCode(MachineBasicBlock::iterator I, MachineBasicBlock::iterator E)
Remove all dead instructions between the I and E.
Definition: FastISel.cpp:544
iterator_range< value_op_iterator > op_values() const
void setUsesMSVCFloatingPoint(bool b)
const SDValue & getOperand(unsigned Num) const
ScheduleDAGSDNodes * createILPListDAGScheduler(SelectionDAGISel *IS, CodeGenOpt::Level)
createILPListDAGScheduler - This creates a bottom up register pressure aware list scheduler that trie...
SmallSet - This maintains a set of unique values, optimizing for the case when the set is small (less...
Definition: SmallSet.h:134
This file contains the declarations for the subclasses of Constant, which represent the different fla...
ConstantFP - Floating Point Values [float, double].
Definition: Constants.h:263
std::pair< iterator, bool > insert(PtrType Ptr)
Inserts Ptr if and only if there is no element in the container equal to Ptr.
Definition: SmallPtrSet.h:370
Interval::pred_iterator pred_begin(Interval *I)
pred_begin/pred_end - define methods so that Intervals may be used just like BasicBlocks can with the...
Definition: Interval.h:112
DIExpression * getExpression() const
virtual unsigned getExceptionSelectorRegister(const Constant *PersonalityFn) const
If a physical register, this returns the register that receives the exception typeid on entry to a la...
static unsigned getNumOperandRegisters(unsigned Flag)
getNumOperandRegisters - Extract the number of registers field from the inline asm operand flag...
Definition: InlineAsm.h:335
void RemoveDeadNodes()
This method deletes all unreachable nodes in the SelectionDAG.
EHPersonality classifyEHPersonality(const Value *Pers)
See if the given exception handling personality function is one that we understand.
Represent the analysis usage information of a pass.
void Combine(CombineLevel Level, AliasAnalysis *AA, CodeGenOpt::Level OptLevel)
This iterates over the nodes in the SelectionDAG, folding certain types of nodes together, or eliminating superfluous nodes.
This class provides iterator support for SDUse operands that use a specific SDNode.
use_instr_iterator use_instr_begin(unsigned RegNo) const
bool tryToFoldLoad(const LoadInst *LI, const Instruction *FoldInst)
We&#39;re checking to see if we can fold LI into FoldInst.
Definition: FastISel.cpp:2306
static LLVM_ATTRIBUTE_ALWAYS_INLINE bool CheckChildInteger(const unsigned char *MatcherTable, unsigned &MatcherIndex, SDValue N, unsigned ChildNo)
static unsigned getMemoryConstraintID(unsigned Flag)
Definition: InlineAsm.h:328
bool lowerArguments()
Do "fast" instruction selection for function arguments and append the machine instructions to the cur...
Definition: FastISel.cpp:146
static void replace(Module &M, GlobalVariable *Old, GlobalVariable *New)
static cl::opt< bool > UseMBPI("use-mbpi", cl::desc("use Machine Branch Probability Info"), cl::init(true), cl::Hidden)
const APInt & getAPIntValue() const
amdgpu Simplify well known AMD library false FunctionCallee Value * Arg
const Triple & getTargetTriple() const
Interval::pred_iterator pred_end(Interval *I)
Definition: Interval.h:115
AssertSext, AssertZext - These nodes record if a register contains a value that has already been zero...
Definition: ISDOpcodes.h:56
arg_iterator arg_begin()
Definition: Function.h:670
void setHasInlineAsm(bool B)
Set a flag that indicates that the function contains inline assembly.
virtual void PostprocessISelDAG()
PostprocessISelDAG() - This hook allows the target to hack on the graph right after selection...
void RemoveDeadNode(SDNode *N)
Remove the specified node from the system.
self_iterator getIterator()
Definition: ilist_node.h:81
void AddLiveOutRegInfo(unsigned Reg, unsigned NumSignBits, const KnownBits &Known)
AddLiveOutRegInfo - Adds LiveOutInfo for a register.
virtual MachineBasicBlock * EmitInstrWithCustomInserter(MachineInstr &MI, MachineBasicBlock *MBB) const
This method should be implemented by targets that mark instructions with the &#39;usesCustomInserter&#39; fla...
std::pair< NoneType, bool > insert(const T &V)
insert - Insert an element into the set if it isn&#39;t already there.
Definition: SmallSet.h:180
void getLocation(StringRef &RelativePath, unsigned &Line, unsigned &Column) const
Return location information for this diagnostic in three parts: the relative source file path...
auto find_if(R &&Range, UnaryPredicate P) -> decltype(adl_begin(Range))
Provide wrappers to std::find_if which take ranges instead of having to pass begin/end explicitly...
Definition: STLExtras.h:1213
static unsigned getFlagWordForMem(unsigned InputFlag, unsigned Constraint)
Augment an existing flag word returned by getFlagWord with the constraint code for a memory constrain...
Definition: InlineAsm.h:311
static cl::opt< int > EnableFastISelAbort("fast-isel-abort", cl::Hidden, cl::desc("Enable abort calls when \ast\instruction selection " "fails to lower an instruction: 0 disable the abort, 1 will " "abort but for args, calls and terminators, 2 will also " "abort for argument lowering, and 3 will never fallback " "to SelectionDAG."))
LLVMContext & getContext() const
getContext - Return a reference to the LLVMContext associated with this function. ...
Definition: Function.cpp:192
unsigned ExceptionPointerVirtReg
If the current MBB is a landing pad, the exception pointer and exception selector registers are copie...
bool succ_empty(const Instruction *I)
Definition: CFG.h:253
SmallPtrSet< const BasicBlock *, 4 > VisitedBBs
VisitedBBs - The set of basic blocks visited thus far by instruction selection.
static void reportFastISelFailure(MachineFunction &MF, OptimizationRemarkEmitter &ORE, OptimizationRemarkMissed &R, bool ShouldAbort)
static bool isMemKind(unsigned Flag)
Definition: InlineAsm.h:276
bool isCopy() const
Extended Value Type.
Definition: ValueTypes.h:33
bool isImplicitDef() const
static LLVM_ATTRIBUTE_ALWAYS_INLINE bool CheckOrImm(const unsigned char *MatcherTable, unsigned &MatcherIndex, SDValue N, const SelectionDAGISel &SDISel)
TargetRegisterInfo base class - We assume that the target defines a static array of TargetRegisterDes...
const MachineBasicBlock & front() const
StringRef getName(unsigned Opcode) const
Returns the name for the instructions with the given opcode.
Definition: MCInstrInfo.h:50
size_t size() const
Definition: SmallVector.h:52
Value * getIncomingValue(unsigned i) const
Return incoming value number x.
void ComputePHILiveOutRegInfo(const PHINode *)
ComputePHILiveOutRegInfo - Compute LiveOutInfo for a PHI&#39;s destination register based on the LiveOutI...
bool isMachineOpcode() const
Test if this node has a post-isel opcode, directly corresponding to a MachineInstr opcode...
static void SplitCriticalSideEffectEdges(Function &Fn, DominatorTree *DT, LoopInfo *LI)
SplitCriticalSideEffectEdges - Look for critical edges with a PHI value that may trap on it...
unsigned getNumOperands() const
Return the number of values used by this operation.
std::string & str()
Flushes the stream contents to the target string and returns the string&#39;s reference.
Definition: raw_ostream.h:498
#define llvm_unreachable(msg)
Marks that the current location is not supposed to be reachable.
HANDLENODE node - Used as a handle for various purposes.
Definition: ISDOpcodes.h:754
iterator_range< IterTy > args() const
Definition: CallSite.h:222
const DIExpression * getDebugExpression() const
Return the complex address expression referenced by this DBG_VALUE instruction.
MachineBasicBlock * MBB
MBB - The current block.
virtual bool enableMachineScheduler() const
True if the subtarget should run MachineScheduler after aggressive coalescing.
Triple - Helper class for working with autoconf configuration names.
Definition: Triple.h:43
EH_LABEL - Represents a label in mid basic block used to track locations needed for debug and excepti...
Definition: ISDOpcodes.h:676
unsigned first
bool isOrEquivalentToAdd(const SDNode *N) const
TargetIntrinsicInfo - Interface to description of machine instruction set.
static LLVM_ATTRIBUTE_ALWAYS_INLINE bool CheckInteger(const unsigned char *MatcherTable, unsigned &MatcherIndex, SDValue N)
void recomputeInsertPt()
Reset InsertPt to prepare for inserting instructions into the current block.
Definition: FastISel.cpp:530
bool isGCRelocate(const CallBase *Call)
Definition: Statepoint.cpp:36
bool use_empty() const
Return true if there are no uses of this node.
bool SplitCSR
True if part of the CSRs will be handled via explicit copies.
static void mapWasmLandingPadIndex(MachineBasicBlock *MBB, const CatchPadInst *CPI)
TokenFactor - This node takes multiple tokens as input and produces a single token result...
Definition: ISDOpcodes.h:49
void dump() const
Dump this node, for debugging.
void visitSPDescriptorFailure(StackProtectorDescriptor &SPD)
Codegen the failure basic block for a stack protector check.
ScheduleDAGSDNodes * createSourceListDAGScheduler(SelectionDAGISel *IS, CodeGenOpt::Level OptLevel)
createBURRListDAGScheduler - This creates a bottom up list scheduler that schedules nodes in source c...
bool memoperands_empty() const
Iterator for intrusive lists based on ilist_node.
virtual bool supportSplitCSR(MachineFunction *MF) const
Return true if the target supports that a subset of CSRs for the given machine function is handled ex...
SmallPtrSet - This class implements a set which is optimized for holding SmallSize or less elements...
Definition: SmallPtrSet.h:417
virtual void initializeSplitCSR(MachineBasicBlock *Entry) const
Perform necessary initialization to handle a subset of CSRs explicitly via copies.
This is the shared class of boolean and integer constants.
Definition: Constants.h:83
void emit(DiagnosticInfoOptimizationBase &OptDiag)
Output the remark via the diagnostic handler and to the optimization record file. ...
BlockVerifier::State From
virtual bool CheckComplexPattern(SDNode *Root, SDNode *Parent, SDValue N, unsigned PatternNo, SmallVectorImpl< std::pair< SDValue, SDNode *> > &Result)
This pass provides access to the codegen interfaces that are needed for IR-level transformations.