LLVM  9.0.0svn
SelectionDAGISel.cpp
Go to the documentation of this file.
1 //===- SelectionDAGISel.cpp - Implement the SelectionDAGISel class --------===//
2 //
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 //
7 //===----------------------------------------------------------------------===//
8 //
9 // This implements the SelectionDAGISel class.
10 //
11 //===----------------------------------------------------------------------===//
12 
14 #include "ScheduleDAGSDNodes.h"
15 #include "SelectionDAGBuilder.h"
16 #include "llvm/ADT/APInt.h"
17 #include "llvm/ADT/DenseMap.h"
18 #include "llvm/ADT/None.h"
20 #include "llvm/ADT/STLExtras.h"
21 #include "llvm/ADT/SmallPtrSet.h"
22 #include "llvm/ADT/SmallSet.h"
23 #include "llvm/ADT/SmallVector.h"
24 #include "llvm/ADT/Statistic.h"
25 #include "llvm/ADT/StringRef.h"
28 #include "llvm/Analysis/CFG.h"
33 #include "llvm/CodeGen/FastISel.h"
57 #include "llvm/IR/BasicBlock.h"
58 #include "llvm/IR/Constants.h"
59 #include "llvm/IR/DataLayout.h"
61 #include "llvm/IR/DebugLoc.h"
62 #include "llvm/IR/DiagnosticInfo.h"
63 #include "llvm/IR/Dominators.h"
64 #include "llvm/IR/Function.h"
65 #include "llvm/IR/InlineAsm.h"
66 #include "llvm/IR/InstIterator.h"
67 #include "llvm/IR/InstrTypes.h"
68 #include "llvm/IR/Instruction.h"
69 #include "llvm/IR/Instructions.h"
70 #include "llvm/IR/IntrinsicInst.h"
71 #include "llvm/IR/Intrinsics.h"
72 #include "llvm/IR/Metadata.h"
73 #include "llvm/IR/Type.h"
74 #include "llvm/IR/User.h"
75 #include "llvm/IR/Value.h"
76 #include "llvm/MC/MCInstrDesc.h"
77 #include "llvm/MC/MCRegisterInfo.h"
78 #include "llvm/Pass.h"
80 #include "llvm/Support/Casting.h"
81 #include "llvm/Support/CodeGen.h"
83 #include "llvm/Support/Compiler.h"
84 #include "llvm/Support/Debug.h"
86 #include "llvm/Support/KnownBits.h"
88 #include "llvm/Support/Timer.h"
94 #include <algorithm>
95 #include <cassert>
96 #include <cstdint>
97 #include <iterator>
98 #include <limits>
99 #include <memory>
100 #include <string>
101 #include <utility>
102 #include <vector>
103 
104 using namespace llvm;
105 
106 #define DEBUG_TYPE "isel"
107 
108 STATISTIC(NumFastIselFailures, "Number of instructions fast isel failed on");
109 STATISTIC(NumFastIselSuccess, "Number of instructions fast isel selected");
110 STATISTIC(NumFastIselBlocks, "Number of blocks selected entirely by fast isel");
111 STATISTIC(NumDAGBlocks, "Number of blocks selected using DAG");
112 STATISTIC(NumDAGIselRetries,"Number of times dag isel has to try another path");
113 STATISTIC(NumEntryBlocks, "Number of entry blocks encountered");
114 STATISTIC(NumFastIselFailLowerArguments,
115  "Number of entry blocks where fast isel failed to lower arguments");
116 
118  "fast-isel-abort", cl::Hidden,
119  cl::desc("Enable abort calls when \"fast\" instruction selection "
120  "fails to lower an instruction: 0 disable the abort, 1 will "
121  "abort but for args, calls and terminators, 2 will also "
122  "abort for argument lowering, and 3 will never fallback "
123  "to SelectionDAG."));
124 
126  "fast-isel-report-on-fallback", cl::Hidden,
127  cl::desc("Emit a diagnostic when \"fast\" instruction selection "
128  "falls back to SelectionDAG."));
129 
130 static cl::opt<bool>
131 UseMBPI("use-mbpi",
132  cl::desc("use Machine Branch Probability Info"),
133  cl::init(true), cl::Hidden);
134 
135 #ifndef NDEBUG
137 FilterDAGBasicBlockName("filter-view-dags", cl::Hidden,
138  cl::desc("Only display the basic block whose name "
139  "matches this for all view-*-dags options"));
140 static cl::opt<bool>
141 ViewDAGCombine1("view-dag-combine1-dags", cl::Hidden,
142  cl::desc("Pop up a window to show dags before the first "
143  "dag combine pass"));
144 static cl::opt<bool>
145 ViewLegalizeTypesDAGs("view-legalize-types-dags", cl::Hidden,
146  cl::desc("Pop up a window to show dags before legalize types"));
147 static cl::opt<bool>
148 ViewLegalizeDAGs("view-legalize-dags", cl::Hidden,
149  cl::desc("Pop up a window to show dags before legalize"));
150 static cl::opt<bool>
151 ViewDAGCombine2("view-dag-combine2-dags", cl::Hidden,
152  cl::desc("Pop up a window to show dags before the second "
153  "dag combine pass"));
154 static cl::opt<bool>
155 ViewDAGCombineLT("view-dag-combine-lt-dags", cl::Hidden,
156  cl::desc("Pop up a window to show dags before the post legalize types"
157  " dag combine pass"));
158 static cl::opt<bool>
159 ViewISelDAGs("view-isel-dags", cl::Hidden,
160  cl::desc("Pop up a window to show isel dags as they are selected"));
161 static cl::opt<bool>
162 ViewSchedDAGs("view-sched-dags", cl::Hidden,
163  cl::desc("Pop up a window to show sched dags as they are processed"));
164 static cl::opt<bool>
165 ViewSUnitDAGs("view-sunit-dags", cl::Hidden,
166  cl::desc("Pop up a window to show SUnit dags after they are processed"));
167 #else
168 static const bool ViewDAGCombine1 = false,
169  ViewLegalizeTypesDAGs = false, ViewLegalizeDAGs = false,
170  ViewDAGCombine2 = false,
171  ViewDAGCombineLT = false,
172  ViewISelDAGs = false, ViewSchedDAGs = false,
173  ViewSUnitDAGs = false;
174 #endif
175 
176 //===---------------------------------------------------------------------===//
177 ///
178 /// RegisterScheduler class - Track the registration of instruction schedulers.
179 ///
180 //===---------------------------------------------------------------------===//
183 
184 //===---------------------------------------------------------------------===//
185 ///
186 /// ISHeuristic command line option for instruction schedulers.
187 ///
188 //===---------------------------------------------------------------------===//
191 ISHeuristic("pre-RA-sched",
193  cl::desc("Instruction schedulers available (before register"
194  " allocation):"));
195 
196 static RegisterScheduler
197 defaultListDAGScheduler("default", "Best scheduler for the target",
199 
200 namespace llvm {
201 
202  //===--------------------------------------------------------------------===//
203  /// This class is used by SelectionDAGISel to temporarily override
204  /// the optimization level on a per-function basis.
206  SelectionDAGISel &IS;
207  CodeGenOpt::Level SavedOptLevel;
208  bool SavedFastISel;
209 
210  public:
212  CodeGenOpt::Level NewOptLevel) : IS(ISel) {
213  SavedOptLevel = IS.OptLevel;
214  if (NewOptLevel == SavedOptLevel)
215  return;
216  IS.OptLevel = NewOptLevel;
217  IS.TM.setOptLevel(NewOptLevel);
218  LLVM_DEBUG(dbgs() << "\nChanging optimization level for Function "
219  << IS.MF->getFunction().getName() << "\n");
220  LLVM_DEBUG(dbgs() << "\tBefore: -O" << SavedOptLevel << " ; After: -O"
221  << NewOptLevel << "\n");
222  SavedFastISel = IS.TM.Options.EnableFastISel;
223  if (NewOptLevel == CodeGenOpt::None) {
225  LLVM_DEBUG(
226  dbgs() << "\tFastISel is "
227  << (IS.TM.Options.EnableFastISel ? "enabled" : "disabled")
228  << "\n");
229  }
230  }
231 
233  if (IS.OptLevel == SavedOptLevel)
234  return;
235  LLVM_DEBUG(dbgs() << "\nRestoring optimization level for Function "
236  << IS.MF->getFunction().getName() << "\n");
237  LLVM_DEBUG(dbgs() << "\tBefore: -O" << IS.OptLevel << " ; After: -O"
238  << SavedOptLevel << "\n");
239  IS.OptLevel = SavedOptLevel;
240  IS.TM.setOptLevel(SavedOptLevel);
241  IS.TM.setFastISel(SavedFastISel);
242  }
243  };
244 
245  //===--------------------------------------------------------------------===//
246  /// createDefaultScheduler - This creates an instruction scheduler appropriate
247  /// for the target.
249  CodeGenOpt::Level OptLevel) {
250  const TargetLowering *TLI = IS->TLI;
251  const TargetSubtargetInfo &ST = IS->MF->getSubtarget();
252 
253  // Try first to see if the Target has its own way of selecting a scheduler
254  if (auto *SchedulerCtor = ST.getDAGScheduler(OptLevel)) {
255  return SchedulerCtor(IS, OptLevel);
256  }
257 
258  if (OptLevel == CodeGenOpt::None ||
261  return createSourceListDAGScheduler(IS, OptLevel);
263  return createBURRListDAGScheduler(IS, OptLevel);
265  return createHybridListDAGScheduler(IS, OptLevel);
266  if (TLI->getSchedulingPreference() == Sched::VLIW)
267  return createVLIWDAGScheduler(IS, OptLevel);
269  "Unknown sched type!");
270  return createILPListDAGScheduler(IS, OptLevel);
271  }
272 
273 } // end namespace llvm
274 
275 // EmitInstrWithCustomInserter - This method should be implemented by targets
276 // that mark instructions with the 'usesCustomInserter' flag. These
277 // instructions are special in various ways, which require special support to
278 // insert. The specified MachineInstr is created but not inserted into any
279 // basic blocks, and this method is called to expand it into a sequence of
280 // instructions, potentially also creating new basic blocks and control flow.
281 // When new basic blocks are inserted and the edges from MBB to its successors
282 // are modified, the method should insert pairs of <OldSucc, NewSucc> into the
283 // DenseMap.
286  MachineBasicBlock *MBB) const {
287 #ifndef NDEBUG
288  dbgs() << "If a target marks an instruction with "
289  "'usesCustomInserter', it must implement "
290  "TargetLowering::EmitInstrWithCustomInserter!";
291 #endif
292  llvm_unreachable(nullptr);
293 }
294 
296  SDNode *Node) const {
297  assert(!MI.hasPostISelHook() &&
298  "If a target marks an instruction with 'hasPostISelHook', "
299  "it must implement TargetLowering::AdjustInstrPostInstrSelection!");
300 }
301 
302 //===----------------------------------------------------------------------===//
303 // SelectionDAGISel code
304 //===----------------------------------------------------------------------===//
305 
307  CodeGenOpt::Level OL) :
308  MachineFunctionPass(ID), TM(tm),
309  FuncInfo(new FunctionLoweringInfo()),
310  CurDAG(new SelectionDAG(tm, OL)),
311  SDB(new SelectionDAGBuilder(*CurDAG, *FuncInfo, OL)),
312  AA(), GFI(),
313  OptLevel(OL),
314  DAGSize(0) {
321  }
322 
324  delete SDB;
325  delete CurDAG;
326  delete FuncInfo;
327 }
328 
330  if (OptLevel != CodeGenOpt::None)
340 }
341 
342 /// SplitCriticalSideEffectEdges - Look for critical edges with a PHI value that
343 /// may trap on it. In this case we have to split the edge so that the path
344 /// through the predecessor block that doesn't go to the phi block doesn't
345 /// execute the possibly trapping instruction. If available, we pass domtree
346 /// and loop info to be updated when we split critical edges. This is because
347 /// SelectionDAGISel preserves these analyses.
348 /// This is required for correctness, so it must be done at -O0.
349 ///
351  LoopInfo *LI) {
352  // Loop for blocks with phi nodes.
353  for (BasicBlock &BB : Fn) {
354  PHINode *PN = dyn_cast<PHINode>(BB.begin());
355  if (!PN) continue;
356 
357  ReprocessBlock:
358  // For each block with a PHI node, check to see if any of the input values
359  // are potentially trapping constant expressions. Constant expressions are
360  // the only potentially trapping value that can occur as the argument to a
361  // PHI.
362  for (BasicBlock::iterator I = BB.begin(); (PN = dyn_cast<PHINode>(I)); ++I)
363  for (unsigned i = 0, e = PN->getNumIncomingValues(); i != e; ++i) {
365  if (!CE || !CE->canTrap()) continue;
366 
367  // The only case we have to worry about is when the edge is critical.
368  // Since this block has a PHI Node, we assume it has multiple input
369  // edges: check to see if the pred has multiple successors.
370  BasicBlock *Pred = PN->getIncomingBlock(i);
371  if (Pred->getTerminator()->getNumSuccessors() == 1)
372  continue;
373 
374  // Okay, we have to split this edge.
376  Pred->getTerminator(), GetSuccessorNumber(Pred, &BB),
378  goto ReprocessBlock;
379  }
380  }
381 }
382 
383 static void computeUsesMSVCFloatingPoint(const Triple &TT, const Function &F,
384  MachineModuleInfo &MMI) {
385  // Only needed for MSVC
387  return;
388 
389  // If it's already set, nothing to do.
390  if (MMI.usesMSVCFloatingPoint())
391  return;
392 
393  for (const Instruction &I : instructions(F)) {
394  if (I.getType()->isFPOrFPVectorTy()) {
395  MMI.setUsesMSVCFloatingPoint(true);
396  return;
397  }
398  for (const auto &Op : I.operands()) {
399  if (Op->getType()->isFPOrFPVectorTy()) {
400  MMI.setUsesMSVCFloatingPoint(true);
401  return;
402  }
403  }
404  }
405 }
406 
408  // If we already selected that function, we do not need to run SDISel.
409  if (mf.getProperties().hasProperty(
411  return false;
412  // Do some sanity-checking on the command-line options.
414  "-fast-isel-abort > 0 requires -fast-isel");
415 
416  const Function &Fn = mf.getFunction();
417  MF = &mf;
418 
419  // Reset the target options before resetting the optimization
420  // level below.
421  // FIXME: This is a horrible hack and should be processed via
422  // codegen looking at the optimization level explicitly when
423  // it wants to look at it.
425  // Reset OptLevel to None for optnone functions.
426  CodeGenOpt::Level NewOptLevel = OptLevel;
427  if (OptLevel != CodeGenOpt::None && skipFunction(Fn))
428  NewOptLevel = CodeGenOpt::None;
429  OptLevelChanger OLC(*this, NewOptLevel);
430 
433  RegInfo = &MF->getRegInfo();
434  LibInfo = &getAnalysis<TargetLibraryInfoWrapperPass>().getTLI();
435  GFI = Fn.hasGC() ? &getAnalysis<GCModuleInfo>().getFunctionInfo(Fn) : nullptr;
436  ORE = make_unique<OptimizationRemarkEmitter>(&Fn);
437  auto *DTWP = getAnalysisIfAvailable<DominatorTreeWrapperPass>();
438  DominatorTree *DT = DTWP ? &DTWP->getDomTree() : nullptr;
439  auto *LIWP = getAnalysisIfAvailable<LoopInfoWrapperPass>();
440  LoopInfo *LI = LIWP ? &LIWP->getLoopInfo() : nullptr;
441 
442  LLVM_DEBUG(dbgs() << "\n\n\n=== " << Fn.getName() << "\n");
443 
444  SplitCriticalSideEffectEdges(const_cast<Function &>(Fn), DT, LI);
445 
446  CurDAG->init(*MF, *ORE, this, LibInfo,
447  getAnalysisIfAvailable<LegacyDivergenceAnalysis>());
448  FuncInfo->set(Fn, *MF, CurDAG);
449 
450  // Now get the optional analyzes if we want to.
451  // This is based on the possibly changed OptLevel (after optnone is taken
452  // into account). That's unfortunate but OK because it just means we won't
453  // ask for passes that have been required anyway.
454 
456  FuncInfo->BPI = &getAnalysis<BranchProbabilityInfoWrapperPass>().getBPI();
457  else
458  FuncInfo->BPI = nullptr;
459 
460  if (OptLevel != CodeGenOpt::None)
461  AA = &getAnalysis<AAResultsWrapperPass>().getAAResults();
462  else
463  AA = nullptr;
464 
465  SDB->init(GFI, AA, LibInfo);
466 
467  MF->setHasInlineAsm(false);
468 
469  FuncInfo->SplitCSR = false;
470 
471  // We split CSR if the target supports it for the given function
472  // and the function has only return exits.
474  FuncInfo->SplitCSR = true;
475 
476  // Collect all the return blocks.
477  for (const BasicBlock &BB : Fn) {
478  if (!succ_empty(&BB))
479  continue;
480 
481  const Instruction *Term = BB.getTerminator();
482  if (isa<UnreachableInst>(Term) || isa<ReturnInst>(Term))
483  continue;
484 
485  // Bail out if the exit block is not Return nor Unreachable.
486  FuncInfo->SplitCSR = false;
487  break;
488  }
489  }
490 
491  MachineBasicBlock *EntryMBB = &MF->front();
492  if (FuncInfo->SplitCSR)
493  // This performs initialization so lowering for SplitCSR will be correct.
494  TLI->initializeSplitCSR(EntryMBB);
495 
496  SelectAllBasicBlocks(Fn);
498  DiagnosticInfoISelFallback DiagFallback(Fn);
499  Fn.getContext().diagnose(DiagFallback);
500  }
501 
502  // If the first basic block in the function has live ins that need to be
503  // copied into vregs, emit the copies into the top of the block before
504  // emitting the code for the block.
506  RegInfo->EmitLiveInCopies(EntryMBB, TRI, *TII);
507 
508  // Insert copies in the entry block and the return blocks.
509  if (FuncInfo->SplitCSR) {
511  // Collect all the return blocks.
512  for (MachineBasicBlock &MBB : mf) {
513  if (!MBB.succ_empty())
514  continue;
515 
516  MachineBasicBlock::iterator Term = MBB.getFirstTerminator();
517  if (Term != MBB.end() && Term->isReturn()) {
518  Returns.push_back(&MBB);
519  continue;
520  }
521  }
522  TLI->insertCopiesSplitCSR(EntryMBB, Returns);
523  }
524 
526  if (!FuncInfo->ArgDbgValues.empty())
527  for (std::pair<unsigned, unsigned> LI : RegInfo->liveins())
528  if (LI.second)
529  LiveInMap.insert(LI);
530 
531  // Insert DBG_VALUE instructions for function arguments to the entry block.
532  for (unsigned i = 0, e = FuncInfo->ArgDbgValues.size(); i != e; ++i) {
534  bool hasFI = MI->getOperand(0).isFI();
535  unsigned Reg =
536  hasFI ? TRI.getFrameRegister(*MF) : MI->getOperand(0).getReg();
538  EntryMBB->insert(EntryMBB->begin(), MI);
539  else {
541  if (Def) {
542  MachineBasicBlock::iterator InsertPos = Def;
543  // FIXME: VR def may not be in entry block.
544  Def->getParent()->insert(std::next(InsertPos), MI);
545  } else
546  LLVM_DEBUG(dbgs() << "Dropping debug info for dead vreg"
547  << TargetRegisterInfo::virtReg2Index(Reg) << "\n");
548  }
549 
550  // If Reg is live-in then update debug info to track its copy in a vreg.
551  DenseMap<unsigned, unsigned>::iterator LDI = LiveInMap.find(Reg);
552  if (LDI != LiveInMap.end()) {
553  assert(!hasFI && "There's no handling of frame pointer updating here yet "
554  "- add if needed");
555  MachineInstr *Def = RegInfo->getVRegDef(LDI->second);
556  MachineBasicBlock::iterator InsertPos = Def;
557  const MDNode *Variable = MI->getDebugVariable();
558  const MDNode *Expr = MI->getDebugExpression();
559  DebugLoc DL = MI->getDebugLoc();
560  bool IsIndirect = MI->isIndirectDebugValue();
561  if (IsIndirect)
562  assert(MI->getOperand(1).getImm() == 0 &&
563  "DBG_VALUE with nonzero offset");
564  assert(cast<DILocalVariable>(Variable)->isValidLocationForIntrinsic(DL) &&
565  "Expected inlined-at fields to agree");
566  // Def is never a terminator here, so it is ok to increment InsertPos.
567  BuildMI(*EntryMBB, ++InsertPos, DL, TII->get(TargetOpcode::DBG_VALUE),
568  IsIndirect, LDI->second, Variable, Expr);
569 
570  // If this vreg is directly copied into an exported register then
571  // that COPY instructions also need DBG_VALUE, if it is the only
572  // user of LDI->second.
573  MachineInstr *CopyUseMI = nullptr;
575  UI = RegInfo->use_instr_begin(LDI->second),
576  E = RegInfo->use_instr_end(); UI != E; ) {
577  MachineInstr *UseMI = &*(UI++);
578  if (UseMI->isDebugValue()) continue;
579  if (UseMI->isCopy() && !CopyUseMI && UseMI->getParent() == EntryMBB) {
580  CopyUseMI = UseMI; continue;
581  }
582  // Otherwise this is another use or second copy use.
583  CopyUseMI = nullptr; break;
584  }
585  if (CopyUseMI) {
586  // Use MI's debug location, which describes where Variable was
587  // declared, rather than whatever is attached to CopyUseMI.
588  MachineInstr *NewMI =
589  BuildMI(*MF, DL, TII->get(TargetOpcode::DBG_VALUE), IsIndirect,
590  CopyUseMI->getOperand(0).getReg(), Variable, Expr);
591  MachineBasicBlock::iterator Pos = CopyUseMI;
592  EntryMBB->insertAfter(Pos, NewMI);
593  }
594  }
595  }
596 
597  // Determine if there are any calls in this machine function.
598  MachineFrameInfo &MFI = MF->getFrameInfo();
599  for (const auto &MBB : *MF) {
600  if (MFI.hasCalls() && MF->hasInlineAsm())
601  break;
602 
603  for (const auto &MI : MBB) {
604  const MCInstrDesc &MCID = TII->get(MI.getOpcode());
605  if ((MCID.isCall() && !MCID.isReturn()) ||
606  MI.isStackAligningInlineAsm()) {
607  MFI.setHasCalls(true);
608  }
609  if (MI.isInlineAsm()) {
610  MF->setHasInlineAsm(true);
611  }
612  }
613  }
614 
615  // Determine if there is a call to setjmp in the machine function.
616  MF->setExposesReturnsTwice(Fn.callsFunctionThatReturnsTwice());
617 
618  // Determine if floating point is used for msvc
619  computeUsesMSVCFloatingPoint(TM.getTargetTriple(), Fn, MF->getMMI());
620 
621  // Replace forward-declared registers with the registers containing
622  // the desired value.
623  MachineRegisterInfo &MRI = MF->getRegInfo();
626  I != E; ++I) {
627  unsigned From = I->first;
628  unsigned To = I->second;
629  // If To is also scheduled to be replaced, find what its ultimate
630  // replacement is.
631  while (true) {
633  if (J == E) break;
634  To = J->second;
635  }
636  // Make sure the new register has a sufficiently constrained register class.
639  MRI.constrainRegClass(To, MRI.getRegClass(From));
640  // Replace it.
641 
642 
643  // Replacing one register with another won't touch the kill flags.
644  // We need to conservatively clear the kill flags as a kill on the old
645  // register might dominate existing uses of the new register.
646  if (!MRI.use_empty(To))
647  MRI.clearKillFlags(From);
648  MRI.replaceRegWith(From, To);
649  }
650 
651  TLI->finalizeLowering(*MF);
652 
653  // Release function-specific state. SDB and CurDAG are already cleared
654  // at this point.
655  FuncInfo->clear();
656 
657  LLVM_DEBUG(dbgs() << "*** MachineFunction at end of ISel ***\n");
658  LLVM_DEBUG(MF->print(dbgs()));
659 
660  return true;
661 }
662 
666  bool ShouldAbort) {
667  // Print the function name explicitly if we don't have a debug location (which
668  // makes the diagnostic less useful) or if we're going to emit a raw error.
669  if (!R.getLocation().isValid() || ShouldAbort)
670  R << (" (in function: " + MF.getName() + ")").str();
671 
672  if (ShouldAbort)
674 
675  ORE.emit(R);
676 }
677 
678 void SelectionDAGISel::SelectBasicBlock(BasicBlock::const_iterator Begin,
680  bool &HadTailCall) {
681  // Allow creating illegal types during DAG building for the basic block.
683 
684  // Lower the instructions. If a call is emitted as a tail call, cease emitting
685  // nodes for this block.
686  for (BasicBlock::const_iterator I = Begin; I != End && !SDB->HasTailCall; ++I) {
687  if (!ElidedArgCopyInstrs.count(&*I))
688  SDB->visit(*I);
689  }
690 
691  // Make sure the root of the DAG is up-to-date.
693  HadTailCall = SDB->HasTailCall;
695  SDB->clear();
696 
697  // Final step, emit the lowered DAG as machine code.
698  CodeGenAndEmitDAG();
699 }
700 
701 void SelectionDAGISel::ComputeLiveOutVRegInfo() {
702  SmallPtrSet<SDNode*, 16> VisitedNodes;
703  SmallVector<SDNode*, 128> Worklist;
704 
705  Worklist.push_back(CurDAG->getRoot().getNode());
706 
707  KnownBits Known;
708 
709  do {
710  SDNode *N = Worklist.pop_back_val();
711 
712  // If we've already seen this node, ignore it.
713  if (!VisitedNodes.insert(N).second)
714  continue;
715 
716  // Otherwise, add all chain operands to the worklist.
717  for (const SDValue &Op : N->op_values())
718  if (Op.getValueType() == MVT::Other)
719  Worklist.push_back(Op.getNode());
720 
721  // If this is a CopyToReg with a vreg dest, process it.
722  if (N->getOpcode() != ISD::CopyToReg)
723  continue;
724 
725  unsigned DestReg = cast<RegisterSDNode>(N->getOperand(1))->getReg();
727  continue;
728 
729  // Ignore non-integer values.
730  SDValue Src = N->getOperand(2);
731  EVT SrcVT = Src.getValueType();
732  if (!SrcVT.isInteger())
733  continue;
734 
735  unsigned NumSignBits = CurDAG->ComputeNumSignBits(Src);
736  Known = CurDAG->computeKnownBits(Src);
737  FuncInfo->AddLiveOutRegInfo(DestReg, NumSignBits, Known);
738  } while (!Worklist.empty());
739 }
740 
741 void SelectionDAGISel::CodeGenAndEmitDAG() {
742  StringRef GroupName = "sdag";
743  StringRef GroupDescription = "Instruction Selection and Scheduling";
744  std::string BlockName;
745  int BlockNumber = -1;
746  (void)BlockNumber;
747  bool MatchFilterBB = false; (void)MatchFilterBB;
748 #ifndef NDEBUG
749  TargetTransformInfo &TTI =
750  getAnalysis<TargetTransformInfoWrapperPass>().getTTI(*FuncInfo->Fn);
751 #endif
752 
753  // Pre-type legalization allow creation of any node types.
755 
756 #ifndef NDEBUG
757  MatchFilterBB = (FilterDAGBasicBlockName.empty() ||
760 #endif
761 #ifdef NDEBUG
762  if (ViewDAGCombine1 || ViewLegalizeTypesDAGs || ViewLegalizeDAGs ||
765 #endif
766  {
767  BlockNumber = FuncInfo->MBB->getNumber();
768  BlockName =
769  (MF->getName() + ":" + FuncInfo->MBB->getBasicBlock()->getName()).str();
770  }
771  LLVM_DEBUG(dbgs() << "Initial selection DAG: "
772  << printMBBReference(*FuncInfo->MBB) << " '" << BlockName
773  << "'\n";
774  CurDAG->dump());
775 
776  if (ViewDAGCombine1 && MatchFilterBB)
777  CurDAG->viewGraph("dag-combine1 input for " + BlockName);
778 
779  // Run the DAG combiner in pre-legalize mode.
780  {
781  NamedRegionTimer T("combine1", "DAG Combining 1", GroupName,
782  GroupDescription, TimePassesIsEnabled);
784  }
785 
786 #ifndef NDEBUG
787  if (TTI.hasBranchDivergence())
789 #endif
790 
791  LLVM_DEBUG(dbgs() << "Optimized lowered selection DAG: "
792  << printMBBReference(*FuncInfo->MBB) << " '" << BlockName
793  << "'\n";
794  CurDAG->dump());
795 
796  // Second step, hack on the DAG until it only uses operations and types that
797  // the target supports.
798  if (ViewLegalizeTypesDAGs && MatchFilterBB)
799  CurDAG->viewGraph("legalize-types input for " + BlockName);
800 
801  bool Changed;
802  {
803  NamedRegionTimer T("legalize_types", "Type Legalization", GroupName,
804  GroupDescription, TimePassesIsEnabled);
805  Changed = CurDAG->LegalizeTypes();
806  }
807 
808 #ifndef NDEBUG
809  if (TTI.hasBranchDivergence())
811 #endif
812 
813  LLVM_DEBUG(dbgs() << "Type-legalized selection DAG: "
814  << printMBBReference(*FuncInfo->MBB) << " '" << BlockName
815  << "'\n";
816  CurDAG->dump());
817 
818  // Only allow creation of legal node types.
820 
821  if (Changed) {
822  if (ViewDAGCombineLT && MatchFilterBB)
823  CurDAG->viewGraph("dag-combine-lt input for " + BlockName);
824 
825  // Run the DAG combiner in post-type-legalize mode.
826  {
827  NamedRegionTimer T("combine_lt", "DAG Combining after legalize types",
828  GroupName, GroupDescription, TimePassesIsEnabled);
830  }
831 
832 #ifndef NDEBUG
833  if (TTI.hasBranchDivergence())
835 #endif
836 
837  LLVM_DEBUG(dbgs() << "Optimized type-legalized selection DAG: "
838  << printMBBReference(*FuncInfo->MBB) << " '" << BlockName
839  << "'\n";
840  CurDAG->dump());
841  }
842 
843  {
844  NamedRegionTimer T("legalize_vec", "Vector Legalization", GroupName,
845  GroupDescription, TimePassesIsEnabled);
846  Changed = CurDAG->LegalizeVectors();
847  }
848 
849  if (Changed) {
850  LLVM_DEBUG(dbgs() << "Vector-legalized selection DAG: "
851  << printMBBReference(*FuncInfo->MBB) << " '" << BlockName
852  << "'\n";
853  CurDAG->dump());
854 
855  {
856  NamedRegionTimer T("legalize_types2", "Type Legalization 2", GroupName,
857  GroupDescription, TimePassesIsEnabled);
859  }
860 
861  LLVM_DEBUG(dbgs() << "Vector/type-legalized selection DAG: "
862  << printMBBReference(*FuncInfo->MBB) << " '" << BlockName
863  << "'\n";
864  CurDAG->dump());
865 
866  if (ViewDAGCombineLT && MatchFilterBB)
867  CurDAG->viewGraph("dag-combine-lv input for " + BlockName);
868 
869  // Run the DAG combiner in post-type-legalize mode.
870  {
871  NamedRegionTimer T("combine_lv", "DAG Combining after legalize vectors",
872  GroupName, GroupDescription, TimePassesIsEnabled);
874  }
875 
876  LLVM_DEBUG(dbgs() << "Optimized vector-legalized selection DAG: "
877  << printMBBReference(*FuncInfo->MBB) << " '" << BlockName
878  << "'\n";
879  CurDAG->dump());
880 
881 #ifndef NDEBUG
882  if (TTI.hasBranchDivergence())
884 #endif
885  }
886 
887  if (ViewLegalizeDAGs && MatchFilterBB)
888  CurDAG->viewGraph("legalize input for " + BlockName);
889 
890  {
891  NamedRegionTimer T("legalize", "DAG Legalization", GroupName,
892  GroupDescription, TimePassesIsEnabled);
893  CurDAG->Legalize();
894  }
895 
896 #ifndef NDEBUG
897  if (TTI.hasBranchDivergence())
899 #endif
900 
901  LLVM_DEBUG(dbgs() << "Legalized selection DAG: "
902  << printMBBReference(*FuncInfo->MBB) << " '" << BlockName
903  << "'\n";
904  CurDAG->dump());
905 
906  if (ViewDAGCombine2 && MatchFilterBB)
907  CurDAG->viewGraph("dag-combine2 input for " + BlockName);
908 
909  // Run the DAG combiner in post-legalize mode.
910  {
911  NamedRegionTimer T("combine2", "DAG Combining 2", GroupName,
912  GroupDescription, TimePassesIsEnabled);
914  }
915 
916 #ifndef NDEBUG
917  if (TTI.hasBranchDivergence())
919 #endif
920 
921  LLVM_DEBUG(dbgs() << "Optimized legalized selection DAG: "
922  << printMBBReference(*FuncInfo->MBB) << " '" << BlockName
923  << "'\n";
924  CurDAG->dump());
925 
926  if (OptLevel != CodeGenOpt::None)
927  ComputeLiveOutVRegInfo();
928 
929  if (ViewISelDAGs && MatchFilterBB)
930  CurDAG->viewGraph("isel input for " + BlockName);
931 
932  // Third, instruction select all of the operations to machine code, adding the
933  // code to the MachineBasicBlock.
934  {
935  NamedRegionTimer T("isel", "Instruction Selection", GroupName,
936  GroupDescription, TimePassesIsEnabled);
937  DoInstructionSelection();
938  }
939 
940  LLVM_DEBUG(dbgs() << "Selected selection DAG: "
941  << printMBBReference(*FuncInfo->MBB) << " '" << BlockName
942  << "'\n";
943  CurDAG->dump());
944 
945  if (ViewSchedDAGs && MatchFilterBB)
946  CurDAG->viewGraph("scheduler input for " + BlockName);
947 
948  // Schedule machine code.
949  ScheduleDAGSDNodes *Scheduler = CreateScheduler();
950  {
951  NamedRegionTimer T("sched", "Instruction Scheduling", GroupName,
952  GroupDescription, TimePassesIsEnabled);
953  Scheduler->Run(CurDAG, FuncInfo->MBB);
954  }
955 
956  if (ViewSUnitDAGs && MatchFilterBB)
957  Scheduler->viewGraph();
958 
959  // Emit machine code to BB. This can change 'BB' to the last block being
960  // inserted into.
961  MachineBasicBlock *FirstMBB = FuncInfo->MBB, *LastMBB;
962  {
963  NamedRegionTimer T("emit", "Instruction Creation", GroupName,
964  GroupDescription, TimePassesIsEnabled);
965 
966  // FuncInfo->InsertPt is passed by reference and set to the end of the
967  // scheduled instructions.
968  LastMBB = FuncInfo->MBB = Scheduler->EmitSchedule(FuncInfo->InsertPt);
969  }
970 
971  // If the block was split, make sure we update any references that are used to
972  // update PHI nodes later on.
973  if (FirstMBB != LastMBB)
974  SDB->UpdateSplitBlock(FirstMBB, LastMBB);
975 
976  // Free the scheduler state.
977  {
978  NamedRegionTimer T("cleanup", "Instruction Scheduling Cleanup", GroupName,
979  GroupDescription, TimePassesIsEnabled);
980  delete Scheduler;
981  }
982 
983  // Free the SelectionDAG state, now that we're finished with it.
984  CurDAG->clear();
985 }
986 
987 namespace {
988 
989 /// ISelUpdater - helper class to handle updates of the instruction selection
990 /// graph.
991 class ISelUpdater : public SelectionDAG::DAGUpdateListener {
992  SelectionDAG::allnodes_iterator &ISelPosition;
993 
994 public:
995  ISelUpdater(SelectionDAG &DAG, SelectionDAG::allnodes_iterator &isp)
996  : SelectionDAG::DAGUpdateListener(DAG), ISelPosition(isp) {}
997 
998  /// NodeDeleted - Handle nodes deleted from the graph. If the node being
999  /// deleted is the current ISelPosition node, update ISelPosition.
1000  ///
1001  void NodeDeleted(SDNode *N, SDNode *E) override {
1002  if (ISelPosition == SelectionDAG::allnodes_iterator(N))
1003  ++ISelPosition;
1004  }
1005 };
1006 
1007 } // end anonymous namespace
1008 
1009 // This function is used to enforce the topological node id property
1010 // property leveraged during Instruction selection. Before selection all
1011 // nodes are given a non-negative id such that all nodes have a larger id than
1012 // their operands. As this holds transitively we can prune checks that a node N
1013 // is a predecessor of M another by not recursively checking through M's
1014 // operands if N's ID is larger than M's ID. This is significantly improves
1015 // performance of for various legality checks (e.g. IsLegalToFold /
1016 // UpdateChains).
1017 
1018 // However, when we fuse multiple nodes into a single node
1019 // during selection we may induce a predecessor relationship between inputs and
1020 // outputs of distinct nodes being merged violating the topological property.
1021 // Should a fused node have a successor which has yet to be selected, our
1022 // legality checks would be incorrect. To avoid this we mark all unselected
1023 // sucessor nodes, i.e. id != -1 as invalid for pruning by bit-negating (x =>
1024 // (-(x+1))) the ids and modify our pruning check to ignore negative Ids of M.
1025 // We use bit-negation to more clearly enforce that node id -1 can only be
1026 // achieved by selected nodes). As the conversion is reversable the original Id,
1027 // topological pruning can still be leveraged when looking for unselected nodes.
1028 // This method is call internally in all ISel replacement calls.
1031  Nodes.push_back(Node);
1032 
1033  while (!Nodes.empty()) {
1034  SDNode *N = Nodes.pop_back_val();
1035  for (auto *U : N->uses()) {
1036  auto UId = U->getNodeId();
1037  if (UId > 0) {
1038  InvalidateNodeId(U);
1039  Nodes.push_back(U);
1040  }
1041  }
1042  }
1043 }
1044 
1045 // InvalidateNodeId - As discusses in EnforceNodeIdInvariant, mark a
1046 // NodeId with the equivalent node id which is invalid for topological
1047 // pruning.
1049  int InvalidId = -(N->getNodeId() + 1);
1050  N->setNodeId(InvalidId);
1051 }
1052 
1053 // getUninvalidatedNodeId - get original uninvalidated node id.
1055  int Id = N->getNodeId();
1056  if (Id < -1)
1057  return -(Id + 1);
1058  return Id;
1059 }
1060 
1061 void SelectionDAGISel::DoInstructionSelection() {
1062  LLVM_DEBUG(dbgs() << "===== Instruction selection begins: "
1063  << printMBBReference(*FuncInfo->MBB) << " '"
1064  << FuncInfo->MBB->getName() << "'\n");
1065 
1067 
1068  // Select target instructions for the DAG.
1069  {
1070  // Number all nodes with a topological order and set DAGSize.
1072 
1073  // Create a dummy node (which is not added to allnodes), that adds
1074  // a reference to the root node, preventing it from being deleted,
1075  // and tracking any changes of the root.
1078  ++ISelPosition;
1079 
1080  // Make sure that ISelPosition gets properly updated when nodes are deleted
1081  // in calls made from this function.
1082  ISelUpdater ISU(*CurDAG, ISelPosition);
1083 
1084  // The AllNodes list is now topological-sorted. Visit the
1085  // nodes by starting at the end of the list (the root of the
1086  // graph) and preceding back toward the beginning (the entry
1087  // node).
1088  while (ISelPosition != CurDAG->allnodes_begin()) {
1089  SDNode *Node = &*--ISelPosition;
1090  // Skip dead nodes. DAGCombiner is expected to eliminate all dead nodes,
1091  // but there are currently some corner cases that it misses. Also, this
1092  // makes it theoretically possible to disable the DAGCombiner.
1093  if (Node->use_empty())
1094  continue;
1095 
1096 #ifndef NDEBUG
1098  Nodes.push_back(Node);
1099 
1100  while (!Nodes.empty()) {
1101  auto N = Nodes.pop_back_val();
1102  if (N->getOpcode() == ISD::TokenFactor || N->getNodeId() < 0)
1103  continue;
1104  for (const SDValue &Op : N->op_values()) {
1105  if (Op->getOpcode() == ISD::TokenFactor)
1106  Nodes.push_back(Op.getNode());
1107  else {
1108  // We rely on topological ordering of node ids for checking for
1109  // cycles when fusing nodes during selection. All unselected nodes
1110  // successors of an already selected node should have a negative id.
1111  // This assertion will catch such cases. If this assertion triggers
1112  // it is likely you using DAG-level Value/Node replacement functions
1113  // (versus equivalent ISEL replacement) in backend-specific
1114  // selections. See comment in EnforceNodeIdInvariant for more
1115  // details.
1116  assert(Op->getNodeId() != -1 &&
1117  "Node has already selected predecessor node");
1118  }
1119  }
1120  }
1121 #endif
1122 
1123  // When we are using non-default rounding modes or FP exception behavior
1124  // FP operations are represented by StrictFP pseudo-operations. They
1125  // need to be simplified here so that the target-specific instruction
1126  // selectors know how to handle them.
1127  //
1128  // If the current node is a strict FP pseudo-op, the isStrictFPOp()
1129  // function will provide the corresponding normal FP opcode to which the
1130  // node should be mutated.
1131  //
1132  // FIXME: The backends need a way to handle FP constraints.
1133  if (Node->isStrictFPOpcode())
1134  Node = CurDAG->mutateStrictFPToFP(Node);
1135 
1136  LLVM_DEBUG(dbgs() << "\nISEL: Starting selection on root node: ";
1137  Node->dump(CurDAG));
1138 
1139  Select(Node);
1140  }
1141 
1142  CurDAG->setRoot(Dummy.getValue());
1143  }
1144 
1145  LLVM_DEBUG(dbgs() << "\n===== Instruction selection ends:\n");
1146 
1148 }
1149 
1151  for (const User *U : CPI->users()) {
1152  if (const IntrinsicInst *EHPtrCall = dyn_cast<IntrinsicInst>(U)) {
1153  Intrinsic::ID IID = EHPtrCall->getIntrinsicID();
1154  if (IID == Intrinsic::eh_exceptionpointer ||
1155  IID == Intrinsic::eh_exceptioncode)
1156  return true;
1157  }
1158  }
1159  return false;
1160 }
1161 
1162 // wasm.landingpad.index intrinsic is for associating a landing pad index number
1163 // with a catchpad instruction. Retrieve the landing pad index in the intrinsic
1164 // and store the mapping in the function.
1166  const CatchPadInst *CPI) {
1167  MachineFunction *MF = MBB->getParent();
1168  // In case of single catch (...), we don't emit LSDA, so we don't need
1169  // this information.
1170  bool IsSingleCatchAllClause =
1171  CPI->getNumArgOperands() == 1 &&
1172  cast<Constant>(CPI->getArgOperand(0))->isNullValue();
1173  if (!IsSingleCatchAllClause) {
1174  // Create a mapping from landing pad label to landing pad index.
1175  bool IntrFound = false;
1176  for (const User *U : CPI->users()) {
1177  if (const auto *Call = dyn_cast<IntrinsicInst>(U)) {
1178  Intrinsic::ID IID = Call->getIntrinsicID();
1179  if (IID == Intrinsic::wasm_landingpad_index) {
1180  Value *IndexArg = Call->getArgOperand(1);
1181  int Index = cast<ConstantInt>(IndexArg)->getZExtValue();
1182  MF->setWasmLandingPadIndex(MBB, Index);
1183  IntrFound = true;
1184  break;
1185  }
1186  }
1187  }
1188  assert(IntrFound && "wasm.landingpad.index intrinsic not found!");
1189  (void)IntrFound;
1190  }
1191 }
1192 
1193 /// PrepareEHLandingPad - Emit an EH_LABEL, set up live-in registers, and
1194 /// do other setup for EH landing-pad blocks.
1195 bool SelectionDAGISel::PrepareEHLandingPad() {
1196  MachineBasicBlock *MBB = FuncInfo->MBB;
1197  const Constant *PersonalityFn = FuncInfo->Fn->getPersonalityFn();
1198  const BasicBlock *LLVMBB = MBB->getBasicBlock();
1199  const TargetRegisterClass *PtrRC =
1201 
1202  auto Pers = classifyEHPersonality(PersonalityFn);
1203 
1204  // Catchpads have one live-in register, which typically holds the exception
1205  // pointer or code.
1206  if (isFuncletEHPersonality(Pers)) {
1207  if (const auto *CPI = dyn_cast<CatchPadInst>(LLVMBB->getFirstNonPHI())) {
1208  if (hasExceptionPointerOrCodeUser(CPI)) {
1209  // Get or create the virtual register to hold the pointer or code. Mark
1210  // the live in physreg and copy into the vreg.
1211  MCPhysReg EHPhysReg = TLI->getExceptionPointerRegister(PersonalityFn);
1212  assert(EHPhysReg && "target lacks exception pointer register");
1213  MBB->addLiveIn(EHPhysReg);
1214  unsigned VReg = FuncInfo->getCatchPadExceptionPointerVReg(CPI, PtrRC);
1216  TII->get(TargetOpcode::COPY), VReg)
1217  .addReg(EHPhysReg, RegState::Kill);
1218  }
1219  }
1220  return true;
1221  }
1222 
1223  // Add a label to mark the beginning of the landing pad. Deletion of the
1224  // landing pad can thus be detected via the MachineModuleInfo.
1225  MCSymbol *Label = MF->addLandingPad(MBB);
1226 
1227  const MCInstrDesc &II = TII->get(TargetOpcode::EH_LABEL);
1228  BuildMI(*MBB, FuncInfo->InsertPt, SDB->getCurDebugLoc(), II)
1229  .addSym(Label);
1230 
1231  if (Pers == EHPersonality::Wasm_CXX) {
1232  if (const auto *CPI = dyn_cast<CatchPadInst>(LLVMBB->getFirstNonPHI()))
1233  mapWasmLandingPadIndex(MBB, CPI);
1234  } else {
1235  // Assign the call site to the landing pad's begin label.
1237  // Mark exception register as live in.
1238  if (unsigned Reg = TLI->getExceptionPointerRegister(PersonalityFn))
1240  // Mark exception selector register as live in.
1241  if (unsigned Reg = TLI->getExceptionSelectorRegister(PersonalityFn))
1243  }
1244 
1245  return true;
1246 }
1247 
1248 /// isFoldedOrDeadInstruction - Return true if the specified instruction is
1249 /// side-effect free and is either dead or folded into a generated instruction.
1250 /// Return false if it needs to be emitted.
1253  return !I->mayWriteToMemory() && // Side-effecting instructions aren't folded.
1254  !I->isTerminator() && // Terminators aren't folded.
1255  !isa<DbgInfoIntrinsic>(I) && // Debug instructions aren't folded.
1256  !I->isEHPad() && // EH pad instructions aren't folded.
1257  !FuncInfo->isExportedInst(I); // Exported instrs must be computed.
1258 }
1259 
1260 /// Set up SwiftErrorVals by going through the function. If the function has
1261 /// swifterror argument, it will be the first entry.
1262 static void setupSwiftErrorVals(const Function &Fn, const TargetLowering *TLI,
1264  if (!TLI->supportSwiftError())
1265  return;
1266 
1267  FuncInfo->SwiftErrorVals.clear();
1268  FuncInfo->SwiftErrorVRegDefMap.clear();
1269  FuncInfo->SwiftErrorVRegUpwardsUse.clear();
1270  FuncInfo->SwiftErrorVRegDefUses.clear();
1271  FuncInfo->SwiftErrorArg = nullptr;
1272 
1273  // Check if function has a swifterror argument.
1274  bool HaveSeenSwiftErrorArg = false;
1275  for (Function::const_arg_iterator AI = Fn.arg_begin(), AE = Fn.arg_end();
1276  AI != AE; ++AI)
1277  if (AI->hasSwiftErrorAttr()) {
1278  assert(!HaveSeenSwiftErrorArg &&
1279  "Must have only one swifterror parameter");
1280  (void)HaveSeenSwiftErrorArg; // silence warning.
1281  HaveSeenSwiftErrorArg = true;
1282  FuncInfo->SwiftErrorArg = &*AI;
1283  FuncInfo->SwiftErrorVals.push_back(&*AI);
1284  }
1285 
1286  for (const auto &LLVMBB : Fn)
1287  for (const auto &Inst : LLVMBB) {
1288  if (const AllocaInst *Alloca = dyn_cast<AllocaInst>(&Inst))
1289  if (Alloca->isSwiftError())
1290  FuncInfo->SwiftErrorVals.push_back(Alloca);
1291  }
1292 }
1293 
1295  FastISel *FastIS,
1296  const TargetLowering *TLI,
1297  const TargetInstrInfo *TII,
1299  if (!TLI->supportSwiftError())
1300  return;
1301 
1302  // We only need to do this when we have swifterror parameter or swifterror
1303  // alloc.
1304  if (FuncInfo->SwiftErrorVals.empty())
1305  return;
1306 
1307  assert(FuncInfo->MBB == &*FuncInfo->MF->begin() &&
1308  "expected to insert into entry block");
1309  auto &DL = FuncInfo->MF->getDataLayout();
1310  auto const *RC = TLI->getRegClassFor(TLI->getPointerTy(DL));
1311  for (const auto *SwiftErrorVal : FuncInfo->SwiftErrorVals) {
1312  // We will always generate a copy from the argument. It is always used at
1313  // least by the 'return' of the swifterror.
1314  if (FuncInfo->SwiftErrorArg && FuncInfo->SwiftErrorArg == SwiftErrorVal)
1315  continue;
1316  unsigned VReg = FuncInfo->MF->getRegInfo().createVirtualRegister(RC);
1317  // Assign Undef to Vreg. We construct MI directly to make sure it works
1318  // with FastISel.
1319  BuildMI(*FuncInfo->MBB, FuncInfo->MBB->getFirstNonPHI(),
1320  SDB->getCurDebugLoc(), TII->get(TargetOpcode::IMPLICIT_DEF),
1321  VReg);
1322 
1323  // Keep FastIS informed about the value we just inserted.
1324  if (FastIS)
1325  FastIS->setLastLocalValue(&*std::prev(FuncInfo->InsertPt));
1326 
1327  FuncInfo->setCurrentSwiftErrorVReg(FuncInfo->MBB, SwiftErrorVal, VReg);
1328  }
1329 }
1330 
1331 /// Collect llvm.dbg.declare information. This is done after argument lowering
1332 /// in case the declarations refer to arguments.
1334  MachineFunction *MF = FuncInfo->MF;
1335  const DataLayout &DL = MF->getDataLayout();
1336  for (const BasicBlock &BB : *FuncInfo->Fn) {
1337  for (const Instruction &I : BB) {
1338  const DbgDeclareInst *DI = dyn_cast<DbgDeclareInst>(&I);
1339  if (!DI)
1340  continue;
1341 
1342  assert(DI->getVariable() && "Missing variable");
1343  assert(DI->getDebugLoc() && "Missing location");
1344  const Value *Address = DI->getAddress();
1345  if (!Address)
1346  continue;
1347 
1348  // Look through casts and constant offset GEPs. These mostly come from
1349  // inalloca.
1350  APInt Offset(DL.getTypeSizeInBits(Address->getType()), 0);
1351  Address = Address->stripAndAccumulateInBoundsConstantOffsets(DL, Offset);
1352 
1353  // Check if the variable is a static alloca or a byval or inalloca
1354  // argument passed in memory. If it is not, then we will ignore this
1355  // intrinsic and handle this during isel like dbg.value.
1356  int FI = std::numeric_limits<int>::max();
1357  if (const auto *AI = dyn_cast<AllocaInst>(Address)) {
1358  auto SI = FuncInfo->StaticAllocaMap.find(AI);
1359  if (SI != FuncInfo->StaticAllocaMap.end())
1360  FI = SI->second;
1361  } else if (const auto *Arg = dyn_cast<Argument>(Address))
1362  FI = FuncInfo->getArgumentFrameIndex(Arg);
1363 
1364  if (FI == std::numeric_limits<int>::max())
1365  continue;
1366 
1367  DIExpression *Expr = DI->getExpression();
1368  if (Offset.getBoolValue())
1370  Offset.getZExtValue());
1371  MF->setVariableDbgInfo(DI->getVariable(), Expr, FI, DI->getDebugLoc());
1372  }
1373  }
1374 }
1375 
1376 /// Propagate swifterror values through the machine function CFG.
1378  auto *TLI = FuncInfo->TLI;
1379  if (!TLI->supportSwiftError())
1380  return;
1381 
1382  // We only need to do this when we have swifterror parameter or swifterror
1383  // alloc.
1384  if (FuncInfo->SwiftErrorVals.empty())
1385  return;
1386 
1387  // For each machine basic block in reverse post order.
1389  for (MachineBasicBlock *MBB : RPOT) {
1390  // For each swifterror value in the function.
1391  for(const auto *SwiftErrorVal : FuncInfo->SwiftErrorVals) {
1392  auto Key = std::make_pair(MBB, SwiftErrorVal);
1393  auto UUseIt = FuncInfo->SwiftErrorVRegUpwardsUse.find(Key);
1394  auto VRegDefIt = FuncInfo->SwiftErrorVRegDefMap.find(Key);
1395  bool UpwardsUse = UUseIt != FuncInfo->SwiftErrorVRegUpwardsUse.end();
1396  unsigned UUseVReg = UpwardsUse ? UUseIt->second : 0;
1397  bool DownwardDef = VRegDefIt != FuncInfo->SwiftErrorVRegDefMap.end();
1398  assert(!(UpwardsUse && !DownwardDef) &&
1399  "We can't have an upwards use but no downwards def");
1400 
1401  // If there is no upwards exposed use and an entry for the swifterror in
1402  // the def map for this value we don't need to do anything: We already
1403  // have a downward def for this basic block.
1404  if (!UpwardsUse && DownwardDef)
1405  continue;
1406 
1407  // Otherwise we either have an upwards exposed use vreg that we need to
1408  // materialize or need to forward the downward def from predecessors.
1409 
1410  // Check whether we have a single vreg def from all predecessors.
1411  // Otherwise we need a phi.
1414  for (auto *Pred : MBB->predecessors()) {
1415  if (!Visited.insert(Pred).second)
1416  continue;
1417  VRegs.push_back(std::make_pair(
1418  Pred, FuncInfo->getOrCreateSwiftErrorVReg(Pred, SwiftErrorVal)));
1419  if (Pred != MBB)
1420  continue;
1421  // We have a self-edge.
1422  // If there was no upwards use in this basic block there is now one: the
1423  // phi needs to use it self.
1424  if (!UpwardsUse) {
1425  UpwardsUse = true;
1426  UUseIt = FuncInfo->SwiftErrorVRegUpwardsUse.find(Key);
1427  assert(UUseIt != FuncInfo->SwiftErrorVRegUpwardsUse.end());
1428  UUseVReg = UUseIt->second;
1429  }
1430  }
1431 
1432  // We need a phi node if we have more than one predecessor with different
1433  // downward defs.
1434  bool needPHI =
1435  VRegs.size() >= 1 &&
1436  std::find_if(
1437  VRegs.begin(), VRegs.end(),
1438  [&](const std::pair<const MachineBasicBlock *, unsigned> &V)
1439  -> bool { return V.second != VRegs[0].second; }) !=
1440  VRegs.end();
1441 
1442  // If there is no upwards exposed used and we don't need a phi just
1443  // forward the swifterror vreg from the predecessor(s).
1444  if (!UpwardsUse && !needPHI) {
1445  assert(!VRegs.empty() &&
1446  "No predecessors? The entry block should bail out earlier");
1447  // Just forward the swifterror vreg from the predecessor(s).
1448  FuncInfo->setCurrentSwiftErrorVReg(MBB, SwiftErrorVal, VRegs[0].second);
1449  continue;
1450  }
1451 
1452  auto DLoc = isa<Instruction>(SwiftErrorVal)
1453  ? cast<Instruction>(SwiftErrorVal)->getDebugLoc()
1454  : DebugLoc();
1455  const auto *TII = FuncInfo->MF->getSubtarget().getInstrInfo();
1456 
1457  // If we don't need a phi create a copy to the upward exposed vreg.
1458  if (!needPHI) {
1459  assert(UpwardsUse);
1460  assert(!VRegs.empty() &&
1461  "No predecessors? Is the Calling Convention correct?");
1462  unsigned DestReg = UUseVReg;
1463  BuildMI(*MBB, MBB->getFirstNonPHI(), DLoc, TII->get(TargetOpcode::COPY),
1464  DestReg)
1465  .addReg(VRegs[0].second);
1466  continue;
1467  }
1468 
1469  // We need a phi: if there is an upwards exposed use we already have a
1470  // destination virtual register number otherwise we generate a new one.
1471  auto &DL = FuncInfo->MF->getDataLayout();
1472  auto const *RC = TLI->getRegClassFor(TLI->getPointerTy(DL));
1473  unsigned PHIVReg =
1474  UpwardsUse ? UUseVReg
1475  : FuncInfo->MF->getRegInfo().createVirtualRegister(RC);
1476  MachineInstrBuilder SwiftErrorPHI =
1477  BuildMI(*MBB, MBB->getFirstNonPHI(), DLoc,
1478  TII->get(TargetOpcode::PHI), PHIVReg);
1479  for (auto BBRegPair : VRegs) {
1480  SwiftErrorPHI.addReg(BBRegPair.second).addMBB(BBRegPair.first);
1481  }
1482 
1483  // We did not have a definition in this block before: store the phi's vreg
1484  // as this block downward exposed def.
1485  if (!UpwardsUse)
1486  FuncInfo->setCurrentSwiftErrorVReg(MBB, SwiftErrorVal, PHIVReg);
1487  }
1488  }
1489 }
1490 
1495  if (!TLI->supportSwiftError() || FuncInfo->SwiftErrorVals.empty())
1496  return;
1497 
1498  // Iterator over instructions and assign vregs to swifterror defs and uses.
1499  for (auto It = Begin; It != End; ++It) {
1500  ImmutableCallSite CS(&*It);
1501  if (CS) {
1502  // A call-site with a swifterror argument is both use and def.
1503  const Value *SwiftErrorAddr = nullptr;
1504  for (auto &Arg : CS.args()) {
1505  if (!Arg->isSwiftError())
1506  continue;
1507  // Use of swifterror.
1508  assert(!SwiftErrorAddr && "Cannot have multiple swifterror arguments");
1509  SwiftErrorAddr = &*Arg;
1510  assert(SwiftErrorAddr->isSwiftError() &&
1511  "Must have a swifterror value argument");
1512  unsigned VReg; bool CreatedReg;
1513  std::tie(VReg, CreatedReg) = FuncInfo->getOrCreateSwiftErrorVRegUseAt(
1514  &*It, FuncInfo->MBB, SwiftErrorAddr);
1515  assert(CreatedReg);
1516  }
1517  if (!SwiftErrorAddr)
1518  continue;
1519 
1520  // Def of swifterror.
1521  unsigned VReg; bool CreatedReg;
1522  std::tie(VReg, CreatedReg) =
1523  FuncInfo->getOrCreateSwiftErrorVRegDefAt(&*It);
1524  assert(CreatedReg);
1525  FuncInfo->setCurrentSwiftErrorVReg(FuncInfo->MBB, SwiftErrorAddr, VReg);
1526 
1527  // A load is a use.
1528  } else if (const LoadInst *LI = dyn_cast<const LoadInst>(&*It)) {
1529  const Value *V = LI->getOperand(0);
1530  if (!V->isSwiftError())
1531  continue;
1532 
1533  unsigned VReg; bool CreatedReg;
1534  std::tie(VReg, CreatedReg) =
1535  FuncInfo->getOrCreateSwiftErrorVRegUseAt(LI, FuncInfo->MBB, V);
1536  assert(CreatedReg);
1537 
1538  // A store is a def.
1539  } else if (const StoreInst *SI = dyn_cast<const StoreInst>(&*It)) {
1540  const Value *SwiftErrorAddr = SI->getOperand(1);
1541  if (!SwiftErrorAddr->isSwiftError())
1542  continue;
1543 
1544  // Def of swifterror.
1545  unsigned VReg; bool CreatedReg;
1546  std::tie(VReg, CreatedReg) =
1547  FuncInfo->getOrCreateSwiftErrorVRegDefAt(&*It);
1548  assert(CreatedReg);
1549  FuncInfo->setCurrentSwiftErrorVReg(FuncInfo->MBB, SwiftErrorAddr, VReg);
1550 
1551  // A return in a swiferror returning function is a use.
1552  } else if (const ReturnInst *R = dyn_cast<const ReturnInst>(&*It)) {
1553  const Function *F = R->getParent()->getParent();
1554  if(!F->getAttributes().hasAttrSomewhere(Attribute::SwiftError))
1555  continue;
1556 
1557  unsigned VReg; bool CreatedReg;
1558  std::tie(VReg, CreatedReg) = FuncInfo->getOrCreateSwiftErrorVRegUseAt(
1559  R, FuncInfo->MBB, FuncInfo->SwiftErrorArg);
1560  assert(CreatedReg);
1561  }
1562  }
1563 }
1564 
1565 void SelectionDAGISel::SelectAllBasicBlocks(const Function &Fn) {
1566  FastISelFailed = false;
1567  // Initialize the Fast-ISel state, if needed.
1568  FastISel *FastIS = nullptr;
1569  if (TM.Options.EnableFastISel) {
1570  LLVM_DEBUG(dbgs() << "Enabling fast-isel\n");
1571  FastIS = TLI->createFastISel(*FuncInfo, LibInfo);
1572  }
1573 
1575 
1577 
1578  // Lower arguments up front. An RPO iteration always visits the entry block
1579  // first.
1580  assert(*RPOT.begin() == &Fn.getEntryBlock());
1581  ++NumEntryBlocks;
1582 
1583  // Set up FuncInfo for ISel. Entry blocks never have PHIs.
1586 
1588 
1589  if (!FastIS) {
1590  LowerArguments(Fn);
1591  } else {
1592  // See if fast isel can lower the arguments.
1593  FastIS->startNewBlock();
1594  if (!FastIS->lowerArguments()) {
1595  FastISelFailed = true;
1596  // Fast isel failed to lower these arguments
1597  ++NumFastIselFailLowerArguments;
1598 
1599  OptimizationRemarkMissed R("sdagisel", "FastISelFailure",
1600  Fn.getSubprogram(),
1601  &Fn.getEntryBlock());
1602  R << "FastISel didn't lower all arguments: "
1603  << ore::NV("Prototype", Fn.getType());
1605 
1606  // Use SelectionDAG argument lowering
1607  LowerArguments(Fn);
1609  SDB->clear();
1610  CodeGenAndEmitDAG();
1611  }
1612 
1613  // If we inserted any instructions at the beginning, make a note of
1614  // where they are, so we can be sure to emit subsequent instructions
1615  // after them.
1616  if (FuncInfo->InsertPt != FuncInfo->MBB->begin())
1617  FastIS->setLastLocalValue(&*std::prev(FuncInfo->InsertPt));
1618  else
1619  FastIS->setLastLocalValue(nullptr);
1620  }
1622 
1624 
1625  // Iterate over all basic blocks in the function.
1626  StackProtector &SP = getAnalysis<StackProtector>();
1627  for (const BasicBlock *LLVMBB : RPOT) {
1628  if (OptLevel != CodeGenOpt::None) {
1629  bool AllPredsVisited = true;
1630  for (const_pred_iterator PI = pred_begin(LLVMBB), PE = pred_end(LLVMBB);
1631  PI != PE; ++PI) {
1632  if (!FuncInfo->VisitedBBs.count(*PI)) {
1633  AllPredsVisited = false;
1634  break;
1635  }
1636  }
1637 
1638  if (AllPredsVisited) {
1639  for (const PHINode &PN : LLVMBB->phis())
1641  } else {
1642  for (const PHINode &PN : LLVMBB->phis())
1644  }
1645 
1646  FuncInfo->VisitedBBs.insert(LLVMBB);
1647  }
1648 
1649  BasicBlock::const_iterator const Begin =
1650  LLVMBB->getFirstNonPHI()->getIterator();
1651  BasicBlock::const_iterator const End = LLVMBB->end();
1652  BasicBlock::const_iterator BI = End;
1653 
1654  FuncInfo->MBB = FuncInfo->MBBMap[LLVMBB];
1655  if (!FuncInfo->MBB)
1656  continue; // Some blocks like catchpads have no code or MBB.
1657 
1658  // Insert new instructions after any phi or argument setup code.
1659  FuncInfo->InsertPt = FuncInfo->MBB->end();
1660 
1661  // Setup an EH landing-pad block.
1664  if (LLVMBB->isEHPad())
1665  if (!PrepareEHLandingPad())
1666  continue;
1667 
1668  // Before doing SelectionDAG ISel, see if FastISel has been requested.
1669  if (FastIS) {
1670  if (LLVMBB != &Fn.getEntryBlock())
1671  FastIS->startNewBlock();
1672 
1673  unsigned NumFastIselRemaining = std::distance(Begin, End);
1674 
1675  // Pre-assign swifterror vregs.
1676  preassignSwiftErrorRegs(TLI, FuncInfo, Begin, End);
1677 
1678  // Do FastISel on as many instructions as possible.
1679  for (; BI != Begin; --BI) {
1680  const Instruction *Inst = &*std::prev(BI);
1681 
1682  // If we no longer require this instruction, skip it.
1683  if (isFoldedOrDeadInstruction(Inst, FuncInfo) ||
1684  ElidedArgCopyInstrs.count(Inst)) {
1685  --NumFastIselRemaining;
1686  continue;
1687  }
1688 
1689  // Bottom-up: reset the insert pos at the top, after any local-value
1690  // instructions.
1691  FastIS->recomputeInsertPt();
1692 
1693  // Try to select the instruction with FastISel.
1694  if (FastIS->selectInstruction(Inst)) {
1695  --NumFastIselRemaining;
1696  ++NumFastIselSuccess;
1697  // If fast isel succeeded, skip over all the folded instructions, and
1698  // then see if there is a load right before the selected instructions.
1699  // Try to fold the load if so.
1700  const Instruction *BeforeInst = Inst;
1701  while (BeforeInst != &*Begin) {
1702  BeforeInst = &*std::prev(BasicBlock::const_iterator(BeforeInst));
1703  if (!isFoldedOrDeadInstruction(BeforeInst, FuncInfo))
1704  break;
1705  }
1706  if (BeforeInst != Inst && isa<LoadInst>(BeforeInst) &&
1707  BeforeInst->hasOneUse() &&
1708  FastIS->tryToFoldLoad(cast<LoadInst>(BeforeInst), Inst)) {
1709  // If we succeeded, don't re-select the load.
1710  BI = std::next(BasicBlock::const_iterator(BeforeInst));
1711  --NumFastIselRemaining;
1712  ++NumFastIselSuccess;
1713  }
1714  continue;
1715  }
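// Illustrative example (hypothetical IR, not from this file): given
//   %v = load i32, i32* %p
//   %s = add i32 %v, 1
// once the add has been selected above, the preceding single-use load may be
// merged into the add's machine instruction by tryToFoldLoad, and BI is moved
// past the load so it is not selected a second time.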
1716 
1717  FastISelFailed = true;
1718 
1719  // Then handle certain instructions as single-LLVM-Instruction blocks.
1720  // We cannot separate out GCrelocates to their own blocks since we need
1721  // to keep track of gc-relocates for a particular gc-statepoint. This is
1722  // done by SelectionDAGBuilder::LowerAsSTATEPOINT, called before
1723  // visitGCRelocate.
1724  if (isa<CallInst>(Inst) && !isStatepoint(Inst) && !isGCRelocate(Inst) &&
1725  !isGCResult(Inst)) {
1726  OptimizationRemarkMissed R("sdagisel", "FastISelFailure",
1727  Inst->getDebugLoc(), LLVMBB);
1728 
1729  R << "FastISel missed call";
1730 
1731  if (R.isEnabled() || EnableFastISelAbort) {
1732  std::string InstStrStorage;
1733  raw_string_ostream InstStr(InstStrStorage);
1734  InstStr << *Inst;
1735 
1736  R << ": " << InstStr.str();
1737  }
1738 
1739  reportFastISelFailure(*MF, *ORE, R, EnableFastISelAbort > 2);
1740 
1741  if (!Inst->getType()->isVoidTy() && !Inst->getType()->isTokenTy() &&
1742  !Inst->use_empty()) {
1743  unsigned &R = FuncInfo->ValueMap[Inst];
1744  if (!R)
1745  R = FuncInfo->CreateRegs(Inst->getType());
1746  }
1747 
1748  bool HadTailCall = false;
1749  MachineBasicBlock::iterator SavedInsertPt = FuncInfo->InsertPt;
1750  SelectBasicBlock(Inst->getIterator(), BI, HadTailCall);
1751 
1752  // If the call was emitted as a tail call, we're done with the block.
1753  // We also need to delete any previously emitted instructions.
1754  if (HadTailCall) {
1755  FastIS->removeDeadCode(SavedInsertPt, FuncInfo->MBB->end());
1756  --BI;
1757  break;
1758  }
1759 
1760  // Recompute NumFastIselRemaining as Selection DAG instruction
1761  // selection may have handled the call, input args, etc.
1762  unsigned RemainingNow = std::distance(Begin, BI);
1763  NumFastIselFailures += NumFastIselRemaining - RemainingNow;
1764  NumFastIselRemaining = RemainingNow;
1765  continue;
1766  }
1767 
1768  OptimizationRemarkMissed R("sdagisel", "FastISelFailure",
1769  Inst->getDebugLoc(), LLVMBB);
1770 
1771  bool ShouldAbort = EnableFastISelAbort;
1772  if (Inst->isTerminator()) {
1773  // Use a different message for terminator misses.
1774  R << "FastISel missed terminator";
1775  // Don't abort for terminator unless the level is really high
1776  ShouldAbort = (EnableFastISelAbort > 2);
1777  } else {
1778  R << "FastISel missed";
1779  }
1780 
1781  if (R.isEnabled() || EnableFastISelAbort) {
1782  std::string InstStrStorage;
1783  raw_string_ostream InstStr(InstStrStorage);
1784  InstStr << *Inst;
1785  R << ": " << InstStr.str();
1786  }
1787 
1788  reportFastISelFailure(*MF, *ORE, R, ShouldAbort);
1789 
1790  NumFastIselFailures += NumFastIselRemaining;
1791  break;
1792  }
1793 
1794  FastIS->recomputeInsertPt();
1795  }
1796 
1797  if (SP.shouldEmitSDCheck(*LLVMBB)) {
1798  bool FunctionBasedInstrumentation =
1799  TLI->getSSPStackGuardCheck(*Fn.getParent());
1800  SDB->SPDescriptor.initialize(LLVMBB, FuncInfo->MBBMap[LLVMBB],
1801  FunctionBasedInstrumentation);
1802  }
1803 
1804  if (Begin != BI)
1805  ++NumDAGBlocks;
1806  else
1807  ++NumFastIselBlocks;
1808 
1809  if (Begin != BI) {
1810  // Run SelectionDAG instruction selection on the remainder of the block
1811  // not handled by FastISel. If FastISel is not run, this is the entire
1812  // block.
1813  bool HadTailCall;
1814  SelectBasicBlock(Begin, BI, HadTailCall);
1815 
1816  // But if FastISel was run, we already selected some of the block.
1817  // If we emitted a tail-call, we need to delete any previously emitted
1818  // instruction that follows it.
1819  if (HadTailCall && FuncInfo->InsertPt != FuncInfo->MBB->end())
1820  FastIS->removeDeadCode(FuncInfo->InsertPt, FuncInfo->MBB->end());
1821  }
1822 
1823  if (FastIS)
1824  FastIS->finishBasicBlock();
1825  FinishBasicBlock();
1826  FuncInfo->PHINodesToUpdate.clear();
1827  ElidedArgCopyInstrs.clear();
1828  }
1829 
1831 
1833 
1834  delete FastIS;
1835  SDB->clearDanglingDebugInfo();
1836  SDB->SPDescriptor.resetPerFunctionState();
1837 }
1838 
1839 /// Given that the input MI is before a partial terminator sequence TSeq, return
1840 /// true if MI + TSeq is also a partial terminator sequence.
1841 ///
1842 /// A Terminator sequence is a sequence of MachineInstrs which at this point in
1843 /// lowering copy vregs into physical registers, which are then passed into
1844 /// terminator instructions so we can satisfy ABI constraints. A partial
1845 /// terminator sequence is an improper subset of a terminator sequence (i.e. it
1846 /// may be the whole terminator sequence).
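// Illustrative sketch (hypothetical MIR, not from this file): the tail of a
// returning block typically looks like
//   $eax = COPY %0          ; vreg copied into the ABI return register
//   RET 0, implicit $eax    ; terminator consuming the physreg
// The COPY, together with any DBG_VALUEs interleaved with it, is the
// "terminator sequence" this predicate recognizes one instruction at a time.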
1847 static bool MIIsInTerminatorSequence(const MachineInstr &MI) {
1848  // If we do not have a copy or an implicit def, we return true if and only if
1849  // MI is a debug value.
1850  if (!MI.isCopy() && !MI.isImplicitDef())
1851  // Sometimes DBG_VALUE MIs sneak in between the copies from the vregs to the
1852  // physical registers if there is debug info associated with the terminator
1853  // of our mbb. We want to include said debug info in our terminator
1854  // sequence, so we return true in that case.
1855  return MI.isDebugValue();
1856 
1857  // We have left the terminator sequence if we are not doing one of the
1858  // following:
1859  //
1860  // 1. Copying a vreg into a physical register.
1861  // 2. Copying a vreg into a vreg.
1862  // 3. Defining a register via an implicit def.
1863 
1864  // OPI should always be a register definition...
1865  MachineInstr::const_mop_iterator OPI = MI.operands_begin();
1866  if (!OPI->isReg() || !OPI->isDef())
1867  return false;
1868 
1869  // Defining any register via an implicit def is always ok.
1870  if (MI.isImplicitDef())
1871  return true;
1872 
1873  // Grab the copy source...
1874  MachineInstr::const_mop_iterator OPI2 = OPI;
1875  ++OPI2;
1876  assert(OPI2 != MI.operands_end()
1877  && "Should have a copy implying we should have 2 arguments.");
1878 
1879  // Make sure that the copy dest is not a vreg when the copy source is a
1880  // physical register.
1881  if (!OPI2->isReg() ||
1882  (!TargetRegisterInfo::isPhysicalRegister(OPI->getReg()) &&
1883  TargetRegisterInfo::isPhysicalRegister(OPI2->getReg())))
1884  return false;
1885 
1886  return true;
1887 }
1888 
1889 /// Find the split point at which to splice the end of BB into its success stack
1890 /// protector check machine basic block.
1891 ///
1892 /// On many platforms, due to ABI constraints, terminators, even before register
1893 /// allocation, use physical registers. This creates an issue for us since
1894 /// physical registers at this point can not travel across basic
1895 /// blocks. Luckily, selectiondag always moves physical registers into vregs
1896 /// when they enter functions and moves them through a sequence of copies back
1897 /// into the physical registers right before the terminator creating a
1898 /// ``Terminator Sequence''. This function is searching for the beginning of the
1899 /// terminator sequence so that we can ensure that we splice off not just the
1900 /// terminator, but additionally the copies that move the vregs into the
1901 /// physical registers.
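// Illustrative sketch (hypothetical MIR): if the parent block ends in
//   $eax = COPY %retval
//   RET 0, implicit $eax
// the walk below steps backwards from the terminator and returns the COPY, so
// callers splice both the copy and the RET into the success block instead of
// stranding the physreg-producing copy behind the new stack protector check.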
1902 static MachineBasicBlock::iterator
1903 FindSplitPointForStackProtector(MachineBasicBlock *BB) {
1904  MachineBasicBlock::iterator SplitPoint = BB->getFirstTerminator();
1905  //
1906  if (SplitPoint == BB->begin())
1907  return SplitPoint;
1908 
1909  MachineBasicBlock::iterator Start = BB->begin();
1910  MachineBasicBlock::iterator Previous = SplitPoint;
1911  --Previous;
1912 
1913  while (MIIsInTerminatorSequence(*Previous)) {
1914  SplitPoint = Previous;
1915  if (Previous == Start)
1916  break;
1917  --Previous;
1918  }
1919 
1920  return SplitPoint;
1921 }
1922 
1923 void
1924 SelectionDAGISel::FinishBasicBlock() {
1925  LLVM_DEBUG(dbgs() << "Total amount of phi nodes to update: "
1926  << FuncInfo->PHINodesToUpdate.size() << "\n";
1927  for (unsigned i = 0, e = FuncInfo->PHINodesToUpdate.size(); i != e;
1928  ++i) dbgs()
1929  << "Node " << i << " : (" << FuncInfo->PHINodesToUpdate[i].first
1930  << ", " << FuncInfo->PHINodesToUpdate[i].second << ")\n");
1931 
1932  // Next, now that we know what the last MBB the LLVM BB expanded is, update
1933  // PHI nodes in successors.
1934  for (unsigned i = 0, e = FuncInfo->PHINodesToUpdate.size(); i != e; ++i) {
1935  MachineInstrBuilder PHI(*MF, FuncInfo->PHINodesToUpdate[i].first);
1936  assert(PHI->isPHI() &&
1937  "This is not a machine PHI node that we are updating!");
1938  if (!FuncInfo->MBB->isSuccessor(PHI->getParent()))
1939  continue;
1940  PHI.addReg(FuncInfo->PHINodesToUpdate[i].second).addMBB(FuncInfo->MBB);
1941  }
1942 
1943  // Handle stack protector.
1944  if (SDB->SPDescriptor.shouldEmitFunctionBasedCheckStackProtector()) {
1945  // The target provides a guard check function. There is no need to
1946  // generate error handling code or to split current basic block.
1947  MachineBasicBlock *ParentMBB = SDB->SPDescriptor.getParentMBB();
1948 
1949  // Add load and check to the basicblock.
1950  FuncInfo->MBB = ParentMBB;
1951  FuncInfo->InsertPt =
1952  FindSplitPointForStackProtector(ParentMBB);
1953  SDB->visitSPDescriptorParent(SDB->SPDescriptor, ParentMBB);
1954  CurDAG->setRoot(SDB->getRoot());
1955  SDB->clear();
1956  CodeGenAndEmitDAG();
1957 
1958  // Clear the Per-BB State.
1959  SDB->SPDescriptor.resetPerBBState();
1960  } else if (SDB->SPDescriptor.shouldEmitStackProtector()) {
1961  MachineBasicBlock *ParentMBB = SDB->SPDescriptor.getParentMBB();
1962  MachineBasicBlock *SuccessMBB = SDB->SPDescriptor.getSuccessMBB();
1963 
1964  // Find the split point to split the parent mbb. At the same time copy all
1965  // physical registers used in the tail of parent mbb into virtual registers
1966  // before the split point and back into physical registers after the split
1967  // point. This prevents us needing to deal with Live-ins and many other
1968  // register allocation issues caused by us splitting the parent mbb. The
1969  // register allocator will clean up said virtual copies later on.
1970  MachineBasicBlock::iterator SplitPoint =
1971  FindSplitPointForStackProtector(ParentMBB);
1972 
1973  // Splice the terminator of ParentMBB into SuccessMBB.
1974  SuccessMBB->splice(SuccessMBB->end(), ParentMBB,
1975  SplitPoint,
1976  ParentMBB->end());
1977 
1978  // Add compare/jump on neq/jump to the parent BB.
1979  FuncInfo->MBB = ParentMBB;
1980  FuncInfo->InsertPt = ParentMBB->end();
1981  SDB->visitSPDescriptorParent(SDB->SPDescriptor, ParentMBB);
1982  CurDAG->setRoot(SDB->getRoot());
1983  SDB->clear();
1984  CodeGenAndEmitDAG();
1985 
1986  // CodeGen Failure MBB if we have not codegened it yet.
1987  MachineBasicBlock *FailureMBB = SDB->SPDescriptor.getFailureMBB();
1988  if (FailureMBB->empty()) {
1989  FuncInfo->MBB = FailureMBB;
1990  FuncInfo->InsertPt = FailureMBB->end();
1991  SDB->visitSPDescriptorFailure(SDB->SPDescriptor, FailureMBB);
1992  CurDAG->setRoot(SDB->getRoot());
1993  SDB->clear();
1994  CodeGenAndEmitDAG();
1995  }
1996 
1997  // Clear the Per-BB State.
1998  SDB->SPDescriptor.resetPerBBState();
1999  }
2000 
2001  // Lower each BitTestBlock.
2002  for (auto &BTB : SDB->BitTestCases) {
2003  // Lower header first, if it wasn't already lowered
2004  if (!BTB.Emitted) {
2005  // Set the current basic block to the mbb we wish to insert the code into
2006  FuncInfo->MBB = BTB.Parent;
2007  FuncInfo->InsertPt = FuncInfo->MBB->end();
2008  // Emit the code
2009  SDB->visitBitTestHeader(BTB, FuncInfo->MBB);
2010  CurDAG->setRoot(SDB->getRoot());
2011  SDB->clear();
2012  CodeGenAndEmitDAG();
2013  }
2014 
2015  BranchProbability UnhandledProb = BTB.Prob;
2016  for (unsigned j = 0, ej = BTB.Cases.size(); j != ej; ++j) {
2017  UnhandledProb -= BTB.Cases[j].ExtraProb;
2018  // Set the current basic block to the mbb we wish to insert the code into
2019  FuncInfo->MBB = BTB.Cases[j].ThisBB;
2020  FuncInfo->InsertPt = FuncInfo->MBB->end();
2021  // Emit the code
2022 
2023  // If all cases cover a contiguous range, it is not necessary to jump to
2024  // the default block after the last bit test fails. This is because the
2025  // range check during bit test header creation has guaranteed that every
2026  // case here doesn't go outside the range. In this case, there is no need
2027  // to perform the last bit test, as it will always be true. Instead, make
2028  // the second-to-last bit-test fall through to the target of the last bit
2029  // test, and delete the last bit test.
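// Hypothetical example: a switch whose cases form the contiguous range
// [10, 40] may be lowered to three bit tests. The header's range check has
// already proven the value lies in that range, so the last test can never
// fail; the second-to-last test therefore falls through straight to the last
// test's target, and the now-redundant final BitTestCase is popped below.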
2030 
2031  MachineBasicBlock *NextMBB;
2032  if (BTB.ContiguousRange && j + 2 == ej) {
2033  // Second-to-last bit-test with contiguous range: fall through to the
2034  // target of the final bit test.
2035  NextMBB = BTB.Cases[j + 1].TargetBB;
2036  } else if (j + 1 == ej) {
2037  // For the last bit test, fall through to Default.
2038  NextMBB = BTB.Default;
2039  } else {
2040  // Otherwise, fall through to the next bit test.
2041  NextMBB = BTB.Cases[j + 1].ThisBB;
2042  }
2043 
2044  SDB->visitBitTestCase(BTB, NextMBB, UnhandledProb, BTB.Reg, BTB.Cases[j],
2045  FuncInfo->MBB);
2046 
2047  CurDAG->setRoot(SDB->getRoot());
2048  SDB->clear();
2049  CodeGenAndEmitDAG();
2050 
2051  if (BTB.ContiguousRange && j + 2 == ej) {
2052  // Since we're not going to use the final bit test, remove it.
2053  BTB.Cases.pop_back();
2054  break;
2055  }
2056  }
2057 
2058  // Update PHI Nodes
2059  for (unsigned pi = 0, pe = FuncInfo->PHINodesToUpdate.size();
2060  pi != pe; ++pi) {
2061  MachineInstrBuilder PHI(*MF, FuncInfo->PHINodesToUpdate[pi].first);
2062  MachineBasicBlock *PHIBB = PHI->getParent();
2063  assert(PHI->isPHI() &&
2064  "This is not a machine PHI node that we are updating!");
2065  // This is "default" BB. We have two jumps to it. From "header" BB and
2066  // from last "case" BB, unless the latter was skipped.
2067  if (PHIBB == BTB.Default) {
2068  PHI.addReg(FuncInfo->PHINodesToUpdate[pi].second).addMBB(BTB.Parent);
2069  if (!BTB.ContiguousRange) {
2070  PHI.addReg(FuncInfo->PHINodesToUpdate[pi].second)
2071  .addMBB(BTB.Cases.back().ThisBB);
2072  }
2073  }
2074  // One of "cases" BB.
2075  for (unsigned j = 0, ej = BTB.Cases.size();
2076  j != ej; ++j) {
2077  MachineBasicBlock* cBB = BTB.Cases[j].ThisBB;
2078  if (cBB->isSuccessor(PHIBB))
2079  PHI.addReg(FuncInfo->PHINodesToUpdate[pi].second).addMBB(cBB);
2080  }
2081  }
2082  }
2083  SDB->BitTestCases.clear();
2084 
2085  // If the JumpTable record is filled in, then we need to emit a jump table.
2086  // Updating the PHI nodes is tricky in this case, since we need to determine
2087  // whether the PHI is a successor of the range check MBB or the jump table MBB
2088  for (unsigned i = 0, e = SDB->JTCases.size(); i != e; ++i) {
2089  // Lower header first, if it wasn't already lowered
2090  if (!SDB->JTCases[i].first.Emitted) {
2091  // Set the current basic block to the mbb we wish to insert the code into
2092  FuncInfo->MBB = SDB->JTCases[i].first.HeaderBB;
2093  FuncInfo->InsertPt = FuncInfo->MBB->end();
2094  // Emit the code
2095  SDB->visitJumpTableHeader(SDB->JTCases[i].second, SDB->JTCases[i].first,
2096  FuncInfo->MBB);
2097  CurDAG->setRoot(SDB->getRoot());
2098  SDB->clear();
2099  CodeGenAndEmitDAG();
2100  }
2101 
2102  // Set the current basic block to the mbb we wish to insert the code into
2103  FuncInfo->MBB = SDB->JTCases[i].second.MBB;
2104  FuncInfo->InsertPt = FuncInfo->MBB->end();
2105  // Emit the code
2106  SDB->visitJumpTable(SDB->JTCases[i].second);
2107  CurDAG->setRoot(SDB->getRoot());
2108  SDB->clear();
2109  CodeGenAndEmitDAG();
2110 
2111  // Update PHI Nodes
2112  for (unsigned pi = 0, pe = FuncInfo->PHINodesToUpdate.size();
2113  pi != pe; ++pi) {
2114  MachineInstrBuilder PHI(*MF, FuncInfo->PHINodesToUpdate[pi].first);
2115  MachineBasicBlock *PHIBB = PHI->getParent();
2116  assert(PHI->isPHI() &&
2117  "This is not a machine PHI node that we are updating!");
2118  // "default" BB. We can go there only from header BB.
2119  if (PHIBB == SDB->JTCases[i].second.Default)
2120  PHI.addReg(FuncInfo->PHINodesToUpdate[pi].second)
2121  .addMBB(SDB->JTCases[i].first.HeaderBB);
2122  // JT BB. Just iterate over successors here
2123  if (FuncInfo->MBB->isSuccessor(PHIBB))
2124  PHI.addReg(FuncInfo->PHINodesToUpdate[pi].second).addMBB(FuncInfo->MBB);
2125  }
2126  }
2127  SDB->JTCases.clear();
2128 
2129  // If we generated any switch lowering information, build and codegen any
2130  // additional DAGs necessary.
2131  for (unsigned i = 0, e = SDB->SwitchCases.size(); i != e; ++i) {
2132  // Set the current basic block to the mbb we wish to insert the code into
2133  FuncInfo->MBB = SDB->SwitchCases[i].ThisBB;
2134  FuncInfo->InsertPt = FuncInfo->MBB->end();
2135 
2136  // Determine the unique successors.
2137  SmallVector<MachineBasicBlock*, 2> Succs;
2138  Succs.push_back(SDB->SwitchCases[i].TrueBB);
2139  if (SDB->SwitchCases[i].TrueBB != SDB->SwitchCases[i].FalseBB)
2140  Succs.push_back(SDB->SwitchCases[i].FalseBB);
2141 
2142  // Emit the code. Note that this could result in FuncInfo->MBB being split.
2143  SDB->visitSwitchCase(SDB->SwitchCases[i], FuncInfo->MBB);
2144  CurDAG->setRoot(SDB->getRoot());
2145  SDB->clear();
2146  CodeGenAndEmitDAG();
2147 
2148  // Remember the last block, now that any splitting is done, for use in
2149  // populating PHI nodes in successors.
2150  MachineBasicBlock *ThisBB = FuncInfo->MBB;
2151 
2152  // Handle any PHI nodes in successors of this chunk, as if we were coming
2153  // from the original BB before switch expansion. Note that PHI nodes can
2154  // occur multiple times in PHINodesToUpdate. We have to be very careful to
2155  // handle them the right number of times.
2156  for (unsigned i = 0, e = Succs.size(); i != e; ++i) {
2157  FuncInfo->MBB = Succs[i];
2158  FuncInfo->InsertPt = FuncInfo->MBB->end();
2159  // FuncInfo->MBB may have been removed from the CFG if a branch was
2160  // constant folded.
2161  if (ThisBB->isSuccessor(FuncInfo->MBB)) {
2162  for (MachineBasicBlock::iterator
2163  MBBI = FuncInfo->MBB->begin(), MBBE = FuncInfo->MBB->end();
2164  MBBI != MBBE && MBBI->isPHI(); ++MBBI) {
2165  MachineInstrBuilder PHI(*MF, MBBI);
2166  // This value for this PHI node is recorded in PHINodesToUpdate.
2167  for (unsigned pn = 0; ; ++pn) {
2168  assert(pn != FuncInfo->PHINodesToUpdate.size() &&
2169  "Didn't find PHI entry!");
2170  if (FuncInfo->PHINodesToUpdate[pn].first == PHI) {
2171  PHI.addReg(FuncInfo->PHINodesToUpdate[pn].second).addMBB(ThisBB);
2172  break;
2173  }
2174  }
2175  }
2176  }
2177  }
2178  }
2179  SDB->SwitchCases.clear();
2180 }
2181 
2182 /// Create the scheduler. If a specific scheduler was specified
2183 /// via the SchedulerRegistry, use it, otherwise select the
2184 /// one preferred by the target.
2185 ///
2186 ScheduleDAGSDNodes *SelectionDAGISel::CreateScheduler() {
2187  return ISHeuristic(this, OptLevel);
2188 }
2189 
2190 //===----------------------------------------------------------------------===//
2191 // Helper functions used by the generated instruction selector.
2192 //===----------------------------------------------------------------------===//
2193 // Calls to these methods are generated by tblgen.
2194 
2195 /// CheckAndMask - The isel is trying to match something like (and X, 255). If
2196 /// the dag combiner simplified the 255, we still want to match. RHS is the
2197 /// actual value in the DAG on the RHS of an AND, and DesiredMaskS is the value
2198 /// specified in the .td file (e.g. 255).
2199 bool SelectionDAGISel::CheckAndMask(SDValue LHS, ConstantSDNode *RHS,
2200  int64_t DesiredMaskS) const {
2201  const APInt &ActualMask = RHS->getAPIntValue();
2202  const APInt &DesiredMask = APInt(LHS.getValueSizeInBits(), DesiredMaskS);
2203 
2204  // If the actual mask exactly matches, success!
2205  if (ActualMask == DesiredMask)
2206  return true;
2207 
2208  // If the actual AND mask is allowing unallowed bits, this doesn't match.
2209  if (!ActualMask.isSubsetOf(DesiredMask))
2210  return false;
2211 
2212  // Otherwise, the DAG Combiner may have proven that the value coming in is
2213  // either already zero or is not demanded. Check for known zero input bits.
2214  APInt NeededMask = DesiredMask & ~ActualMask;
2215  if (CurDAG->MaskedValueIsZero(LHS, NeededMask))
2216  return true;
2217 
2218  // TODO: check to see if missing bits are just not demanded.
2219 
2220  // Otherwise, this pattern doesn't match.
2221  return false;
2222 }
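// Worked example (illustrative): a pattern written against (and X, 255) still
// matches if the DAG combiner shrank the constant to 254 after proving bit 0
// of X is zero: ActualMask=254 is a subset of DesiredMask=255, and the one
// missing bit is covered by MaskedValueIsZero, so CheckAndMask returns true.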
2223 
2224 /// CheckOrMask - The isel is trying to match something like (or X, 255). If
2225 /// the dag combiner simplified the 255, we still want to match. RHS is the
2226 /// actual value in the DAG on the RHS of an OR, and DesiredMaskS is the value
2227 /// specified in the .td file (e.g. 255).
2228 bool SelectionDAGISel::CheckOrMask(SDValue LHS, ConstantSDNode *RHS,
2229  int64_t DesiredMaskS) const {
2230  const APInt &ActualMask = RHS->getAPIntValue();
2231  const APInt &DesiredMask = APInt(LHS.getValueSizeInBits(), DesiredMaskS);
2232 
2233  // If the actual mask exactly matches, success!
2234  if (ActualMask == DesiredMask)
2235  return true;
2236 
2237  // If the actual OR mask is setting bits that the desired mask doesn't allow, this doesn't match.
2238  if (!ActualMask.isSubsetOf(DesiredMask))
2239  return false;
2240 
2241  // Otherwise, the DAG Combiner may have proven that the value coming in is
2242  // either already zero or is not demanded. Check for known zero input bits.
2243  APInt NeededMask = DesiredMask & ~ActualMask;
2244  KnownBits Known = CurDAG->computeKnownBits(LHS);
2245 
2246  // If all the missing bits in the or are already known to be set, match!
2247  if (NeededMask.isSubsetOf(Known.One))
2248  return true;
2249 
2250  // TODO: check to see if missing bits are just not demanded.
2251 
2252  // Otherwise, this pattern doesn't match.
2253  return false;
2254 }
2255 
2256 /// SelectInlineAsmMemoryOperands - Calls to this are automatically generated
2257 /// by tblgen. Others should not call it.
2258 void SelectionDAGISel::SelectInlineAsmMemoryOperands(std::vector<SDValue> &Ops,
2259  const SDLoc &DL) {
2260  std::vector<SDValue> InOps;
2261  std::swap(InOps, Ops);
2262 
2263  Ops.push_back(InOps[InlineAsm::Op_InputChain]); // 0
2264  Ops.push_back(InOps[InlineAsm::Op_AsmString]); // 1
2265  Ops.push_back(InOps[InlineAsm::Op_MDNode]); // 2, !srcloc
2266  Ops.push_back(InOps[InlineAsm::Op_ExtraInfo]); // 3 (SideEffect, AlignStack)
2267 
2268  unsigned i = InlineAsm::Op_FirstOperand, e = InOps.size();
2269  if (InOps[e-1].getValueType() == MVT::Glue)
2270  --e; // Don't process a glue operand if it is here.
2271 
2272  while (i != e) {
2273  unsigned Flags = cast<ConstantSDNode>(InOps[i])->getZExtValue();
2274  if (!InlineAsm::isMemKind(Flags)) {
2275  // Just skip over this operand, copying the operands verbatim.
2276  Ops.insert(Ops.end(), InOps.begin()+i,
2277  InOps.begin()+i+InlineAsm::getNumOperandRegisters(Flags) + 1);
2278  i += InlineAsm::getNumOperandRegisters(Flags) + 1;
2279  } else {
2280  assert(InlineAsm::getNumOperandRegisters(Flags) == 1 &&
2281  "Memory operand with multiple values?");
2282 
2283  unsigned TiedToOperand;
2284  if (InlineAsm::isUseOperandTiedToDef(Flags, TiedToOperand)) {
2285  // We need the constraint ID from the operand this is tied to.
2286  unsigned CurOp = InlineAsm::Op_FirstOperand;
2287  Flags = cast<ConstantSDNode>(InOps[CurOp])->getZExtValue();
2288  for (; TiedToOperand; --TiedToOperand) {
2289  CurOp += InlineAsm::getNumOperandRegisters(Flags)+1;
2290  Flags = cast<ConstantSDNode>(InOps[CurOp])->getZExtValue();
2291  }
2292  }
2293 
2294  // Otherwise, this is a memory operand. Ask the target to select it.
2295  std::vector<SDValue> SelOps;
2296  unsigned ConstraintID = InlineAsm::getMemoryConstraintID(Flags);
2297  if (SelectInlineAsmMemoryOperand(InOps[i+1], ConstraintID, SelOps))
2298  report_fatal_error("Could not match memory address. Inline asm"
2299  " failure!");
2300 
2301  // Add this to the output node.
2302  unsigned NewFlags =
2303  InlineAsm::getFlagWord(InlineAsm::Kind_Mem, SelOps.size());
2304  NewFlags = InlineAsm::getFlagWordForMem(NewFlags, ConstraintID);
2305  Ops.push_back(CurDAG->getTargetConstant(NewFlags, DL, MVT::i32));
2306  Ops.insert(Ops.end(), SelOps.begin(), SelOps.end());
2307  i += 2;
2308  }
2309  }
2310 
2311  // Add the glue input back if present.
2312  if (e != InOps.size())
2313  Ops.push_back(InOps.back());
2314 }
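// For orientation (informal; see InlineAsm.h for the authoritative encoding):
// the INLINEASM operand list processed above is laid out as the input chain,
// the asm string, the !srcloc metadata and the extra-info word, followed by
// one flag word per constraint and that constraint's value operands, with an
// optional glue operand at the very end. A memory constraint carries exactly
// one value operand, which is what gets handed to
// SelectInlineAsmMemoryOperand for address selection.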
2315 
2316 /// findGlueUse - Return use of MVT::Glue value produced by the specified
2317 /// SDNode.
2318 ///
2319 static SDNode *findGlueUse(SDNode *N) {
2320  unsigned FlagResNo = N->getNumValues()-1;
2321  for (SDNode::use_iterator I = N->use_begin(), E = N->use_end(); I != E; ++I) {
2322  SDUse &Use = I.getUse();
2323  if (Use.getResNo() == FlagResNo)
2324  return Use.getUser();
2325  }
2326  return nullptr;
2327 }
2328 
2329 /// findNonImmUse - Return true if "Def" is a predecessor of "Root" via a path
2330 /// beyond "ImmedUse". We may ignore chains as they are checked separately.
2331 static bool findNonImmUse(SDNode *Root, SDNode *Def, SDNode *ImmedUse,
2332  bool IgnoreChains) {
2333  SmallPtrSet<const SDNode *, 16> Visited;
2334  SmallVector<const SDNode *, 16> WorkList;
2335  // Only check if we have non-immediate uses of Def.
2336  if (ImmedUse->isOnlyUserOf(Def))
2337  return false;
2338 
2339  // We don't care about paths to Def that go through ImmedUse so mark it
2340  // visited and mark non-def operands as used.
2341  Visited.insert(ImmedUse);
2342  for (const SDValue &Op : ImmedUse->op_values()) {
2343  SDNode *N = Op.getNode();
2344  // Ignore chain deps (they are validated by
2345  // HandleMergeInputChains) and immediate uses
2346  if ((Op.getValueType() == MVT::Other && IgnoreChains) || N == Def)
2347  continue;
2348  if (!Visited.insert(N).second)
2349  continue;
2350  WorkList.push_back(N);
2351  }
2352 
2353  // Initialize worklist to operands of Root.
2354  if (Root != ImmedUse) {
2355  for (const SDValue &Op : Root->op_values()) {
2356  SDNode *N = Op.getNode();
2357  // Ignore chains (they are validated by HandleMergeInputChains)
2358  if ((Op.getValueType() == MVT::Other && IgnoreChains) || N == Def)
2359  continue;
2360  if (!Visited.insert(N).second)
2361  continue;
2362  WorkList.push_back(N);
2363  }
2364  }
2365 
2366  return SDNode::hasPredecessorHelper(Def, Visited, WorkList, 0, true);
2367 }
2368 
2369 /// IsProfitableToFold - Returns true if it's profitable to fold the specific
2370 /// operand node N of U during instruction selection that starts at Root.
2371 bool SelectionDAGISel::IsProfitableToFold(SDValue N, SDNode *U,
2372  SDNode *Root) const {
2373  if (OptLevel == CodeGenOpt::None) return false;
2374  return N.hasOneUse();
2375 }
2376 
2377 /// IsLegalToFold - Returns true if the specific operand node N of
2378 /// U can be folded during instruction selection that starts at Root.
2379 bool SelectionDAGISel::IsLegalToFold(SDValue N, SDNode *U, SDNode *Root,
2380  CodeGenOpt::Level OptLevel,
2381  bool IgnoreChains) {
2382  if (OptLevel == CodeGenOpt::None) return false;
2383 
2384  // If Root use can somehow reach N through a path that doesn't contain
2385  // U then folding N would create a cycle. e.g. In the following
2386  // diagram, Root can reach N through X. If N is folded into Root, then
2387  // X is both a predecessor and a successor of U.
2388  //
2389  // [N*] //
2390  // ^ ^ //
2391  // / \ //
2392  // [U*] [X]? //
2393  // ^ ^ //
2394  // \ / //
2395  // \ / //
2396  // [Root*] //
2397  //
2398  // * indicates nodes to be folded together.
2399  //
2400  // If Root produces glue, then it gets (even more) interesting. Since it
2401  // will be "glued" together with its glue use in the scheduler, we need to
2402  // check if it might reach N.
2403  //
2404  // [N*] //
2405  // ^ ^ //
2406  // / \ //
2407  // [U*] [X]? //
2408  // ^ ^ //
2409  // \ \ //
2410  // \ | //
2411  // [Root*] | //
2412  // ^ | //
2413  // f | //
2414  // | / //
2415  // [Y] / //
2416  // ^ / //
2417  // f / //
2418  // | / //
2419  // [GU] //
2420  //
2421  // If GU (glue use) indirectly reaches N (the load), and Root folds N
2422  // (call it Fold), then X is a predecessor of GU and a successor of
2423  // Fold. But since Fold and GU are glued together, this will create
2424  // a cycle in the scheduling graph.
2425 
2426  // If the node has glue, walk down the graph to the "lowest" node in the
2427  // glued set.
2428  EVT VT = Root->getValueType(Root->getNumValues()-1);
2429  while (VT == MVT::Glue) {
2430  SDNode *GU = findGlueUse(Root);
2431  if (!GU)
2432  break;
2433  Root = GU;
2434  VT = Root->getValueType(Root->getNumValues()-1);
2435 
2436  // If our query node has a glue result with a use, we've walked up it. If
2437  // the user (which has already been selected) has a chain or indirectly uses
2438  // the chain, HandleMergeInputChains will not consider it. Because of
2439  // this, we cannot ignore chains in this predicate.
2440  IgnoreChains = false;
2441  }
2442 
2443  return !findNonImmUse(Root, N.getNode(), U, IgnoreChains);
2444 }
2445 
2446 void SelectionDAGISel::Select_INLINEASM(SDNode *N, bool Branch) {
2447  SDLoc DL(N);
2448 
2449  std::vector<SDValue> Ops(N->op_begin(), N->op_end());
2450  SelectInlineAsmMemoryOperands(Ops, DL);
2451 
2452  const EVT VTs[] = {MVT::Other, MVT::Glue};
2453  SDValue New = CurDAG->getNode(Branch ? ISD::INLINEASM_BR : ISD::INLINEASM, DL, VTs, Ops);
2454  New->setNodeId(-1);
2455  ReplaceUses(N, New.getNode());
2456  CurDAG->RemoveDeadNode(N);
2457 }
2458 
2459 void SelectionDAGISel::Select_READ_REGISTER(SDNode *Op) {
2460  SDLoc dl(Op);
2461  MDNodeSDNode *MD = dyn_cast<MDNodeSDNode>(Op->getOperand(1));
2462  const MDString *RegStr = dyn_cast<MDString>(MD->getMD()->getOperand(0));
2463  unsigned Reg =
2464  TLI->getRegisterByName(RegStr->getString().data(), Op->getValueType(0),
2465  *CurDAG);
2466  SDValue New = CurDAG->getCopyFromReg(
2467  Op->getOperand(0), dl, Reg, Op->getValueType(0));
2468  New->setNodeId(-1);
2469  ReplaceUses(Op, New.getNode());
2470  CurDAG->RemoveDeadNode(Op);
2471 }
2472 
2473 void SelectionDAGISel::Select_WRITE_REGISTER(SDNode *Op) {
2474  SDLoc dl(Op);
2475  MDNodeSDNode *MD = dyn_cast<MDNodeSDNode>(Op->getOperand(1));
2476  const MDString *RegStr = dyn_cast<MDString>(MD->getMD()->getOperand(0));
2477  unsigned Reg = TLI->getRegisterByName(RegStr->getString().data(),
2478  Op->getOperand(2).getValueType(),
2479  *CurDAG);
2480  SDValue New = CurDAG->getCopyToReg(
2481  Op->getOperand(0), dl, Reg, Op->getOperand(2));
2482  New->setNodeId(-1);
2483  ReplaceUses(Op, New.getNode());
2484  CurDAG->RemoveDeadNode(Op);
2485 }
2486 
2487 void SelectionDAGISel::Select_UNDEF(SDNode *N) {
2488  CurDAG->SelectNodeTo(N, TargetOpcode::IMPLICIT_DEF, N->getValueType(0));
2489 }
2490 
2491 /// GetVBR - decode a vbr encoding whose top bit is set.
2492 LLVM_ATTRIBUTE_ALWAYS_INLINE static inline uint64_t
2493 GetVBR(uint64_t Val, const unsigned char *MatcherTable, unsigned &Idx) {
2494  assert(Val >= 128 && "Not a VBR");
2495  Val &= 127; // Remove first vbr bit.
2496 
2497  unsigned Shift = 7;
2498  uint64_t NextBits;
2499  do {
2500  NextBits = MatcherTable[Idx++];
2501  Val |= (NextBits&127) << Shift;
2502  Shift += 7;
2503  } while (NextBits & 128);
2504 
2505  return Val;
2506 }
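// Worked example (illustrative): the value 300 is VBR-encoded as the bytes
// 0xAC, 0x02. The caller reads 0xAC, sees the high bit set, and calls
// GetVBR(0xAC, MatcherTable, Idx); the low 7 bits give 44, the next byte
// contributes 2 << 7 = 256, and the decoded value is 300.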
2507 
2508 /// When a match is complete, this method updates uses of interior chain results
2509 /// to use the new results.
2510 void SelectionDAGISel::UpdateChains(
2511  SDNode *NodeToMatch, SDValue InputChain,
2512  SmallVectorImpl<SDNode *> &ChainNodesMatched, bool isMorphNodeTo) {
2513  SmallVector<SDNode*, 4> NowDeadNodes;
2514 
2515  // Now that all the normal results are replaced, we replace the chain and
2516  // glue results if present.
2517  if (!ChainNodesMatched.empty()) {
2518  assert(InputChain.getNode() &&
2519  "Matched input chains but didn't produce a chain");
2520  // Loop over all of the nodes we matched that produced a chain result.
2521  // Replace all the chain results with the final chain we ended up with.
2522  for (unsigned i = 0, e = ChainNodesMatched.size(); i != e; ++i) {
2523  SDNode *ChainNode = ChainNodesMatched[i];
2524  // If ChainNode is null, it's because we replaced it on a previous
2525  // iteration and we cleared it out of the map. Just skip it.
2526  if (!ChainNode)
2527  continue;
2528 
2529  assert(ChainNode->getOpcode() != ISD::DELETED_NODE &&
2530  "Deleted node left in chain");
2531 
2532  // Don't replace the results of the root node if we're doing a
2533  // MorphNodeTo.
2534  if (ChainNode == NodeToMatch && isMorphNodeTo)
2535  continue;
2536 
2537  SDValue ChainVal = SDValue(ChainNode, ChainNode->getNumValues()-1);
2538  if (ChainVal.getValueType() == MVT::Glue)
2539  ChainVal = ChainVal.getValue(ChainVal->getNumValues()-2);
2540  assert(ChainVal.getValueType() == MVT::Other && "Not a chain?");
2541  SelectionDAG::DAGNodeDeletedListener NDL(
2542  *CurDAG, [&](SDNode *N, SDNode *E) {
2543  std::replace(ChainNodesMatched.begin(), ChainNodesMatched.end(), N,
2544  static_cast<SDNode *>(nullptr));
2545  });
2546  if (ChainNode->getOpcode() != ISD::TokenFactor)
2547  ReplaceUses(ChainVal, InputChain);
2548 
2549  // If the node became dead and we haven't already seen it, delete it.
2550  if (ChainNode != NodeToMatch && ChainNode->use_empty() &&
2551  !std::count(NowDeadNodes.begin(), NowDeadNodes.end(), ChainNode))
2552  NowDeadNodes.push_back(ChainNode);
2553  }
2554  }
2555 
2556  if (!NowDeadNodes.empty())
2557  CurDAG->RemoveDeadNodes(NowDeadNodes);
2558 
2559  LLVM_DEBUG(dbgs() << "ISEL: Match complete!\n");
2560 }
2561 
2562 /// HandleMergeInputChains - This implements the OPC_EmitMergeInputChains
2563 /// operation for when the pattern matched at least one node with a chain. The
2564 /// input vector contains a list of all of the chained nodes that we match. We
2565 /// must determine if this is a valid thing to cover (i.e. matching it won't
2566 /// induce cycles in the DAG) and, if so, creates a TokenFactor node that will
2567 /// be used as the input node chain for the generated nodes.
2568 static SDValue
2569 HandleMergeInputChains(SmallVectorImpl<SDNode*> &ChainNodesMatched,
2570  SelectionDAG *CurDAG) {
2571 
2572  SmallPtrSet<const SDNode *, 16> Visited;
2573  SmallVector<const SDNode *, 8> Worklist;
2574  SmallVector<SDValue, 3> InputChains;
2575  unsigned int Max = 8192;
2576 
2577  // Quick exit on trivial merge.
2578  if (ChainNodesMatched.size() == 1)
2579  return ChainNodesMatched[0]->getOperand(0);
2580 
2581  // Add chains that aren't already added (internal). Peek through
2582  // token factors.
2583  std::function<void(const SDValue)> AddChains = [&](const SDValue V) {
2584  if (V.getValueType() != MVT::Other)
2585  return;
2586  if (V->getOpcode() == ISD::EntryToken)
2587  return;
2588  if (!Visited.insert(V.getNode()).second)
2589  return;
2590  if (V->getOpcode() == ISD::TokenFactor) {
2591  for (const SDValue &Op : V->op_values())
2592  AddChains(Op);
2593  } else
2594  InputChains.push_back(V);
2595  };
2596 
2597  for (auto *N : ChainNodesMatched) {
2598  Worklist.push_back(N);
2599  Visited.insert(N);
2600  }
2601 
2602  while (!Worklist.empty())
2603  AddChains(Worklist.pop_back_val()->getOperand(0));
2604 
2605  // Skip the search if there are no chain dependencies.
2606  if (InputChains.size() == 0)
2607  return CurDAG->getEntryNode();
2608 
2609  // If one of these chains is a successor of input, we must have a
2610  // node that is both the predecessor and successor of the
2611  // to-be-merged nodes. Fail.
2612  Visited.clear();
2613  for (SDValue V : InputChains)
2614  Worklist.push_back(V.getNode());
2615 
2616  for (auto *N : ChainNodesMatched)
2617  if (SDNode::hasPredecessorHelper(N, Visited, Worklist, Max, true))
2618  return SDValue();
2619 
2620  // Return merged chain.
2621  if (InputChains.size() == 1)
2622  return InputChains[0];
2623  return CurDAG->getNode(ISD::TokenFactor, SDLoc(ChainNodesMatched[0]),
2624  MVT::Other, InputChains);
2625 }
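// Illustrative scenario: a pattern that covers both a load and the add fed by
// it matches two chained nodes. The code above gathers their input chains
// (looking through TokenFactors), refuses the merge if one of those input
// chains itself depends on a matched node (the merge would then create a
// cycle), and otherwise returns either the single input chain or a fresh
// TokenFactor of all of them.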
2626 
2627 /// MorphNode - Handle morphing a node in place for the selector.
2628 SDNode *SelectionDAGISel::
2629 MorphNode(SDNode *Node, unsigned TargetOpc, SDVTList VTList,
2630  ArrayRef<SDValue> Ops, unsigned EmitNodeInfo) {
2631  // It is possible we're using MorphNodeTo to replace a node with no
2632  // normal results with one that has a normal result (or we could be
2633  // adding a chain) and the input could have glue and chains as well.
2634  // In this case we need to shift the operands down.
2635  // FIXME: This is a horrible hack and broken in obscure cases, no worse
2636  // than the old isel though.
2637  int OldGlueResultNo = -1, OldChainResultNo = -1;
2638 
2639  unsigned NTMNumResults = Node->getNumValues();
2640  if (Node->getValueType(NTMNumResults-1) == MVT::Glue) {
2641  OldGlueResultNo = NTMNumResults-1;
2642  if (NTMNumResults != 1 &&
2643  Node->getValueType(NTMNumResults-2) == MVT::Other)
2644  OldChainResultNo = NTMNumResults-2;
2645  } else if (Node->getValueType(NTMNumResults-1) == MVT::Other)
2646  OldChainResultNo = NTMNumResults-1;
2647 
2648  // Call the underlying SelectionDAG routine to do the transmogrification. Note
2649  // that this deletes operands of the old node that become dead.
2650  SDNode *Res = CurDAG->MorphNodeTo(Node, ~TargetOpc, VTList, Ops);
2651 
2652  // MorphNodeTo can operate in two ways: if an existing node with the
2653  // specified operands exists, it can just return it. Otherwise, it
2654  // updates the node in place to have the requested operands.
2655  if (Res == Node) {
2656  // If we updated the node in place, reset the node ID. To the isel,
2657  // this should be just like a newly allocated machine node.
2658  Res->setNodeId(-1);
2659  }
2660 
2661  unsigned ResNumResults = Res->getNumValues();
2662  // Move the glue if needed.
2663  if ((EmitNodeInfo & OPFL_GlueOutput) && OldGlueResultNo != -1 &&
2664  (unsigned)OldGlueResultNo != ResNumResults-1)
2665  ReplaceUses(SDValue(Node, OldGlueResultNo),
2666  SDValue(Res, ResNumResults - 1));
2667 
2668  if ((EmitNodeInfo & OPFL_GlueOutput) != 0)
2669  --ResNumResults;
2670 
2671  // Move the chain reference if needed.
2672  if ((EmitNodeInfo & OPFL_Chain) && OldChainResultNo != -1 &&
2673  (unsigned)OldChainResultNo != ResNumResults-1)
2674  ReplaceUses(SDValue(Node, OldChainResultNo),
2675  SDValue(Res, ResNumResults - 1));
2676 
2677  // Otherwise, no replacement happened because the node already exists. Replace
2678  // Uses of the old node with the new one.
2679  if (Res != Node) {
2680  ReplaceNode(Node, Res);
2681  } else {
2682  EnforceNodeIdInvariant(Res);
2683  }
2684 
2685  return Res;
2686 }
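// Illustrative example: a node that previously produced only (ch, glue) can
// be morphed into a machine node producing (i32, ch, glue). The chain result
// moves from slot 0 to slot 1 and the glue result from slot 1 to slot 2, and
// the ReplaceUses calls above redirect existing users of the old chain and
// glue slots to their new positions.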
2687 
2688 /// CheckSame - Implements OP_CheckSame.
2689 LLVM_ATTRIBUTE_ALWAYS_INLINE static inline bool
2690 CheckSame(const unsigned char *MatcherTable, unsigned &MatcherIndex,
2691  SDValue N,
2692  const SmallVectorImpl<std::pair<SDValue, SDNode*>> &RecordedNodes) {
2693  // Accept if it is exactly the same as a previously recorded node.
2694  unsigned RecNo = MatcherTable[MatcherIndex++];
2695  assert(RecNo < RecordedNodes.size() && "Invalid CheckSame");
2696  return N == RecordedNodes[RecNo].first;
2697 }
2698 
2699 /// CheckChildSame - Implements OP_CheckChildXSame.
2700 LLVM_ATTRIBUTE_ALWAYS_INLINE static inline bool
2701 CheckChildSame(const unsigned char *MatcherTable, unsigned &MatcherIndex,
2702  SDValue N,
2703  const SmallVectorImpl<std::pair<SDValue, SDNode*>> &RecordedNodes,
2704  unsigned ChildNo) {
2705  if (ChildNo >= N.getNumOperands())
2706  return false; // Match fails if out of range child #.
2707  return ::CheckSame(MatcherTable, MatcherIndex, N.getOperand(ChildNo),
2708  RecordedNodes);
2709 }
2710 
2711 /// CheckPatternPredicate - Implements OP_CheckPatternPredicate.
2712 LLVM_ATTRIBUTE_ALWAYS_INLINE static inline bool
2713 CheckPatternPredicate(const unsigned char *MatcherTable, unsigned &MatcherIndex,
2714  const SelectionDAGISel &SDISel) {
2715  return SDISel.CheckPatternPredicate(MatcherTable[MatcherIndex++]);
2716 }
2717 
2718 /// CheckNodePredicate - Implements OP_CheckNodePredicate.
2719 LLVM_ATTRIBUTE_ALWAYS_INLINE static inline bool
2720 CheckNodePredicate(const unsigned char *MatcherTable, unsigned &MatcherIndex,
2721  const SelectionDAGISel &SDISel, SDNode *N) {
2722  return SDISel.CheckNodePredicate(N, MatcherTable[MatcherIndex++]);
2723 }
2724 
2725 LLVM_ATTRIBUTE_ALWAYS_INLINE static inline bool
2726 CheckOpcode(const unsigned char *MatcherTable, unsigned &MatcherIndex,
2727  SDNode *N) {
2728  uint16_t Opc = MatcherTable[MatcherIndex++];
2729  Opc |= (unsigned short)MatcherTable[MatcherIndex++] << 8;
2730  return N->getOpcode() == Opc;
2731 }
2732 
2733 LLVM_ATTRIBUTE_ALWAYS_INLINE static inline bool
2734 CheckType(const unsigned char *MatcherTable, unsigned &MatcherIndex, SDValue N,
2735  const TargetLowering *TLI, const DataLayout &DL) {
2736  MVT::SimpleValueType VT = (MVT::SimpleValueType)MatcherTable[MatcherIndex++];
2737  if (N.getValueType() == VT) return true;
2738 
2739  // Handle the case when VT is iPTR.
2740  return VT == MVT::iPTR && N.getValueType() == TLI->getPointerTy(DL);
2741 }
2742 
2743 LLVM_ATTRIBUTE_ALWAYS_INLINE static inline bool
2744 CheckChildType(const unsigned char *MatcherTable, unsigned &MatcherIndex,
2745  SDValue N, const TargetLowering *TLI, const DataLayout &DL,
2746  unsigned ChildNo) {
2747  if (ChildNo >= N.getNumOperands())
2748  return false; // Match fails if out of range child #.
2749  return ::CheckType(MatcherTable, MatcherIndex, N.getOperand(ChildNo), TLI,
2750  DL);
2751 }
2752 
2753 LLVM_ATTRIBUTE_ALWAYS_INLINE static inline bool
2754 CheckCondCode(const unsigned char *MatcherTable, unsigned &MatcherIndex,
2755  SDValue N) {
2756  return cast<CondCodeSDNode>(N)->get() ==
2757  (ISD::CondCode)MatcherTable[MatcherIndex++];
2758 }
2759 
2760 LLVM_ATTRIBUTE_ALWAYS_INLINE static inline bool
2761 CheckChild2CondCode(const unsigned char *MatcherTable, unsigned &MatcherIndex,
2762  SDValue N) {
2763  if (2 >= N.getNumOperands())
2764  return false;
2765  return ::CheckCondCode(MatcherTable, MatcherIndex, N.getOperand(2));
2766 }
2767 
2768 LLVM_ATTRIBUTE_ALWAYS_INLINE static inline bool
2769 CheckValueType(const unsigned char *MatcherTable, unsigned &MatcherIndex,
2770  SDValue N, const TargetLowering *TLI, const DataLayout &DL) {
2771  MVT::SimpleValueType VT = (MVT::SimpleValueType)MatcherTable[MatcherIndex++];
2772  if (cast<VTSDNode>(N)->getVT() == VT)
2773  return true;
2774 
2775  // Handle the case when VT is iPTR.
2776  return VT == MVT::iPTR && cast<VTSDNode>(N)->getVT() == TLI->getPointerTy(DL);
2777 }
2778 
2779 LLVM_ATTRIBUTE_ALWAYS_INLINE static inline bool
2780 CheckInteger(const unsigned char *MatcherTable, unsigned &MatcherIndex,
2781  SDValue N) {
2782  int64_t Val = MatcherTable[MatcherIndex++];
2783  if (Val & 128)
2784  Val = GetVBR(Val, MatcherTable, MatcherIndex);
2785 
2786  ConstantSDNode *C = dyn_cast<ConstantSDNode>(N);
2787  return C && C->getSExtValue() == Val;
2788 }
2789 
2790 LLVM_ATTRIBUTE_ALWAYS_INLINE static inline bool
2791 CheckChildInteger(const unsigned char *MatcherTable, unsigned &MatcherIndex,
2792  SDValue N, unsigned ChildNo) {
2793  if (ChildNo >= N.getNumOperands())
2794  return false; // Match fails if out of range child #.
2795  return ::CheckInteger(MatcherTable, MatcherIndex, N.getOperand(ChildNo));
2796 }
2797 
2798 LLVM_ATTRIBUTE_ALWAYS_INLINE static inline bool
2799 CheckAndImm(const unsigned char *MatcherTable, unsigned &MatcherIndex,
2800  SDValue N, const SelectionDAGISel &SDISel) {
2801  int64_t Val = MatcherTable[MatcherIndex++];
2802  if (Val & 128)
2803  Val = GetVBR(Val, MatcherTable, MatcherIndex);
2804 
2805  if (N->getOpcode() != ISD::AND) return false;
2806 
2807  ConstantSDNode *C = dyn_cast<ConstantSDNode>(N.getOperand(1));
2808  return C && SDISel.CheckAndMask(N.getOperand(0), C, Val);
2809 }
2810 
2811 LLVM_ATTRIBUTE_ALWAYS_INLINE static inline bool
2812 CheckOrImm(const unsigned char *MatcherTable, unsigned &MatcherIndex,
2813  SDValue N, const SelectionDAGISel &SDISel) {
2814  int64_t Val = MatcherTable[MatcherIndex++];
2815  if (Val & 128)
2816  Val = GetVBR(Val, MatcherTable, MatcherIndex);
2817 
2818  if (N->getOpcode() != ISD::OR) return false;
2819 
2820  ConstantSDNode *C = dyn_cast<ConstantSDNode>(N.getOperand(1));
2821  return C && SDISel.CheckOrMask(N.getOperand(0), C, Val);
2822 }
2823 
2824 /// IsPredicateKnownToFail - If we know how and can do so without pushing a
2825 /// scope, evaluate the current node. If the current predicate is known to
2826 /// fail, set Result=true and return anything. If the current predicate is
2827 /// known to pass, set Result=false and return the MatcherIndex to continue
2828 /// with. If the current predicate is unknown, set Result=false and return the
2829 /// MatcherIndex to continue with.
2830 static unsigned IsPredicateKnownToFail(const unsigned char *Table,
2831  unsigned Index, SDValue N,
2832  bool &Result,
2833  const SelectionDAGISel &SDISel,
2834  SmallVectorImpl<std::pair<SDValue, SDNode*>> &RecordedNodes) {
2835  switch (Table[Index++]) {
2836  default:
2837  Result = false;
2838  return Index-1; // Could not evaluate this predicate.
2839  case SelectionDAGISel::OPC_CheckSame:
2840  Result = !::CheckSame(Table, Index, N, RecordedNodes);
2841  return Index;
2842  case SelectionDAGISel::OPC_CheckChild0Same:
2843  case SelectionDAGISel::OPC_CheckChild1Same:
2844  case SelectionDAGISel::OPC_CheckChild2Same:
2845  case SelectionDAGISel::OPC_CheckChild3Same:
2846  Result = !::CheckChildSame(Table, Index, N, RecordedNodes,
2847  Table[Index-1] - SelectionDAGISel::OPC_CheckChild0Same);
2848  return Index;
2849  case SelectionDAGISel::OPC_CheckPatternPredicate:
2850  Result = !::CheckPatternPredicate(Table, Index, SDISel);
2851  return Index;
2852  case SelectionDAGISel::OPC_CheckPredicate:
2853  Result = !::CheckNodePredicate(Table, Index, SDISel, N.getNode());
2854  return Index;
2855  case SelectionDAGISel::OPC_CheckOpcode:
2856  Result = !::CheckOpcode(Table, Index, N.getNode());
2857  return Index;
2858  case SelectionDAGISel::OPC_CheckType:
2859  Result = !::CheckType(Table, Index, N, SDISel.TLI,
2860  SDISel.CurDAG->getDataLayout());
2861  return Index;
2862  case SelectionDAGISel::OPC_CheckTypeRes: {
2863  unsigned Res = Table[Index++];
2864  Result = !::CheckType(Table, Index, N.getValue(Res), SDISel.TLI,
2865  SDISel.CurDAG->getDataLayout());
2866  return Index;
2867  }
2868  case SelectionDAGISel::OPC_CheckChild0Type:
2869  case SelectionDAGISel::OPC_CheckChild1Type:
2870  case SelectionDAGISel::OPC_CheckChild2Type:
2871  case SelectionDAGISel::OPC_CheckChild3Type:
2872  case SelectionDAGISel::OPC_CheckChild4Type:
2873  case SelectionDAGISel::OPC_CheckChild5Type:
2874  case SelectionDAGISel::OPC_CheckChild6Type:
2875  case SelectionDAGISel::OPC_CheckChild7Type:
2876  Result = !::CheckChildType(
2877  Table, Index, N, SDISel.TLI, SDISel.CurDAG->getDataLayout(),
2878  Table[Index - 1] - SelectionDAGISel::OPC_CheckChild0Type);
2879  return Index;
2880  case SelectionDAGISel::OPC_CheckCondCode:
2881  Result = !::CheckCondCode(Table, Index, N);
2882  return Index;
2883  case SelectionDAGISel::OPC_CheckChild2CondCode:
2884  Result = !::CheckChild2CondCode(Table, Index, N);
2885  return Index;
2886  case SelectionDAGISel::OPC_CheckValueType:
2887  Result = !::CheckValueType(Table, Index, N, SDISel.TLI,
2888  SDISel.CurDAG->getDataLayout());
2889  return Index;
2890  case SelectionDAGISel::OPC_CheckInteger:
2891  Result = !::CheckInteger(Table, Index, N);
2892  return Index;
2893  case SelectionDAGISel::OPC_CheckChild0Integer:
2894  case SelectionDAGISel::OPC_CheckChild1Integer:
2895  case SelectionDAGISel::OPC_CheckChild2Integer:
2896  case SelectionDAGISel::OPC_CheckChild3Integer:
2897  case SelectionDAGISel::OPC_CheckChild4Integer:
2898  Result = !::CheckChildInteger(Table, Index, N,
2899  Table[Index-1] - SelectionDAGISel::OPC_CheckChild0Integer);
2900  return Index;
2901  case SelectionDAGISel::OPC_CheckAndImm:
2902  Result = !::CheckAndImm(Table, Index, N, SDISel);
2903  return Index;
2904  case SelectionDAGISel::OPC_CheckOrImm:
2905  Result = !::CheckOrImm(Table, Index, N, SDISel);
2906  return Index;
2907  }
2908 }
2909 
2910 namespace {
2911 
2912 struct MatchScope {
2913  /// FailIndex - If this match fails, this is the index to continue with.
2914  unsigned FailIndex;
2915 
2916  /// NodeStack - The node stack when the scope was formed.
2917  SmallVector<SDValue, 4> NodeStack;
2918 
2919  /// NumRecordedNodes - The number of recorded nodes when the scope was formed.
2920  unsigned NumRecordedNodes;
2921 
2922  /// NumMatchedMemRefs - The number of matched memref entries.
2923  unsigned NumMatchedMemRefs;
2924 
2925  /// InputChain/InputGlue - The current chain/glue
2926  SDValue InputChain, InputGlue;
2927 
2928  /// HasChainNodesMatched - True if the ChainNodesMatched list is non-empty.
2929  bool HasChainNodesMatched;
2930 };
2931 
2932 /// A DAG update listener to keep the matching state
2933 /// (i.e. RecordedNodes and MatchScope) up to date if the target is allowed to
2934 /// change the DAG while matching. The X86 addressing-mode matcher is an
2935 /// example of this.
2936 class MatchStateUpdater : public SelectionDAG::DAGUpdateListener
2937 {
2938  SDNode **NodeToMatch;
2939  SmallVectorImpl<std::pair<SDValue, SDNode *>> &RecordedNodes;
2940  SmallVectorImpl<MatchScope> &MatchScopes;
2941 
2942 public:
2943  MatchStateUpdater(SelectionDAG &DAG, SDNode **NodeToMatch,
2944  SmallVectorImpl<std::pair<SDValue, SDNode *>> &RN,
2945  SmallVectorImpl<MatchScope> &MS)
2946  : SelectionDAG::DAGUpdateListener(DAG), NodeToMatch(NodeToMatch),
2947  RecordedNodes(RN), MatchScopes(MS) {}
2948 
2949  void NodeDeleted(SDNode *N, SDNode *E) override {
2950  // Some early-returns here to avoid the search if we deleted the node or
2951  // if the update comes from MorphNodeTo (MorphNodeTo is the last thing we
2952  // do, so it's unnecessary to update matching state at that point).
2953  // Neither of these can occur currently because we only install this
2954  // update listener during matching of a complex pattern.
2955  if (!E || E->isMachineOpcode())
2956  return;
2957  // Check if NodeToMatch was updated.
2958  if (N == *NodeToMatch)
2959  *NodeToMatch = E;
2960  // Performing linear search here does not matter because we almost never
2961  // run this code. You'd have to have a CSE during complex pattern
2962  // matching.
2963  for (auto &I : RecordedNodes)
2964  if (I.first.getNode() == N)
2965  I.first.setNode(E);
2966 
2967  for (auto &I : MatchScopes)
2968  for (auto &J : I.NodeStack)
2969  if (J.getNode() == N)
2970  J.setNode(E);
2971  }
2972 };
2973 
2974 } // end anonymous namespace
2975 
2976 void SelectionDAGISel::SelectCodeCommon(SDNode *NodeToMatch,
2977  const unsigned char *MatcherTable,
2978  unsigned TableSize) {
2979  // FIXME: Should these even be selected? Handle these cases in the caller?
2980  switch (NodeToMatch->getOpcode()) {
2981  default:
2982  break;
2983  case ISD::EntryToken: // These nodes remain the same.
2984  case ISD::BasicBlock:
2985  case ISD::Register:
2986  case ISD::RegisterMask:
2987  case ISD::HANDLENODE:
2988  case ISD::MDNODE_SDNODE:
2989  case ISD::TargetConstant:
2990  case ISD::TargetConstantFP:
2991  case ISD::TargetConstantPool:
2992  case ISD::TargetFrameIndex:
2993  case ISD::TargetExternalSymbol:
2994  case ISD::MCSymbol:
2995  case ISD::TargetBlockAddress:
2996  case ISD::TargetJumpTable:
2997  case ISD::TargetGlobalTLSAddress:
2998  case ISD::TargetGlobalAddress:
2999  case ISD::TokenFactor:
3000  case ISD::CopyFromReg:
3001  case ISD::CopyToReg:
3002  case ISD::EH_LABEL:
3003  case ISD::ANNOTATION_LABEL:
3004  case ISD::LIFETIME_START:
3005  case ISD::LIFETIME_END:
3006  NodeToMatch->setNodeId(-1); // Mark selected.
3007  return;
3008  case ISD::AssertSext:
3009  case ISD::AssertZext:
3010  ReplaceUses(SDValue(NodeToMatch, 0), NodeToMatch->getOperand(0));
3011  CurDAG->RemoveDeadNode(NodeToMatch);
3012  return;
3013  case ISD::INLINEASM:
3014  case ISD::INLINEASM_BR:
3015  Select_INLINEASM(NodeToMatch,
3016  NodeToMatch->getOpcode() == ISD::INLINEASM_BR);
3017  return;
3018  case ISD::READ_REGISTER:
3019  Select_READ_REGISTER(NodeToMatch);
3020  return;
3021  case ISD::WRITE_REGISTER:
3022  Select_WRITE_REGISTER(NodeToMatch);
3023  return;
3024  case ISD::UNDEF:
3025  Select_UNDEF(NodeToMatch);
3026  return;
3027  }
3028 
3029  assert(!NodeToMatch->isMachineOpcode() && "Node already selected!");
3030 
3031  // Set up the node stack with NodeToMatch as the only node on the stack.
3032  SmallVector<SDValue, 8> NodeStack;
3033  SDValue N = SDValue(NodeToMatch, 0);
3034  NodeStack.push_back(N);
3035 
3036  // MatchScopes - Scopes used when matching, if a match failure happens, this
3037  // indicates where to continue checking.
3038  SmallVector<MatchScope, 8> MatchScopes;
3039 
3040  // RecordedNodes - This is the set of nodes that have been recorded by the
3041  // state machine. The second value is the parent of the node, or null if the
3042  // root is recorded.
3043  SmallVector<std::pair<SDValue, SDNode*>, 8> RecordedNodes;
3044 
3045  // MatchedMemRefs - This is the set of MemRef's we've seen in the input
3046  // pattern.
3047  SmallVector<MachineMemOperand*, 2> MatchedMemRefs;
3048 
3049  // These are the current input chain and glue for use when generating nodes.
3050  // Various Emit operations change these. For example, emitting a copytoreg
3051  // uses and updates these.
3052  SDValue InputChain, InputGlue;
3053 
3054  // ChainNodesMatched - If a pattern matches nodes that have input/output
3055  // chains, the OPC_EmitMergeInputChains operation is emitted which indicates
3056  // which ones they are. The result is captured into this list so that we can
3057  // update the chain results when the pattern is complete.
3058  SmallVector<SDNode*, 3> ChainNodesMatched;
3059 
3060  LLVM_DEBUG(dbgs() << "ISEL: Starting pattern match\n");
3061 
3062  // Determine where to start the interpreter. Normally we start at opcode #0,
3063  // but if the state machine starts with an OPC_SwitchOpcode, then we
3064  // accelerate the first lookup (which is guaranteed to be hot) with the
3065  // OpcodeOffset table.
3066  unsigned MatcherIndex = 0;
3067 
3068  if (!OpcodeOffset.empty()) {
3069  // Already computed the OpcodeOffset table, just index into it.
3070  if (N.getOpcode() < OpcodeOffset.size())
3071  MatcherIndex = OpcodeOffset[N.getOpcode()];
3072  LLVM_DEBUG(dbgs() << " Initial Opcode index to " << MatcherIndex << "\n");
3073 
3074  } else if (MatcherTable[0] == OPC_SwitchOpcode) {
3075  // Otherwise, the table isn't computed, but the state machine does start
3076  // with an OPC_SwitchOpcode instruction. Populate the table now, since this
3077  // is the first time we're selecting an instruction.
3078  unsigned Idx = 1;
3079  while (true) {
3080  // Get the size of this case.
3081  unsigned CaseSize = MatcherTable[Idx++];
3082  if (CaseSize & 128)
3083  CaseSize = GetVBR(CaseSize, MatcherTable, Idx);
3084  if (CaseSize == 0) break;
3085 
3086  // Get the opcode, add the index to the table.
3087  uint16_t Opc = MatcherTable[Idx++];
3088  Opc |= (unsigned short)MatcherTable[Idx++] << 8;
3089  if (Opc >= OpcodeOffset.size())
3090  OpcodeOffset.resize((Opc+1)*2);
3091  OpcodeOffset[Opc] = Idx;
3092  Idx += CaseSize;
3093  }
3094 
3095  // Okay, do the lookup for the first opcode.
3096  if (N.getOpcode() < OpcodeOffset.size())
3097  MatcherIndex = OpcodeOffset[N.getOpcode()];
3098  }
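// Informal sketch of the byte stream consumed above (the table itself is
// generated by tblgen): an OPC_SwitchOpcode block is a sequence of cases of
// the form [vbr case-size][opcode lo][opcode hi][case body], terminated by a
// zero case-size, so OpcodeOffset[opcode] ends up pointing at the first byte
// of that opcode's case body.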
3099 
3100  while (true) {
3101  assert(MatcherIndex < TableSize && "Invalid index");
3102 #ifndef NDEBUG
3103  unsigned CurrentOpcodeIndex = MatcherIndex;
3104 #endif
3105  BuiltinOpcodes Opcode = (BuiltinOpcodes)MatcherTable[MatcherIndex++];
3106  switch (Opcode) {
3107  case OPC_Scope: {
3108  // Okay, the semantics of this operation are that we should push a scope
3109  // then evaluate the first child. However, pushing a scope only to have
3110  // the first check fail (which then pops it) is inefficient. If we can
3111  // determine immediately that the first check (or first several) will
3112  // immediately fail, don't even bother pushing a scope for them.
3113  unsigned FailIndex;
3114 
3115  while (true) {
3116  unsigned NumToSkip = MatcherTable[MatcherIndex++];
3117  if (NumToSkip & 128)
3118  NumToSkip = GetVBR(NumToSkip, MatcherTable, MatcherIndex);
3119  // Found the end of the scope with no match.
3120  if (NumToSkip == 0) {
3121  FailIndex = 0;
3122  break;
3123  }
3124 
3125  FailIndex = MatcherIndex+NumToSkip;
3126 
3127  unsigned MatcherIndexOfPredicate = MatcherIndex;
3128  (void)MatcherIndexOfPredicate; // silence warning.
3129 
3130  // If we can't evaluate this predicate without pushing a scope (e.g. if
3131  // it is a 'MoveParent') or if the predicate succeeds on this node, we
3132  // push the scope and evaluate the full predicate chain.
3133  bool Result;
3134  MatcherIndex = IsPredicateKnownToFail(MatcherTable, MatcherIndex, N,
3135  Result, *this, RecordedNodes);
3136  if (!Result)
3137  break;
3138 
3139  LLVM_DEBUG(
3140  dbgs() << " Skipped scope entry (due to false predicate) at "
3141  << "index " << MatcherIndexOfPredicate << ", continuing at "
3142  << FailIndex << "\n");
3143  ++NumDAGIselRetries;
3144 
3145  // Otherwise, we know that this case of the Scope is guaranteed to fail,
3146  // move to the next case.
3147  MatcherIndex = FailIndex;
3148  }
3149 
3150  // If the whole scope failed to match, bail.
3151  if (FailIndex == 0) break;
3152 
3153  // Push a MatchScope which indicates where to go if the first child fails
3154  // to match.
3155  MatchScope NewEntry;
3156  NewEntry.FailIndex = FailIndex;
3157  NewEntry.NodeStack.append(NodeStack.begin(), NodeStack.end());
3158  NewEntry.NumRecordedNodes = RecordedNodes.size();
3159  NewEntry.NumMatchedMemRefs = MatchedMemRefs.size();
3160  NewEntry.InputChain = InputChain;
3161  NewEntry.InputGlue = InputGlue;
3162  NewEntry.HasChainNodesMatched = !ChainNodesMatched.empty();
3163  MatchScopes.push_back(NewEntry);
3164  continue;
3165  }
3166  case OPC_RecordNode: {
3167  // Remember this node, it may end up being an operand in the pattern.
3168  SDNode *Parent = nullptr;
3169  if (NodeStack.size() > 1)
3170  Parent = NodeStack[NodeStack.size()-2].getNode();
3171  RecordedNodes.push_back(std::make_pair(N, Parent));
3172  continue;
3173  }
3174 
3175  case OPC_RecordChild0: case OPC_RecordChild1:
3176  case OPC_RecordChild2: case OPC_RecordChild3:
3177  case OPC_RecordChild4: case OPC_RecordChild5:
3178  case OPC_RecordChild6: case OPC_RecordChild7: {
3179  unsigned ChildNo = Opcode-OPC_RecordChild0;
3180  if (ChildNo >= N.getNumOperands())
3181  break; // Match fails if out of range child #.
3182 
3183  RecordedNodes.push_back(std::make_pair(N->getOperand(ChildNo),
3184  N.getNode()));
3185  continue;
3186  }
3187  case OPC_RecordMemRef:
3188  if (auto *MN = dyn_cast<MemSDNode>(N))
3189  MatchedMemRefs.push_back(MN->getMemOperand());
3190  else {
3191  LLVM_DEBUG(dbgs() << "Expected MemSDNode "; N->dump(CurDAG);
3192  dbgs() << '\n');
3193  }
3194 
3195  continue;
3196 
3197  case OPC_CaptureGlueInput:
3198  // If the current node has an input glue, capture it in InputGlue.
3199  if (N->getNumOperands() != 0 &&
3200  N->getOperand(N->getNumOperands()-1).getValueType() == MVT::Glue)
3201  InputGlue = N->getOperand(N->getNumOperands()-1);
3202  continue;
3203 
3204  case OPC_MoveChild: {
3205  unsigned ChildNo = MatcherTable[MatcherIndex++];
3206  if (ChildNo >= N.getNumOperands())
3207  break; // Match fails if out of range child #.
3208  N = N.getOperand(ChildNo);
3209  NodeStack.push_back(N);
3210  continue;
3211  }
3212 
3213  case OPC_MoveChild0: case OPC_MoveChild1:
3214  case OPC_MoveChild2: case OPC_MoveChild3:
3215  case OPC_MoveChild4: case OPC_MoveChild5:
3216  case OPC_MoveChild6: case OPC_MoveChild7: {
3217  unsigned ChildNo = Opcode-OPC_MoveChild0;
3218  if (ChildNo >= N.getNumOperands())
3219  break; // Match fails if out of range child #.
3220  N = N.getOperand(ChildNo);
3221  NodeStack.push_back(N);
3222  continue;
3223  }
3224 
3225  case OPC_MoveParent:
3226  // Pop the current node off the NodeStack.
3227  NodeStack.pop_back();
3228  assert(!NodeStack.empty() && "Node stack imbalance!");
3229  N = NodeStack.back();
3230  continue;
3231 
3232  case OPC_CheckSame:
3233  if (!::CheckSame(MatcherTable, MatcherIndex, N, RecordedNodes)) break;
3234  continue;
3235 
3236  case OPC_CheckChild0Same: case OPC_CheckChild1Same:
3237  case OPC_CheckChild2Same: case OPC_CheckChild3Same:
3238  if (!::CheckChildSame(MatcherTable, MatcherIndex, N, RecordedNodes,
3239  Opcode-OPC_CheckChild0Same))
3240  break;
3241  continue;
3242 
3243  case OPC_CheckPatternPredicate:
3244  if (!::CheckPatternPredicate(MatcherTable, MatcherIndex, *this)) break;
3245  continue;
3246  case OPC_CheckPredicate:
3247  if (!::CheckNodePredicate(MatcherTable, MatcherIndex, *this,
3248  N.getNode()))
3249  break;
3250  continue;
3251  case OPC_CheckPredicateWithOperands: {
3252  unsigned OpNum = MatcherTable[MatcherIndex++];
3253  SmallVector<SDValue, 8> Operands;
3254 
3255  for (unsigned i = 0; i < OpNum; ++i)
3256  Operands.push_back(RecordedNodes[MatcherTable[MatcherIndex++]].first);
3257 
3258  unsigned PredNo = MatcherTable[MatcherIndex++];
3259  if (!CheckNodePredicateWithOperands(N.getNode(), PredNo, Operands))
3260  break;
3261  continue;
3262  }
3263  case OPC_CheckComplexPat: {
3264  unsigned CPNum = MatcherTable[MatcherIndex++];
3265  unsigned RecNo = MatcherTable[MatcherIndex++];
3266  assert(RecNo < RecordedNodes.size() && "Invalid CheckComplexPat");
3267 
3268  // If target can modify DAG during matching, keep the matching state
3269  // consistent.
3270  std::unique_ptr<MatchStateUpdater> MSU;
3271  if (ComplexPatternFuncMutatesDAG())
3272  MSU.reset(new MatchStateUpdater(*CurDAG, &NodeToMatch, RecordedNodes,
3273  MatchScopes));
3274 
3275  if (!CheckComplexPattern(NodeToMatch, RecordedNodes[RecNo].second,
3276  RecordedNodes[RecNo].first, CPNum,
3277  RecordedNodes))
3278  break;
3279  continue;
3280  }
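Targets whose ComplexPatternFuncMutatesDAG() returns true may replace nodes while CheckComplexPattern runs, so the MatchStateUpdater installed above patches RecordedNodes (and the saved match scopes) whenever a recorded node is replaced. A minimal standalone sketch of that fix-up idea, using hypothetical stand-in types rather than the LLVM classes:

#include <utility>
#include <vector>

struct Node;                                                 // stand-in for SDNode
using RecordedList = std::vector<std::pair<Node *, Node *>>; // (value, parent) slots

// Hypothetical hook invoked when Old has been replaced by New during matching:
// redirect every recorded slot so that later Emit* opcodes, which index into
// this list by slot number, still see a live node.
void onNodeReplaced(RecordedList &RecordedNodes, Node *Old, Node *New) {
  for (auto &Slot : RecordedNodes) {
    if (Slot.first == Old)
      Slot.first = New;
    if (Slot.second == Old)
      Slot.second = New;
  }
}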
3281  case OPC_CheckOpcode:
3282  if (!::CheckOpcode(MatcherTable, MatcherIndex, N.getNode())) break;
3283  continue;
3284 
3285  case OPC_CheckType:
3286  if (!::CheckType(MatcherTable, MatcherIndex, N, TLI,
3287  CurDAG->getDataLayout()))
3288  break;
3289  continue;
3290 
3291  case OPC_CheckTypeRes: {
3292  unsigned Res = MatcherTable[MatcherIndex++];
3293  if (!::CheckType(MatcherTable, MatcherIndex, N.getValue(Res), TLI,
3294  CurDAG->getDataLayout()))
3295  break;
3296  continue;
3297  }
3298 
3299  case OPC_SwitchOpcode: {
3300  unsigned CurNodeOpcode = N.getOpcode();
3301  unsigned SwitchStart = MatcherIndex-1; (void)SwitchStart;
3302  unsigned CaseSize;
3303  while (true) {
3304  // Get the size of this case.
3305  CaseSize = MatcherTable[MatcherIndex++];
3306  if (CaseSize & 128)
3307  CaseSize = GetVBR(CaseSize, MatcherTable, MatcherIndex);
3308  if (CaseSize == 0) break;
3309 
3310  uint16_t Opc = MatcherTable[MatcherIndex++];
3311  Opc |= (unsigned short)MatcherTable[MatcherIndex++] << 8;
3312 
3313  // If the opcode matches, then we will execute this case.
3314  if (CurNodeOpcode == Opc)
3315  break;
3316 
3317  // Otherwise, skip over this case.
3318  MatcherIndex += CaseSize;
3319  }
3320 
3321  // If no cases matched, bail out.
3322  if (CaseSize == 0) break;
3323 
3324  // Otherwise, execute the case we found.
3325  LLVM_DEBUG(dbgs() << " OpcodeSwitch from " << SwitchStart << " to "
3326  << MatcherIndex << "\n");
3327  continue;
3328  }
3329 
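CaseSize above, like several later operands (immediates, record slot numbers, NumToSkip), uses the variable-width encoding decoded by the GetVBR helper defined earlier in this file: the low seven bits of each byte carry payload and a set high bit means another byte follows. A standalone restatement of that decoding, under the same calling convention (the first byte, which had bit 7 set, has already been read):

#include <cstdint>

// Decode a VBR-encoded value. FirstByte is the byte already read (bit 7 set);
// Idx points at the continuation bytes. Each byte contributes seven payload
// bits; a clear high bit terminates the value.
uint64_t decodeVBR(uint64_t FirstByte, const unsigned char *Table, unsigned &Idx) {
  uint64_t Val = FirstByte & 127;        // payload bits of the first byte
  unsigned Shift = 7;
  uint64_t NextBits;
  do {
    NextBits = Table[Idx++];
    Val |= (NextBits & 127) << Shift;    // append the next seven payload bits
    Shift += 7;
  } while (NextBits & 128);              // high bit set => more bytes follow
  return Val;
}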
3330  case OPC_SwitchType: {
3331  MVT CurNodeVT = N.getSimpleValueType();
3332  unsigned SwitchStart = MatcherIndex-1; (void)SwitchStart;
3333  unsigned CaseSize;
3334  while (true) {
3335  // Get the size of this case.
3336  CaseSize = MatcherTable[MatcherIndex++];
3337  if (CaseSize & 128)
3338  CaseSize = GetVBR(CaseSize, MatcherTable, MatcherIndex);
3339  if (CaseSize == 0) break;
3340 
3341  MVT CaseVT = (MVT::SimpleValueType)MatcherTable[MatcherIndex++];
3342  if (CaseVT == MVT::iPTR)
3343  CaseVT = TLI->getPointerTy(CurDAG->getDataLayout());
3344 
3345  // If the VT matches, then we will execute this case.
3346  if (CurNodeVT == CaseVT)
3347  break;
3348 
3349  // Otherwise, skip over this case.
3350  MatcherIndex += CaseSize;
3351  }
3352 
3353  // If no cases matched, bail out.
3354  if (CaseSize == 0) break;
3355 
3356  // Otherwise, execute the case we found.
3357  LLVM_DEBUG(dbgs() << " TypeSwitch[" << EVT(CurNodeVT).getEVTString()
3358  << "] from " << SwitchStart << " to " << MatcherIndex
3359  << '\n');
3360  continue;
3361  }
3362  case OPC_CheckChild0Type: case OPC_CheckChild1Type:
3363  case OPC_CheckChild2Type: case OPC_CheckChild3Type:
3364  case OPC_CheckChild4Type: case OPC_CheckChild5Type:
3365  case OPC_CheckChild6Type: case OPC_CheckChild7Type:
3366  if (!::CheckChildType(MatcherTable, MatcherIndex, N, TLI,
3367  CurDAG->getDataLayout(),
3368  Opcode - OPC_CheckChild0Type))
3369  break;
3370  continue;
3371  case OPC_CheckCondCode:
3372  if (!::CheckCondCode(MatcherTable, MatcherIndex, N)) break;
3373  continue;
3374  case OPC_CheckChild2CondCode:
3375  if (!::CheckChild2CondCode(MatcherTable, MatcherIndex, N)) break;
3376  continue;
3377  case OPC_CheckValueType:
3378  if (!::CheckValueType(MatcherTable, MatcherIndex, N, TLI,
3379  CurDAG->getDataLayout()))
3380  break;
3381  continue;
3382  case OPC_CheckInteger:
3383  if (!::CheckInteger(MatcherTable, MatcherIndex, N)) break;
3384  continue;
3385  case OPC_CheckChild0Integer: case OPC_CheckChild1Integer:
3386  case OPC_CheckChild2Integer: case OPC_CheckChild3Integer:
3387  case OPC_CheckChild4Integer:
3388  if (!::CheckChildInteger(MatcherTable, MatcherIndex, N,
3389  Opcode-OPC_CheckChild0Integer)) break;
3390  continue;
3391  case OPC_CheckAndImm:
3392  if (!::CheckAndImm(MatcherTable, MatcherIndex, N, *this)) break;
3393  continue;
3394  case OPC_CheckOrImm:
3395  if (!::CheckOrImm(MatcherTable, MatcherIndex, N, *this)) break;
3396  continue;
3397  case OPC_CheckImmAllOnesV:
3398  if (!ISD::isBuildVectorAllOnes(N.getNode())) break;
3399  continue;
3400  case OPC_CheckImmAllZerosV:
3401  if (!ISD::isBuildVectorAllZeros(N.getNode())) break;
3402  continue;
3403 
3404  case OPC_CheckFoldableChainNode: {
3405  assert(NodeStack.size() != 1 && "No parent node");
3406  // Verify that all intermediate nodes between the root and this one have
3407  // a single use.
3408  bool HasMultipleUses = false;
3409  for (unsigned i = 1, e = NodeStack.size()-1; i != e; ++i)
3410  if (!NodeStack[i].getNode()->hasOneUse()) {
3411  HasMultipleUses = true;
3412  break;
3413  }
3414  if (HasMultipleUses) break;
3415 
3416  // Check to see that the target thinks this is profitable to fold and that
3417  // we can fold it without inducing cycles in the graph.
3418  if (!IsProfitableToFold(N, NodeStack[NodeStack.size()-2].getNode(),
3419  NodeToMatch) ||
3420  !IsLegalToFold(N, NodeStack[NodeStack.size()-2].getNode(),
3421  NodeToMatch, OptLevel,
3422  true/*We validate our own chains*/))
3423  break;
3424 
3425  continue;
3426  }
3427  case OPC_EmitInteger: {
3428  MVT::SimpleValueType VT =
3429  (MVT::SimpleValueType)MatcherTable[MatcherIndex++];
3430  int64_t Val = MatcherTable[MatcherIndex++];
3431  if (Val & 128)
3432  Val = GetVBR(Val, MatcherTable, MatcherIndex);
3433  RecordedNodes.push_back(std::pair<SDValue, SDNode*>(
3434  CurDAG->getTargetConstant(Val, SDLoc(NodeToMatch),
3435  VT), nullptr));
3436  continue;
3437  }
3438  case OPC_EmitRegister: {
3439  MVT::SimpleValueType VT =
3440  (MVT::SimpleValueType)MatcherTable[MatcherIndex++];
3441  unsigned RegNo = MatcherTable[MatcherIndex++];
3442  RecordedNodes.push_back(std::pair<SDValue, SDNode*>(
3443  CurDAG->getRegister(RegNo, VT), nullptr));
3444  continue;
3445  }
3446  case OPC_EmitRegister2: {
3447  // For targets w/ more than 256 register names, the register enum
3448  // values are stored in two bytes in the matcher table (just like
3449  // opcodes).
3450  MVT::SimpleValueType VT =
3451  (MVT::SimpleValueType)MatcherTable[MatcherIndex++];
3452  unsigned RegNo = MatcherTable[MatcherIndex++];
3453  RegNo |= MatcherTable[MatcherIndex++] << 8;
3454  RecordedNodes.push_back(std::pair<SDValue, SDNode*>(
3455  CurDAG->getRegister(RegNo, VT), nullptr));
3456  continue;
3457  }
3458 
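Target opcodes (in OPC_SwitchOpcode and OPC_EmitNode/OPC_MorphNodeTo) and the register numbers read by OPC_EmitRegister2 are stored as two consecutive table bytes, low byte first. The same decode, restated as a tiny standalone helper:

#include <cstdint>

// Reassemble a 16-bit value from two consecutive matcher-table bytes,
// least-significant byte first, mirroring the Opc/RegNo decoding above.
uint16_t readUInt16(const unsigned char *Table, unsigned &Idx) {
  uint16_t Lo = Table[Idx++];
  uint16_t Hi = Table[Idx++];
  return uint16_t(Lo | (Hi << 8));
}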
3459  case OPC_EmitConvertToTarget: {
3460  // Convert from IMM/FPIMM to target version.
3461  unsigned RecNo = MatcherTable[MatcherIndex++];
3462  assert(RecNo < RecordedNodes.size() && "Invalid EmitConvertToTarget");
3463  SDValue Imm = RecordedNodes[RecNo].first;
3464 
3465  if (Imm->getOpcode() == ISD::Constant) {
3466  const ConstantInt *Val=cast<ConstantSDNode>(Imm)->getConstantIntValue();
3467  Imm = CurDAG->getTargetConstant(*Val, SDLoc(NodeToMatch),
3468  Imm.getValueType());
3469  } else if (Imm->getOpcode() == ISD::ConstantFP) {
3470  const ConstantFP *Val=cast<ConstantFPSDNode>(Imm)->getConstantFPValue();
3471  Imm = CurDAG->getTargetConstantFP(*Val, SDLoc(NodeToMatch),
3472  Imm.getValueType());
3473  }
3474 
3475  RecordedNodes.push_back(std::make_pair(Imm, RecordedNodes[RecNo].second));
3476  continue;
3477  }
3478 
3479  case OPC_EmitMergeInputChains1_0: // OPC_EmitMergeInputChains, 1, 0
3480  case OPC_EmitMergeInputChains1_1: // OPC_EmitMergeInputChains, 1, 1
3481  case OPC_EmitMergeInputChains1_2: { // OPC_EmitMergeInputChains, 1, 2
3482  // These are space-optimized forms of OPC_EmitMergeInputChains.
3483  assert(!InputChain.getNode() &&
3484  "EmitMergeInputChains should be the first chain producing node");
3485  assert(ChainNodesMatched.empty() &&
3486  "Should only have one EmitMergeInputChains per match");
3487 
3488  // Read all of the chained nodes.
3489  unsigned RecNo = Opcode - OPC_EmitMergeInputChains1_0;
3490  assert(RecNo < RecordedNodes.size() && "Invalid EmitMergeInputChains");
3491  ChainNodesMatched.push_back(RecordedNodes[RecNo].first.getNode());
3492 
3493  // FIXME: What if other value results of the node have uses not matched
3494  // by this pattern?
3495  if (ChainNodesMatched.back() != NodeToMatch &&
3496  !RecordedNodes[RecNo].first.hasOneUse()) {
3497  ChainNodesMatched.clear();
3498  break;
3499  }
3500 
3501  // Merge the input chains if they are not intra-pattern references.
3502  InputChain = HandleMergeInputChains(ChainNodesMatched, CurDAG);
3503 
3504  if (!InputChain.getNode())
3505  break; // Failed to merge.
3506  continue;
3507  }
3508 
3509  case OPC_EmitMergeInputChains: {
3510  assert(!InputChain.getNode() &&
3511  "EmitMergeInputChains should be the first chain producing node");
3512  // This node gets a list of nodes we matched in the input that have
3513  // chains. We want to token factor all of the input chains to these nodes
3514  // together. However, if any of the input chains is actually one of the
3515  // nodes matched in this pattern, then we have an intra-match reference.
3516  // Ignore these because the newly token factored chain should not refer to
3517  // the old nodes.
3518  unsigned NumChains = MatcherTable[MatcherIndex++];
3519  assert(NumChains != 0 && "Can't TF zero chains");
3520 
3521  assert(ChainNodesMatched.empty() &&
3522  "Should only have one EmitMergeInputChains per match");
3523 
3524  // Read all of the chained nodes.
3525  for (unsigned i = 0; i != NumChains; ++i) {
3526  unsigned RecNo = MatcherTable[MatcherIndex++];
3527  assert(RecNo < RecordedNodes.size() && "Invalid EmitMergeInputChains");
3528  ChainNodesMatched.push_back(RecordedNodes[RecNo].first.getNode());
3529 
3530  // FIXME: What if other value results of the node have uses not matched
3531  // by this pattern?
3532  if (ChainNodesMatched.back() != NodeToMatch &&
3533  !RecordedNodes[RecNo].first.hasOneUse()) {
3534  ChainNodesMatched.clear();
3535  break;
3536  }
3537  }
3538 
3539  // If the inner loop broke out, the match fails.
3540  if (ChainNodesMatched.empty())
3541  break;
3542 
3543  // Merge the input chains if they are not intra-pattern references.
3544  InputChain = HandleMergeInputChains(ChainNodesMatched, CurDAG);
3545 
3546  if (!InputChain.getNode())
3547  break; // Failed to merge.
3548 
3549  continue;
3550  }
3551 
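HandleMergeInputChains, a static helper defined earlier in this file, token-factors the incoming chains of every node collected in ChainNodesMatched while skipping chains that point back into the matched pattern, since the merged chain must not refer to nodes that are about to be replaced. A rough standalone sketch of that filtering rule only, with hypothetical stand-in types; the real helper also builds the TokenFactor and can refuse the merge entirely (the "Failed to merge" paths above):

#include <algorithm>
#include <vector>

struct Node { std::vector<Node *> ChainOperands; };   // stand-in for SDNode chains

// Collect the external input chains of the matched nodes. A chain operand that
// is itself one of the matched nodes is an intra-pattern reference and is
// dropped; what remains would be token-factored into the single input chain.
std::vector<Node *> collectExternalChains(const std::vector<Node *> &Matched) {
  std::vector<Node *> External;
  for (Node *N : Matched)
    for (Node *C : N->ChainOperands) {
      bool Internal = std::find(Matched.begin(), Matched.end(), C) != Matched.end();
      bool Seen = std::find(External.begin(), External.end(), C) != External.end();
      if (!Internal && !Seen)
        External.push_back(C);
    }
  return External;
}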
3552  case OPC_EmitCopyToReg: {
3553  unsigned RecNo = MatcherTable[MatcherIndex++];
3554  assert(RecNo < RecordedNodes.size() && "Invalid EmitCopyToReg");
3555  unsigned DestPhysReg = MatcherTable[MatcherIndex++];
3556 
3557  if (!InputChain.getNode())
3558  InputChain = CurDAG->getEntryNode();
3559 
3560  InputChain = CurDAG->getCopyToReg(InputChain, SDLoc(NodeToMatch),
3561  DestPhysReg, RecordedNodes[RecNo].first,
3562  InputGlue);
3563 
3564  InputGlue = InputChain.getValue(1);
3565  continue;
3566  }
3567 
3568  case OPC_EmitNodeXForm: {
3569  unsigned XFormNo = MatcherTable[MatcherIndex++];
3570  unsigned RecNo = MatcherTable[MatcherIndex++];
3571  assert(RecNo < RecordedNodes.size() && "Invalid EmitNodeXForm");
3572  SDValue Res = RunSDNodeXForm(RecordedNodes[RecNo].first, XFormNo);
3573  RecordedNodes.push_back(std::pair<SDValue,SDNode*>(Res, nullptr));
3574  continue;
3575  }
3576  case OPC_Coverage: {
3577  // This is emitted right before MorphNode/EmitNode.
3578  // So it should be safe to assume that this node has been selected
3579  unsigned index = MatcherTable[MatcherIndex++];
3580  index |= (MatcherTable[MatcherIndex++] << 8);
3581  dbgs() << "COVERED: " << getPatternForIndex(index) << "\n";
3582  dbgs() << "INCLUDED: " << getIncludePathForIndex(index) << "\n";
3583  continue;
3584  }
3585 
3586  case OPC_EmitNode: case OPC_MorphNodeTo:
3587  case OPC_EmitNode0: case OPC_EmitNode1: case OPC_EmitNode2:
3588  case OPC_MorphNodeTo0: case OPC_MorphNodeTo1: case OPC_MorphNodeTo2: {
3589  uint16_t TargetOpc = MatcherTable[MatcherIndex++];
3590  TargetOpc |= (unsigned short)MatcherTable[MatcherIndex++] << 8;
3591  unsigned EmitNodeInfo = MatcherTable[MatcherIndex++];
3592  // Get the result VT list.
3593  unsigned NumVTs;
3594  // If this is one of the compressed forms, get the number of VTs based
3595  // on the Opcode. Otherwise read the next byte from the table.
3596  if (Opcode >= OPC_MorphNodeTo0 && Opcode <= OPC_MorphNodeTo2)
3597  NumVTs = Opcode - OPC_MorphNodeTo0;
3598  else if (Opcode >= OPC_EmitNode0 && Opcode <= OPC_EmitNode2)
3599  NumVTs = Opcode - OPC_EmitNode0;
3600  else
3601  NumVTs = MatcherTable[MatcherIndex++];
3602  SmallVector<EVT, 4> VTs;
3603  for (unsigned i = 0; i != NumVTs; ++i) {
3604  MVT::SimpleValueType VT =
3605  (MVT::SimpleValueType)MatcherTable[MatcherIndex++];
3606  if (VT == MVT::iPTR)
3607  VT = TLI->getPointerTy(CurDAG->getDataLayout()).SimpleTy;
3608  VTs.push_back(VT);
3609  }
3610 
3611  if (EmitNodeInfo & OPFL_Chain)
3612  VTs.push_back(MVT::Other);
3613  if (EmitNodeInfo & OPFL_GlueOutput)
3614  VTs.push_back(MVT::Glue);
3615 
3616  // This is hot code, so optimize the two most common cases of 1 and 2
3617  // results.
3618  SDVTList VTList;
3619  if (VTs.size() == 1)
3620  VTList = CurDAG->getVTList(VTs[0]);
3621  else if (VTs.size() == 2)
3622  VTList = CurDAG->getVTList(VTs[0], VTs[1]);
3623  else
3624  VTList = CurDAG->getVTList(VTs);
3625 
3626  // Get the operand list.
3627  unsigned NumOps = MatcherTable[MatcherIndex++];
3628  SmallVector<SDValue, 8> Ops;
3629  for (unsigned i = 0; i != NumOps; ++i) {
3630  unsigned RecNo = MatcherTable[MatcherIndex++];
3631  if (RecNo & 128)
3632  RecNo = GetVBR(RecNo, MatcherTable, MatcherIndex);
3633 
3634  assert(RecNo < RecordedNodes.size() && "Invalid EmitNode");
3635  Ops.push_back(RecordedNodes[RecNo].first);
3636  }
3637 
3638  // If there are variadic operands to add, handle them now.
3639  if (EmitNodeInfo & OPFL_VariadicInfo) {
3640  // Determine the start index to copy from.
3641  unsigned FirstOpToCopy = getNumFixedFromVariadicInfo(EmitNodeInfo);
3642  FirstOpToCopy += (EmitNodeInfo & OPFL_Chain) ? 1 : 0;
3643  assert(NodeToMatch->getNumOperands() >= FirstOpToCopy &&
3644  "Invalid variadic node");
3645  // Copy all of the variadic operands, not including a potential glue
3646  // input.
3647  for (unsigned i = FirstOpToCopy, e = NodeToMatch->getNumOperands();
3648  i != e; ++i) {
3649  SDValue V = NodeToMatch->getOperand(i);
3650  if (V.getValueType() == MVT::Glue) break;
3651  Ops.push_back(V);
3652  }
3653  }
3654 
3655  // If this has chain/glue inputs, add them.
3656  if (EmitNodeInfo & OPFL_Chain)
3657  Ops.push_back(InputChain);
3658  if ((EmitNodeInfo & OPFL_GlueInput) && InputGlue.getNode() != nullptr)
3659  Ops.push_back(InputGlue);
3660 
3661  // Create the node.
3662  MachineSDNode *Res = nullptr;
3663  bool IsMorphNodeTo = Opcode == OPC_MorphNodeTo ||
3664  (Opcode >= OPC_MorphNodeTo0 && Opcode <= OPC_MorphNodeTo2);
3665  if (!IsMorphNodeTo) {
3666  // If this is a normal EmitNode command, just create the new node and
3667  // add the results to the RecordedNodes list.
3668  Res = CurDAG->getMachineNode(TargetOpc, SDLoc(NodeToMatch),
3669  VTList, Ops);
3670 
3671  // Add all the non-glue/non-chain results to the RecordedNodes list.
3672  for (unsigned i = 0, e = VTs.size(); i != e; ++i) {
3673  if (VTs[i] == MVT::Other || VTs[i] == MVT::Glue) break;
3674  RecordedNodes.push_back(std::pair<SDValue,SDNode*>(SDValue(Res, i),
3675  nullptr));
3676  }
3677  } else {
3678  assert(NodeToMatch->getOpcode() != ISD::DELETED_NODE &&
3679  "NodeToMatch was removed partway through selection");
3680  SelectionDAG::DAGNodeDeletedListener NDL(*CurDAG, [&](SDNode *N,
3681  SDNode *E) {
3682  CurDAG->salvageDebugInfo(*N);
3683  auto &Chain = ChainNodesMatched;
3684  assert((!E || !is_contained(Chain, N)) &&
3685  "Chain node replaced during MorphNode");
3686  Chain.erase(std::remove(Chain.begin(), Chain.end(), N), Chain.end());
3687  });
3688  Res = cast<MachineSDNode>(MorphNode(NodeToMatch, TargetOpc, VTList,
3689  Ops, EmitNodeInfo));
3690  }
3691 
3692  // If the node had chain/glue results, update our notion of the current
3693  // chain and glue.
3694  if (EmitNodeInfo & OPFL_GlueOutput) {
3695  InputGlue = SDValue(Res, VTs.size()-1);
3696  if (EmitNodeInfo & OPFL_Chain)
3697  InputChain = SDValue(Res, VTs.size()-2);
3698  } else if (EmitNodeInfo & OPFL_Chain)
3699  InputChain = SDValue(Res, VTs.size()-1);
3700 
3701  // If the OPFL_MemRefs glue is set on this node, slap all of the
3702  // accumulated memrefs onto it.
3703  //
3704  // FIXME: This is vastly incorrect for patterns with multiple outputs
3705  // instructions that access memory and for ComplexPatterns that match
3706  // loads.
3707  if (EmitNodeInfo & OPFL_MemRefs) {
3708  // Only attach load or store memory operands if the generated
3709  // instruction may load or store.
3710  const MCInstrDesc &MCID = TII->get(TargetOpc);
3711  bool mayLoad = MCID.mayLoad();
3712  bool mayStore = MCID.mayStore();
3713 
3714  // We expect to have relatively few of these so just filter them into a
3715  // temporary buffer so that we can easily add them to the instruction.
3716  SmallVector<MachineMemOperand *, 4> FilteredMemRefs;
3717  for (MachineMemOperand *MMO : MatchedMemRefs) {
3718  if (MMO->isLoad()) {
3719  if (mayLoad)
3720  FilteredMemRefs.push_back(MMO);
3721  } else if (MMO->isStore()) {
3722  if (mayStore)
3723  FilteredMemRefs.push_back(MMO);
3724  } else {
3725  FilteredMemRefs.push_back(MMO);
3726  }
3727  }
3728 
3729  CurDAG->setNodeMemRefs(Res, FilteredMemRefs);
3730  }
3731 
3732  LLVM_DEBUG(if (!MatchedMemRefs.empty() && Res->memoperands_empty()) dbgs()
3733  << " Dropping mem operands\n";
3734  dbgs() << " " << (IsMorphNodeTo ? "Morphed" : "Created")
3735  << " node: ";
3736  Res->dump(CurDAG););
3737 
3738  // If this was a MorphNodeTo then we're completely done!
3739  if (IsMorphNodeTo) {
3740  // Update chain uses.
3741  UpdateChains(Res, InputChain, ChainNodesMatched, true);
3742  return;
3743  }
3744  continue;
3745  }
3746 
3747  case OPC_CompleteMatch: {
3748  // The match has been completed, and any new nodes (if any) have been
3749  // created. Patch up references to the matched dag to use the newly
3750  // created nodes.
3751  unsigned NumResults = MatcherTable[MatcherIndex++];
3752 
3753  for (unsigned i = 0; i != NumResults; ++i) {
3754  unsigned ResSlot = MatcherTable[MatcherIndex++];
3755  if (ResSlot & 128)
3756  ResSlot = GetVBR(ResSlot, MatcherTable, MatcherIndex);
3757 
3758  assert(ResSlot < RecordedNodes.size() && "Invalid CompleteMatch");
3759  SDValue Res = RecordedNodes[ResSlot].first;
3760 
3761  assert(i < NodeToMatch->getNumValues() &&
3762  NodeToMatch->getValueType(i) != MVT::Other &&
3763  NodeToMatch->getValueType(i) != MVT::Glue &&
3764  "Invalid number of results to complete!");
3765  assert((NodeToMatch->getValueType(i) == Res.getValueType() ||
3766  NodeToMatch->getValueType(i) == MVT::iPTR ||
3767  Res.getValueType() == MVT::iPTR ||
3768  NodeToMatch->getValueType(i).getSizeInBits() ==
3769  Res.getValueSizeInBits()) &&
3770  "invalid replacement");
3771  ReplaceUses(SDValue(NodeToMatch, i), Res);
3772  }
3773 
3774  // Update chain uses.
3775  UpdateChains(NodeToMatch, InputChain, ChainNodesMatched, false);
3776 
3777  // If the root node defines glue, we need to update it to the glue result.
3778  // TODO: This never happens in our tests and I think it can be removed /
3779  // replaced with an assert, but if we do it this way the change is
3780  // NFC.
3781  if (NodeToMatch->getValueType(NodeToMatch->getNumValues() - 1) ==
3782  MVT::Glue &&
3783  InputGlue.getNode())
3784  ReplaceUses(SDValue(NodeToMatch, NodeToMatch->getNumValues() - 1),
3785  InputGlue);
3786 
3787  assert(NodeToMatch->use_empty() &&
3788  "Didn't replace all uses of the node?");
3789  CurDAG->RemoveDeadNode(NodeToMatch);
3790 
3791  return;
3792  }
3793  }
3794 
3795  // If the code reached this point, then the match failed. See if there is
3796  // another child to try in the current 'Scope', otherwise pop it until we
3797  // find a case to check.
3798  LLVM_DEBUG(dbgs() << " Match failed at index " << CurrentOpcodeIndex
3799  << "\n");
3800  ++NumDAGIselRetries;
3801  while (true) {
3802  if (MatchScopes.empty()) {
3803  CannotYetSelect(NodeToMatch);
3804  return;
3805  }
3806 
3807  // Restore the interpreter state back to the point where the scope was
3808  // formed.
3809  MatchScope &LastScope = MatchScopes.back();
3810  RecordedNodes.resize(LastScope.NumRecordedNodes);
3811  NodeStack.clear();
3812  NodeStack.append(LastScope.NodeStack.begin(), LastScope.NodeStack.end());
3813  N = NodeStack.back();
3814 
3815  if (LastScope.NumMatchedMemRefs != MatchedMemRefs.size())
3816  MatchedMemRefs.resize(LastScope.NumMatchedMemRefs);
3817  MatcherIndex = LastScope.FailIndex;
3818 
3819  LLVM_DEBUG(dbgs() << " Continuing at " << MatcherIndex << "\n");
3820 
3821  InputChain = LastScope.InputChain;
3822  InputGlue = LastScope.InputGlue;
3823  if (!LastScope.HasChainNodesMatched)
3824  ChainNodesMatched.clear();
3825 
3826  // Check to see what the offset is at the new MatcherIndex. If it is zero
3827  // we have reached the end of this scope, otherwise we have another child
3828  // in the current scope to try.
3829  unsigned NumToSkip = MatcherTable[MatcherIndex++];
3830  if (NumToSkip & 128)
3831  NumToSkip = GetVBR(NumToSkip, MatcherTable, MatcherIndex);
3832 
3833  // If we have another child in this scope to match, update FailIndex and
3834  // try it.
3835  if (NumToSkip != 0) {
3836  LastScope.FailIndex = MatcherIndex+NumToSkip;
3837  break;
3838  }
3839 
3840  // End of this scope, pop it and try the next child in the containing
3841  // scope.
3842  MatchScopes.pop_back();
3843  }
3844  }
3845 }
3846 
3847 bool SelectionDAGISel::isOrEquivalentToAdd(const SDNode *N) const {
3848  assert(N->getOpcode() == ISD::OR && "Unexpected opcode");
3849  auto *C = dyn_cast<ConstantSDNode>(N->getOperand(1));
3850  if (!C)
3851  return false;
3852 
3853  // Detect when "or" is used to add an offset to a stack object.
3854  if (auto *FN = dyn_cast<FrameIndexSDNode>(N->getOperand(0))) {
3855  MachineFrameInfo &MFI = MF->getFrameInfo();
3856  unsigned A = MFI.getObjectAlignment(FN->getIndex());
3857  assert(isPowerOf2_32(A) && "Unexpected alignment");
3858  int32_t Off = C->getSExtValue();
3859  // If the alleged offset fits in the zero bits guaranteed by
3860  // the alignment, then this or is really an add.
3861  return (Off >= 0) && (((A - 1) & Off) == unsigned(Off));
3862  }
3863  return false;
3864 }
3865 
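The alignment reasoning above can be made concrete: if the frame object is 8-byte aligned, the low three bits of its address are zero, so OR-ing in any offset smaller than 8 cannot overlap a set bit and behaves exactly like addition. A standalone restatement of the check, with a hypothetical helper name:

#include <cassert>
#include <cstdint>

// Mirrors the test above: a non-negative offset that fits entirely inside the
// zero bits guaranteed by a power-of-two alignment makes (Base | Off) equal to
// (Base + Off) for every Base that is a multiple of Alignment.
bool orActsLikeAdd(uint32_t Alignment, int64_t Off) {
  return Off >= 0 && ((uint64_t(Alignment) - 1) & uint64_t(Off)) == uint64_t(Off);
}

int main() {
  assert(orActsLikeAdd(8, 3));    // 8-byte aligned base: (Base | 3) == Base + 3
  assert(!orActsLikeAdd(8, 12));  // bit 3 of 12 may collide with the base
  uint64_t Base = 0x1000;         // a multiple of 8
  assert((Base | 3) == Base + 3);
  return 0;
}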
3866 void SelectionDAGISel::CannotYetSelect(SDNode *N) {
3867  std::string msg;
3868  raw_string_ostream Msg(msg);
3869  Msg << "Cannot select: ";
3870 
3871  if (N->getOpcode() != ISD::INTRINSIC_W_CHAIN &&
3872  N->getOpcode() != ISD::INTRINSIC_WO_CHAIN &&
3873  N->getOpcode() != ISD::INTRINSIC_VOID) {
3874  N->printrFull(Msg, CurDAG);
3875  Msg << "\nIn function: " << MF->getName();
3876  } else {
3877  bool HasInputChain = N->getOperand(0).getValueType() == MVT::Other;
3878  unsigned iid =
3879  cast<ConstantSDNode>(N->getOperand(HasInputChain))->getZExtValue();
3880  if (iid < Intrinsic::num_intrinsics)
3881  Msg << "intrinsic %" << Intrinsic::getName((Intrinsic::ID)iid, None);
3882  else if (const TargetIntrinsicInfo *TII = TM.getIntrinsicInfo())
3883  Msg << "target intrinsic %" << TII->getName(iid);
3884  else
3885  Msg << "unknown intrinsic #" << iid;
3886  }
3887  report_fatal_error(Msg.str());
3888 }
3889 
3890 char SelectionDAGISel::ID = 0;
ANNOTATION_LABEL - Represents a mid basic block label used by annotations.
Definition: ISDOpcodes.h:682
SDNode * MorphNodeTo(SDNode *N, unsigned Opc, SDVTList VTs, ArrayRef< SDValue > Ops)
This mutates the specified node to have the specified return type, opcode, and operands.
uint64_t CallInst * C
Return a value (possibly void), from a function.
std::vector< BitTestBlock > BitTestCases
Vector of BitTestBlock structures used to communicate SwitchInst code generation information.
Diagnostic information for ISel fallback path.
SDNode * SelectNodeTo(SDNode *N, unsigned MachineOpc, EVT VT)
These are used for target selectors to mutate the specified node to have the specified return type...
SelectionDAGBuilder * SDB
static LLVM_ATTRIBUTE_ALWAYS_INLINE bool CheckAndImm(const unsigned char *MatcherTable, unsigned &MatcherIndex, SDValue N, const SelectionDAGISel &SDISel)
A parsed version of the target data layout string in and methods for querying it. ...
Definition: DataLayout.h:110
mop_iterator operands_end()
Definition: MachineInstr.h:453
EVT getValueType() const
Return the ValueType of the referenced return value.
static SDNode * findGlueUse(SDNode *N)
findGlueUse - Return use of MVT::Glue value produced by the specified SDNode.
void EmitLiveInCopies(MachineBasicBlock *EntryMBB, const TargetRegisterInfo &TRI, const TargetInstrInfo &TII)
EmitLiveInCopies - Emit copies to initialize livein virtual registers into the given entry block...
static LLVM_ATTRIBUTE_ALWAYS_INLINE bool CheckCondCode(const unsigned char *MatcherTable, unsigned &MatcherIndex, SDValue N)
AnalysisUsage & addPreserved()
Add the specified Pass class to the set of analyses preserved by this pass.
bool hasPostISelHook(QueryType Type=IgnoreBundle) const
Return true if this instruction requires adjustment after instruction selection by calling a target h...
Definition: MachineInstr.h:885
Diagnostic information for missed-optimization remarks.
static PassRegistry * getPassRegistry()
getPassRegistry - Access the global registry object, which is automatically initialized at applicatio...
uint64_t getZExtValue() const
Get zero extended value.
Definition: APInt.h:1562
GCNRegPressure max(const GCNRegPressure &P1, const GCNRegPressure &P2)
unsigned getOpcode() const
Return the SelectionDAG opcode value for this node.
static int getNumFixedFromVariadicInfo(unsigned Flags)
getNumFixedFromVariadicInfo - Transform an EmitNode flags word into the number of fixed arity values ...
This class represents an incoming formal argument to a Function.
Definition: Argument.h:29
bool LegalizeTypes()
This transforms the SelectionDAG into a SelectionDAG that only uses types natively supported by the t...
DiagnosticInfoOptimizationBase::Argument NV
GCFunctionInfo * GFI
virtual bool IsProfitableToFold(SDValue N, SDNode *U, SDNode *Root) const
IsProfitableToFold - Returns true if it&#39;s profitable to fold the specific operand node N of U during ...
static LLVM_ATTRIBUTE_ALWAYS_INLINE bool CheckValueType(const unsigned char *MatcherTable, unsigned &MatcherIndex, SDValue N, const TargetLowering *TLI, const DataLayout &DL)
DELETED_NODE - This is an illegal value that is used to catch errors.
Definition: ISDOpcodes.h:41
MDNODE_SDNODE - This is a node that holdes an MDNode*, which is used to reference metadata in the IR...
Definition: ISDOpcodes.h:740
static unsigned virtReg2Index(unsigned Reg)
Convert a virtual register number to a 0-based index.
LLVM_ATTRIBUTE_NORETURN void report_fatal_error(Error Err, bool gen_crash_diag=true)
Report a serious error, calling any installed error handler.
Definition: Error.cpp:139
This class represents lattice values for constants.
Definition: AllocatorList.h:23
Various leaf nodes.
Definition: ISDOpcodes.h:59
MCSymbol * addLandingPad(MachineBasicBlock *LandingPad)
Add a new panding pad, and extract the exception handling information from the landingpad instruction...
static cl::opt< bool > ViewISelDAGs("view-isel-dags", cl::Hidden, cl::desc("Pop up a window to show isel dags as they are selected"))
MCSymbol - Instances of this class represent a symbol name in the MC file, and MCSymbols are created ...
Definition: MCSymbol.h:41
unsigned getCatchPadExceptionPointerVReg(const Value *CPI, const TargetRegisterClass *RC)
DenseMap< MachineBasicBlock *, SmallVector< unsigned, 4 > > LPadToCallSiteMap
Map a landing pad to the call site indexes.
unsigned EnableFastISel
EnableFastISel - This flag enables fast-path instruction selection which trades away generated code q...
static void propagateSwiftErrorVRegs(FunctionLoweringInfo *FuncInfo)
Propagate swifterror values through the machine function CFG.
virtual const TargetRegisterInfo * getRegisterInfo() const
getRegisterInfo - If register information is available, return it.
void ReplaceUses(SDValue F, SDValue T)
ReplaceUses - replace all uses of the old node F with the use of the new node T.
std::error_code remove(const Twine &path, bool IgnoreNonExisting=true)
Remove path.
Sched::Preference getSchedulingPreference() const
Return target scheduling preference.
bool CheckAndMask(SDValue LHS, ConstantSDNode *RHS, int64_t DesiredMaskS) const
CheckAndMask - The isel is trying to match something like (and X, 255).
const MachineFunctionProperties & getProperties() const
Get the function properties.
virtual unsigned getRegisterByName(const char *RegName, EVT VT, SelectionDAG &DAG) const
Return the register ID of the name passed in.
SelectionDAGBuilder - This is the common target-independent lowering implementation that is parameter...
EVT getValueType(unsigned ResNo) const
Return the type of a specified result.
const DebugLoc & getDebugLoc() const
Returns the debug location id of this MachineInstr.
Definition: MachineInstr.h:382
virtual bool CheckNodePredicateWithOperands(SDNode *N, unsigned PredNo, const SmallVectorImpl< SDValue > &Operands) const
CheckNodePredicateWithOperands - This function is generated by tblgen in the target.
iterator getFirstTerminator()
Returns an iterator to the first terminator instruction of this basic block.
static unsigned getFlagWord(unsigned Kind, unsigned NumOps)
Definition: InlineAsm.h:268
Describe properties that are true of each instruction in the target description file.
Definition: MCInstrDesc.h:163
void initializeTargetLibraryInfoWrapperPassPass(PassRegistry &)
Clients of various APIs that cause global effects on the DAG can optionally implement this interface...
Definition: SelectionDAG.h:279
unsigned getReg() const
getReg - Returns the register number.
friend struct DAGUpdateListener
DAGUpdateListener is a friend so it can manipulate the listener stack.
Definition: SelectionDAG.h:327
static bool isVirtualRegister(unsigned Reg)
Return true if the specified register number is in the virtual register namespace.
unsigned Reg
bool LegalizeVectors()
This transforms the SelectionDAG into a SelectionDAG that only uses vector math operations supported ...
This file contains the declarations for metadata subclasses.
virtual const TargetLowering * getTargetLowering() const
bool mayWriteToMemory() const
Return true if this instruction may modify memory.
bool isSubsetOf(const APInt &RHS) const
This operation checks that all bits set in this APInt are also set in RHS.
Definition: APInt.h:1328
void copyToMachineFrameInfo(MachineFrameInfo &MFI) const
iterator insertAfter(iterator I, MachineInstr *MI)
Insert MI into the instruction list after I.
unsigned getResNo() const
Convenience function for get().getResNo().
void visitJumpTableHeader(JumpTable &JT, JumpTableHeader &JTH, MachineBasicBlock *SwitchBB)
visitJumpTableHeader - This function emits necessary code to produce index in the JumpTable from swit...
bool NewNodesMustHaveLegalTypes
When true, additional steps are taken to ensure that getConstant() and similar functions return DAG n...
Definition: SelectionDAG.h:323
TargetGlobalAddress - Like GlobalAddress, but the DAG does no folding or anything else with this node...
Definition: ISDOpcodes.h:130
bool isTerminator() const
Definition: Instruction.h:128
unsigned second
arg_iterator arg_end()
Definition: Function.h:682
virtual const TargetRegisterClass * getRegClassFor(MVT VT) const
Return the register class that should be used for the specified value type.
STATISTIC(NumFunctions, "Total number of functions")
static bool findNonImmUse(SDNode *Root, SDNode *Def, SDNode *ImmedUse, bool IgnoreChains)
findNonImmUse - Return true if "Def" is a predecessor of "Root" via a path beyond "ImmedUse"...
unsigned const TargetRegisterInfo * TRI
A debug info location.
Definition: DebugLoc.h:33
Metadata node.
Definition: Metadata.h:863
bool isInteger() const
Return true if this is an integer or a vector integer type.
Definition: ValueTypes.h:140
F(f)
const MDOperand & getOperand(unsigned I) const
Definition: Metadata.h:1068
An instruction for reading from memory.
Definition: Instructions.h:167
RegisterPassParser class - Handle the addition of new machine passes.
void setNodeId(int Id)
Set unique node id.
SDNode * getNode() const
get the SDNode which holds the desired result
static MachineBasicBlock::iterator FindSplitPointForStackProtector(MachineBasicBlock *BB)
Find the split point at which to splice the end of BB into its success stack protector check machine ...
const Instruction * getTerminator() const LLVM_READONLY
Returns the terminator instruction if the block is well formed or null if the block is not well forme...
Definition: BasicBlock.cpp:137
SDValue getNode(unsigned Opcode, const SDLoc &DL, EVT VT, ArrayRef< SDUse > Ops)
Gets or creates the specified node.
const SDValue & setRoot(SDValue N)
Set the current root tag of the SelectionDAG.
Definition: SelectionDAG.h:468
bool mayLoad() const
Return true if this instruction could possibly read memory.
Definition: MCInstrDesc.h:398
bool isPHI() const
void viewGraph(const std::string &Title)
Pop up a GraphViz/gv window with the DAG rendered using &#39;dot&#39;.
static LLVM_ATTRIBUTE_ALWAYS_INLINE bool CheckChildType(const unsigned char *MatcherTable, unsigned &MatcherIndex, SDValue N, const TargetLowering *TLI, const DataLayout &DL, unsigned ChildNo)
unsigned getValueSizeInBits() const
Returns the size of the value in bits.
EntryToken - This is the marker used to indicate the start of a region.
Definition: ISDOpcodes.h:44
void init(MachineFunction &NewMF, OptimizationRemarkEmitter &NewORE, Pass *PassPtr, const TargetLibraryInfo *LibraryInfo, LegacyDivergenceAnalysis *Divergence)
Prepare this SelectionDAG to process code in the given MachineFunction.
void setCurrentSwiftErrorVReg(const MachineBasicBlock *MBB, const Value *, unsigned)
Set the swifterror virtual register in the SwiftErrorVRegDefMap for this basic block.
static bool isUseOperandTiedToDef(unsigned Flag, unsigned &Idx)
isUseOperandTiedToDef - Return true if the flag of the inline asm operand indicates it is an use oper...
Definition: InlineAsm.h:341
bool isReturn() const
Return true if the instruction is a return.
Definition: MCInstrDesc.h:245
bool hasOneUse() const
Return true if there is exactly one node using value ResNo of Node.
MachineFunction * MF
ScheduleDAGSDNodes * createHybridListDAGScheduler(SelectionDAGISel *IS, CodeGenOpt::Level)
createHybridListDAGScheduler - This creates a bottom up register pressure aware list scheduler that m...
static LLVM_ATTRIBUTE_ALWAYS_INLINE bool CheckChildSame(const unsigned char *MatcherTable, unsigned &MatcherIndex, SDValue N, const SmallVectorImpl< std::pair< SDValue, SDNode *>> &RecordedNodes, unsigned ChildNo)
CheckChildSame - Implements OP_CheckChildXSame.
const TargetLibraryInfo * LibInfo
void set(const Function &Fn, MachineFunction &MF, SelectionDAG *DAG)
set - Initialize this FunctionLoweringInfo with the given Function and its associated MachineFunction...
static void setupSwiftErrorVals(const Function &Fn, const TargetLowering *TLI, FunctionLoweringInfo *FuncInfo)
Set up SwiftErrorVals by going through the function.
static DIExpression * prepend(const DIExpression *Expr, bool DerefBefore, int64_t Offset=0, bool DerefAfter=false, bool StackValue=false)
Prepend DIExpr with a deref and offset operation and optionally turn it into a stack value...
RESULT,OUTCHAIN = INTRINSIC_W_CHAIN(INCHAIN, INTRINSICID, arg1, ...) This node represents a target in...
Definition: ISDOpcodes.h:158
static bool hasPredecessorHelper(const SDNode *N, SmallPtrSetImpl< const SDNode *> &Visited, SmallVectorImpl< const SDNode *> &Worklist, unsigned int MaxSteps=0, bool TopologicalPrune=false)
Returns true if N is a predecessor of any node in Worklist.
bool selectInstruction(const Instruction *I)
Do "fast" instruction selection for the given LLVM IR instruction and append the generated machine in...
Definition: FastISel.cpp:1597
static bool isFoldedOrDeadInstruction(const Instruction *I, FunctionLoweringInfo *FuncInfo)
isFoldedOrDeadInstruction - Return true if the specified instruction is side-effect free and is eithe...
StackProtectorDescriptor SPDescriptor
A StackProtectorDescriptor structure used to communicate stack protector information in between Selec...
std::pair< unsigned, bool > getOrCreateSwiftErrorVRegDefAt(const Instruction *)
Get or create the swifterror value virtual register for a def of a swifterror by an instruction...
std::pair< iterator, bool > insert(const std::pair< KeyT, ValueT > &KV)
Definition: DenseMap.h:221
static LLVM_ATTRIBUTE_ALWAYS_INLINE bool CheckSame(const unsigned char *MatcherTable, unsigned &MatcherIndex, SDValue N, const SmallVectorImpl< std::pair< SDValue, SDNode *>> &RecordedNodes)
CheckSame - Implements OP_CheckSame.
void visitSPDescriptorParent(StackProtectorDescriptor &SPD, MachineBasicBlock *ParentBB)
Codegen a new tail for a stack protector check ParentMBB which has had its tail spliced into a stack ...
SDValue getRoot()
Return the current virtual root of the Selection DAG, flushing any PendingLoad items.
void clear()
Clear state and free memory necessary to make this SelectionDAG ready to process a new block...
AnalysisUsage & addRequired()
void setLastLocalValue(MachineInstr *I)
Update the position of the last instruction emitted for materializing constants for use in the curren...
Definition: FastISel.h:241
A description of a memory reference used in the backend.
void resetTargetOptions(const Function &F) const
Reset the target options based on the function&#39;s attributes.
bool hasBranchDivergence() const
Return true if branch divergence exists.
StringRef getName(ID id)
Return the LLVM name for an intrinsic, such as "llvm.ppc.altivec.lvx".
Definition: Function.cpp:625
void visitSwitchCase(CaseBlock &CB, MachineBasicBlock *SwitchBB)
visitSwitchCase - Emits the necessary code to represent a single node in the binary search tree resul...
Option class for critical edge splitting.
MachineFunctionPass - This class adapts the FunctionPass interface to allow convenient creation of pa...
Printable printMBBReference(const MachineBasicBlock &MBB)
Prints a machine basic block reference.
virtual bool ComplexPatternFuncMutatesDAG() const
Return true if complex patterns for this target can mutate the DAG.
static bool IsLegalToFold(SDValue N, SDNode *U, SDNode *Root, CodeGenOpt::Level OptLevel, bool IgnoreChains=false)
IsLegalToFold - Returns true if the specific operand node N of U can be folded during instruction sel...
This class is basically a combination of TimeRegion and Timer.
Definition: Timer.h:160
bool isBuildVectorAllZeros(const SDNode *N)
Return true if the specified node is a BUILD_VECTOR where all of the elements are 0 or undef...
A Use represents the edge between a Value definition and its users.
Definition: Use.h:55
void resolveOrClearDbgInfo()
Evict any dangling debug information, attempting to salvage it first.
const Value * SwiftErrorArg
The swifterror argument of the current function.
bool isKnownWindowsMSVCEnvironment() const
Checks if the environment is MSVC.
Definition: Triple.h:534
MachineSDNode * getMachineNode(unsigned Opcode, const SDLoc &dl, EVT VT)
These are used for target selectors to create a new node with specified return type(s), MachineInstr opcode, and operands.
void visitJumpTable(JumpTable &JT)
visitJumpTable - Emit JumpTable node in the current MBB
DenseMap< const Value *, unsigned > ValueMap
ValueMap - Since we emit code for the function a basic block at a time, we must remember which virtua...
const TargetLowering * TLI
#define LLVM_ATTRIBUTE_ALWAYS_INLINE
LLVM_ATTRIBUTE_ALWAYS_INLINE - On compilers where we have a directive to do so, mark a method "always...
Definition: Compiler.h:213
This class consists of common code factored out of the SmallVector class to reduce code duplication b...
Definition: APFloat.h:41
CopyToReg - This node has three operands: a chain, a register number to set to this value...
Definition: ISDOpcodes.h:169
const MDNode * getMD() const
virtual bool enableMachineSchedDefaultSched() const
True if the machine scheduler should disable the TLI preference for preRA scheduling with the source ...
op_iterator op_end() const
virtual void Select(SDNode *N)=0
Main hook for targets to transform nodes into machine nodes.
ScheduleDAGSDNodes *(*)(SelectionDAGISel *, CodeGenOpt::Level) FunctionPassCtor
The MachineFrameInfo class represents an abstract stack frame until prolog/epilog code is inserted...
SDValue getEntryNode() const
Return the token chain corresponding to the entry of the function.
Definition: SelectionDAG.h:462
static void EnforceNodeIdInvariant(SDNode *N)
const DataLayout & getDataLayout() const
Definition: SelectionDAG.h:403
An analysis pass which caches information about the entire Module.
Definition: GCMetadata.h:152
SDVTList getVTList(EVT VT)
Return an SDVTList that represents the list of values specified.
static LLVM_ATTRIBUTE_ALWAYS_INLINE bool CheckOpcode(const unsigned char *MatcherTable, unsigned &MatcherIndex, SDNode *N)
void setFunctionLoweringInfo(FunctionLoweringInfo *FuncInfo)
Definition: SelectionDAG.h:392
virtual unsigned getFrameRegister(const MachineFunction &MF) const =0
Debug information queries.
This class defines information used to lower LLVM code to legal SelectionDAG operators that the targe...
This file implements a class to represent arbitrary precision integral constant values and operations...
INLINEASM - Represents an inline asm block.
Definition: ISDOpcodes.h:668
This represents a list of ValueType&#39;s that has been intern&#39;d by a SelectionDAG.
unsigned DAGSize
DAGSize - Size of DAG being instruction selected.
void initializeAAResultsWrapperPassPass(PassRegistry &)
MachineInstr * getVRegDef(unsigned Reg) const
getVRegDef - Return the machine instr that defines the specified virtual register or null if none is ...
bool runOnMachineFunction(MachineFunction &MF) override
runOnMachineFunction - This method must be overloaded to perform the desired machine code transformat...
defusechain_iterator - This class provides iterator support for machine operands in the function that...
unsigned AssignTopologicalOrder()
Topological-sort the AllNodes list and a assign a unique node id for each node in the DAG based on th...
int getArgumentFrameIndex(const Argument *A)
getArgumentFrameIndex - Get frame index for the byval argument.
void getAnalysisUsage(AnalysisUsage &AU) const override
getAnalysisUsage - Subclasses that override getAnalysisUsage must call this.
int64_t getSExtValue() const
Key
PAL metadata keys.
virtual StringRef getPatternForIndex(unsigned index)
getPatternForIndex - Patterns selected by tablegen during ISEL
static bool hasExceptionPointerOrCodeUser(const CatchPadInst *CPI)
static void InvalidateNodeId(SDNode *N)
This is a fast-path instruction selection class that generates poor code and doesn&#39;t support illegal ...
Definition: FastISel.h:66
A constant value that is initialized with an expression using other constant values.
Definition: Constants.h:888
unsigned getSizeInBits() const
Return the size of the specified value type in bits.
Definition: ValueTypes.h:291
Type * getType() const
All values are typed, get the type of this value.
Definition: Value.h:244
bool getBoolValue() const
Convert APInt to a boolean value.
Definition: APInt.h:477
bool canTrap() const
Return true if evaluation of this constant could trap.
Definition: Constants.cpp:442
virtual bool supportSwiftError() const
Return true if the target supports swifterror attribute.
bool isSwiftError() const
Return true if this value is a swifterror value.
Definition: Value.cpp:730
bool isStatepoint(const CallBase *Call)
Definition: Statepoint.cpp:20
SDNode * mutateStrictFPToFP(SDNode *Node)
Mutate the specified strict FP node to its non-strict equivalent, unlinking the node from its chain a...
#define T
ScheduleDAGSDNodes * createBURRListDAGScheduler(SelectionDAGISel *IS, CodeGenOpt::Level OptLevel)
createBURRListDAGScheduler - This creates a bottom up register usage reduction list scheduler...
ArrayRef - Represent a constant reference to an array (0 or more elements consecutively in memory)...
Definition: APInt.h:32
BasicBlock * SplitCriticalEdge(Instruction *TI, unsigned SuccNum, const CriticalEdgeSplittingOptions &Options=CriticalEdgeSplittingOptions())
If this edge is a critical edge, insert a new node to split the critical edge.
static unsigned IsPredicateKnownToFail(const unsigned char *Table, unsigned Index, SDValue N, bool &Result, const SelectionDAGISel &SDISel, SmallVectorImpl< std::pair< SDValue, SDNode *>> &RecordedNodes)
IsPredicateKnownToFail - If we know how and can do so without pushing a scope, evaluate the current n...
AttributeList getAttributes() const
Return the attribute list for this Function.
Definition: Function.h:223
An instruction for storing to memory.
Definition: Instructions.h:320
CondCode
ISD::CondCode enum - These are ordered carefully to make the bitfields below work out...
Definition: ISDOpcodes.h:968
int getNumber() const
MachineBasicBlocks are uniquely numbered at the function level, unless they&#39;re not in a MachineFuncti...
op_iterator op_begin() const
MachinePassRegistry - Track the registration of machine passes.
void init(GCFunctionInfo *gfi, AliasAnalysis *AA, const TargetLibraryInfo *li)
virtual const TargetInstrInfo * getInstrInfo() const
TargetConstant* - Like Constant*, but the DAG does not do any folding, simplification, or lowering of the constant.
Definition: ISDOpcodes.h:124
SDValue getTargetConstant(uint64_t Val, const SDLoc &DL, EVT VT, bool isOpaque=false)
Definition: SelectionDAG.h:581
static MachinePassRegistry< FunctionPassCtor > Registry
RegisterScheduler class - Track the registration of instruction schedulers.
static LLVM_ATTRIBUTE_ALWAYS_INLINE uint64_t GetVBR(uint64_t Val, const unsigned char *MatcherTable, unsigned &Idx)
GetVBR - decode a vbr encoding whose top bit is set.
instr_iterator insert(instr_iterator I, MachineInstr *M)
Insert MI into the instruction list before I, possibly inside a bundle.
Concrete subclass of DominatorTreeBase that is used to compute a normal dominator tree...
Definition: Dominators.h:144
RESULT = INTRINSIC_WO_CHAIN(INTRINSICID, arg1, arg2, ...) This node represents a target intrinsic fun...
Definition: ISDOpcodes.h:150
SDValue getTargetConstantFP(double Val, const SDLoc &DL, EVT VT)
Definition: SelectionDAG.h:613
Legacy analysis pass which computes BranchProbabilityInfo.
unsigned getNumSuccessors() const
Return the number of successors that this instruction has.
const DataLayout & getDataLayout() const
Return the DataLayout attached to the Module associated to this MF.
auto count(R &&Range, const E &Element) -> typename std::iterator_traits< decltype(adl_begin(Range))>::difference_type
Wrapper function around std::count to count the number of times an element Element occurs in the give...
Definition: STLExtras.h:1251
void printrFull(raw_ostream &O, const SelectionDAG *G=nullptr) const
Print a SelectionDAG node and all children down to the leaves.
StringRef getName() const
getName - Return the name of the corresponding LLVM function.
static cl::opt< bool > EnableFastISelFallbackReport("fast-isel-report-on-fallback", cl::Hidden, cl::desc("Emit a diagnostic when \ast\instruction selection " "falls back to SelectionDAG."))
UNDEF - An undefined node.
Definition: ISDOpcodes.h:177
static cl::opt< bool > ViewDAGCombineLT("view-dag-combine-lt-dags", cl::Hidden, cl::desc("Pop up a window to show dags before the post legalize types" " dag combine pass"))
uint16_t MCPhysReg
An unsigned integer type large enough to represent all physical registers, but not necessarily virtua...
void finishBasicBlock()
Flush the local value map and sink local values if possible.
Definition: FastISel.cpp:144
TargetInstrInfo - Interface to description of machine instruction set.
This corresponds to the llvm.lifetime.
Definition: ISDOpcodes.h:850
MachineRegisterInfo * RegInfo
void clear()
clear - Clear out all the function-specific state.
iterator find(const_arg_type_t< KeyT > Val)
Definition: DenseMap.h:176
unsigned ComputeNumSignBits(SDValue Op, unsigned Depth=0) const
Return the number of times the sign bit of the register is replicated into the other bits...
bool isVoidTy() const
Return true if this is &#39;void&#39;.
Definition: Type.h:140
const BasicBlock & getEntryBlock() const
Definition: Function.h:642
unsigned getNumValues() const
Return the number of values defined/returned by this operator.
bool hasAttrSomewhere(Attribute::AttrKind Kind, unsigned *Index=nullptr) const
Return true if the specified attribute is set for at least one parameter or for the return value...
static SDValue HandleMergeInputChains(SmallVectorImpl< SDNode *> &ChainNodesMatched, SelectionDAG *CurDAG)
HandleMergeInputChains - This implements the OPC_EmitMergeInputChains operation for when the pattern ...
MachineInstrBuilder BuildMI(MachineFunction &MF, const DebugLoc &DL, const MCInstrDesc &MCID)
Builder interface. Specify how to create the initial instruction itself.
unsigned getObjectAlignment(int ObjectIdx) const
Return the alignment of the specified stack object.
CriticalEdgeSplittingOptions & setMergeIdenticalEdges()
static void computeUsesMSVCFloatingPoint(const Triple &TT, const Function &F, MachineModuleInfo &MMI)
virtual bool CheckPatternPredicate(unsigned PredNo) const
CheckPatternPredicate - This function is generated by tblgen in the target.
initializer< Ty > init(const Ty &Val)
Definition: CommandLine.h:427
SmallPtrSet< const Instruction *, 4 > ElidedArgCopyInstrs
void initializeBranchProbabilityInfoWrapperPassPass(PassRegistry &)
OUTCHAIN = INTRINSIC_VOID(INCHAIN, INTRINSICID, arg1, arg2, ...) This node represents a target intrin...
Definition: ISDOpcodes.h:165
CodeGenOpt::Level OptLevel
void addLiveIn(MCPhysReg PhysReg, LaneBitmask LaneMask=LaneBitmask::getAll())
Adds the specified register as a live in.
ScheduleDAGSDNodes - A ScheduleDAG for scheduling SDNode-based DAGs.
const Instruction * getFirstNonPHI() const
Returns a pointer to the first instruction in this block that is not a PHINode instruction.
Definition: BasicBlock.cpp:189
READ_REGISTER, WRITE_REGISTER - This node represents llvm.register on the DAG, which implements the n...
Definition: ISDOpcodes.h:84
Wrapper pass for TargetTransformInfo.
std::vector< std::pair< MachineInstr *, unsigned > > PHINodesToUpdate
PHINodesToUpdate - A list of phi instructions whose operand list will be updated after processing the...
unsigned const MachineRegisterInfo * MRI
ScheduleDAGSDNodes * createVLIWDAGScheduler(SelectionDAGISel *IS, CodeGenOpt::Level OptLevel)
createVLIWDAGScheduler - Scheduler for VLIW targets.
StringRef getName() const
Return the name of the corresponding LLVM basic block, or an empty string.
bool HasTailCall
This is set to true if a call in the current block has been translated as a tail call.
MVT getPointerTy(const DataLayout &DL, uint32_t AS=0) const
Return the pointer type for the given address space, defaults to the pointer type from the data layou...
static void preassignSwiftErrorRegs(const TargetLowering *TLI, FunctionLoweringInfo *FuncInfo, BasicBlock::const_iterator Begin, BasicBlock::const_iterator End)
void Legalize()
This transforms the SelectionDAG into a SelectionDAG that is compatible with the target instruction s...
virtual unsigned getExceptionPointerRegister(const Constant *PersonalityFn) const
If a physical register, this returns the register that receives the exception address on entry to an ...
ScheduleDAGSDNodes * createDefaultScheduler(SelectionDAGISel *IS, CodeGenOpt::Level OptLevel)
createDefaultScheduler - This creates an instruction scheduler appropriate for the target...
use_iterator use_begin() const
Provide iteration support to walk over all uses of an SDNode.
constexpr bool isPowerOf2_32(uint32_t Value)
Return true if the argument is a power of two > 0.
Definition: MathExtras.h:428
Machine Value Type.
LLVM Basic Block Representation.
Definition: BasicBlock.h:57
const TargetSubtargetInfo & getSubtarget() const
getSubtarget - Return the subtarget for which this machine code is being compiled.
void getAnalysisUsage(AnalysisUsage &AU) const override
getAnalysisUsage - Subclasses that override getAnalysisUsage must call this.
DISubprogram * getSubprogram() const
Get the attached subprogram.
Definition: Metadata.cpp:1507
Value * getAddress() const
MachineFrameInfo & getFrameInfo()
getFrameInfo - Return the frame info object for the current function.
std::pair< unsigned, bool > getOrCreateSwiftErrorVRegUseAt(const Instruction *, const MachineBasicBlock *, const Value *)
MachineInstrBuilder & UseMI
static GCRegistry::Add< CoreCLRGC > E("coreclr", "CoreCLR-compatible GC")
This is an important base class in LLVM.
Definition: Constant.h:41
void removeDeadCode(MachineBasicBlock::iterator I, MachineBasicBlock::iterator E)
Remove all dead instructions between the I and E.
Definition: FastISel.cpp:544
iterator_range< value_op_iterator > op_values() const
void setUsesMSVCFloatingPoint(bool b)
const SDValue & getOperand(unsigned Num) const
ScheduleDAGSDNodes * createILPListDAGScheduler(SelectionDAGISel *IS, CodeGenOpt::Level)
createILPListDAGScheduler - This creates a bottom up register pressure aware list scheduler that trie...
SmallSet - This maintains a set of unique values, optimizing for the case when the set is small (less...
Definition: SmallSet.h:134
This file contains the declarations for the subclasses of Constant, which represent the different fla...
ConstantFP - Floating Point Values [float, double].
Definition: Constants.h:263
std::pair< iterator, bool > insert(PtrType Ptr)
Inserts Ptr if and only if there is no element in the container equal to Ptr.
Definition: SmallPtrSet.h:370
Interval::pred_iterator pred_begin(Interval *I)
pred_begin/pred_end - define methods so that Intervals may be used just like BasicBlocks can with the...
Definition: Interval.h:112
DIExpression * getExpression() const
virtual unsigned getExceptionSelectorRegister(const Constant *PersonalityFn) const
If a physical register, this returns the register that receives the exception typeid on entry to a la...
static unsigned getNumOperandRegisters(unsigned Flag)
getNumOperandRegisters - Extract the number of registers field from the inline asm operand flag...
Definition: InlineAsm.h:335
void RemoveDeadNodes()
This method deletes all unreachable nodes in the SelectionDAG.
EHPersonality classifyEHPersonality(const Value *Pers)
See if the given exception handling personality function is one that we understand.
Represent the analysis usage information of a pass.
void Combine(CombineLevel Level, AliasAnalysis *AA, CodeGenOpt::Level OptLevel)
This iterates over the nodes in the SelectionDAG, folding certain types of nodes together, or eliminating superfluous nodes.
This class provides iterator support for SDUse operands that use a specific SDNode.
use_instr_iterator use_instr_begin(unsigned RegNo) const
bool tryToFoldLoad(const LoadInst *LI, const Instruction *FoldInst)
We&#39;re checking to see if we can fold LI into FoldInst.
Definition: FastISel.cpp:2312
static LLVM_ATTRIBUTE_ALWAYS_INLINE bool CheckChildInteger(const unsigned char *MatcherTable, unsigned &MatcherIndex, SDValue N, unsigned ChildNo)
static unsigned getMemoryConstraintID(unsigned Flag)
Definition: InlineAsm.h:328
bool lowerArguments()
Do "fast" instruction selection for function arguments and append the machine instructions to the cur...
Definition: FastISel.cpp:146
static void replace(Module &M, GlobalVariable *Old, GlobalVariable *New)
static cl::opt< bool > UseMBPI("use-mbpi", cl::desc("use Machine Branch Probability Info"), cl::init(true), cl::Hidden)
const APInt & getAPIntValue() const
amdgpu Simplify well known AMD library false FunctionCallee Value * Arg
const Triple & getTargetTriple() const
Interval::pred_iterator pred_end(Interval *I)
Definition: Interval.h:115
AssertSext, AssertZext - These nodes record if a register contains a value that has already been zero...
Definition: ISDOpcodes.h:56
arg_iterator arg_begin()
Definition: Function.h:673
void setHasInlineAsm(bool B)
Set a flag that indicates that the function contains inline assembly.
virtual void PostprocessISelDAG()
PostprocessISelDAG() - This hook allows the target to hack on the graph right after selection...
void RemoveDeadNode(SDNode *N)
Remove the specified node from the system.
self_iterator getIterator()
Definition: ilist_node.h:81
void AddLiveOutRegInfo(unsigned Reg, unsigned NumSignBits, const KnownBits &Known)
AddLiveOutRegInfo - Adds LiveOutInfo for a register.
virtual MachineBasicBlock * EmitInstrWithCustomInserter(MachineInstr &MI, MachineBasicBlock *MBB) const
This method should be implemented by targets that mark instructions with the &#39;usesCustomInserter&#39; fla...
std::pair< NoneType, bool > insert(const T &V)
insert - Insert an element into the set if it isn't already there.
Definition: SmallSet.h:180
void getLocation(StringRef &RelativePath, unsigned &Line, unsigned &Column) const
Return location information for this diagnostic in three parts: the relative source file path...
auto find_if(R &&Range, UnaryPredicate P) -> decltype(adl_begin(Range))
Provide wrappers to std::find_if which take ranges instead of having to pass begin/end explicitly...
Definition: STLExtras.h:1213
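For illustration only (the container, predicate, and function name below are made up), the range-based wrapper removes the explicit begin/end calls:

#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallVector.h"
using namespace llvm;

int firstNegative(const SmallVector<int, 8> &Vals) {
  auto It = find_if(Vals, [](int V) { return V < 0; });
  return It != Vals.end() ? *It : 0;  // 0 when no negative element exists
}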
static unsigned getFlagWordForMem(unsigned InputFlag, unsigned Constraint)
Augment an existing flag word returned by getFlagWord with the constraint code for a memory constrain...
Definition: InlineAsm.h:311
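A hypothetical round trip through the inline-asm flag-word helpers listed here (the kind, operand count, and constraint code are illustrative values, not taken from a real DAG):

#include "llvm/IR/InlineAsm.h"
#include <cassert>
using namespace llvm;

void flagWordExample() {
  // Build a memory-kind flag word describing one operand.
  unsigned Flag = InlineAsm::getFlagWord(InlineAsm::Kind_Mem, /*NumOps=*/1);
  // Augment it with a memory constraint code, e.g. "m".
  Flag = InlineAsm::getFlagWordForMem(Flag, InlineAsm::Constraint_m);
  assert(InlineAsm::isMemKind(Flag));
  assert(InlineAsm::getMemoryConstraintID(Flag) == (unsigned)InlineAsm::Constraint_m);
  assert(InlineAsm::getNumOperandRegisters(Flag) == 1);  // the NumOps field survives
}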
static cl::opt< int > EnableFastISelAbort("fast-isel-abort", cl::Hidden, cl::desc("Enable abort calls when \"fast\" instruction selection " "fails to lower an instruction: 0 disable the abort, 1 will " "abort but for args, calls and terminators, 2 will also " "abort for argument lowering, and 3 will never fallback " "to SelectionDAG."))
LLVMContext & getContext() const
getContext - Return a reference to the LLVMContext associated with this function. ...
Definition: Function.cpp:192
unsigned ExceptionPointerVirtReg
If the current MBB is a landing pad, the exception pointer and exception selector registers are copie...
bool succ_empty(const Instruction *I)
Definition: CFG.h:253
SmallPtrSet< const BasicBlock *, 4 > VisitedBBs
VisitedBBs - The set of basic blocks visited thus far by instruction selection.
static void reportFastISelFailure(MachineFunction &MF, OptimizationRemarkEmitter &ORE, OptimizationRemarkMissed &R, bool ShouldAbort)
static bool isMemKind(unsigned Flag)
Definition: InlineAsm.h:276
bool isCopy() const
Extended Value Type.
Definition: ValueTypes.h:33
bool isImplicitDef() const
static LLVM_ATTRIBUTE_ALWAYS_INLINE bool CheckOrImm(const unsigned char *MatcherTable, unsigned &MatcherIndex, SDValue N, const SelectionDAGISel &SDISel)
TargetRegisterInfo base class - We assume that the target defines a static array of TargetRegisterDes...
const MachineBasicBlock & front() const
StringRef getName(unsigned Opcode) const
Returns the name for the instructions with the given opcode.
Definition: MCInstrInfo.h:50
size_t size() const
Definition: SmallVector.h:52
Value * getIncomingValue(unsigned i) const
Return incoming value number i.
void ComputePHILiveOutRegInfo(const PHINode *)
ComputePHILiveOutRegInfo - Compute LiveOutInfo for a PHI's destination register based on the LiveOutI...
bool isMachineOpcode() const
Test if this node has a post-isel opcode, directly corresponding to a MachineInstr opcode...
static void SplitCriticalSideEffectEdges(Function &Fn, DominatorTree *DT, LoopInfo *LI)
SplitCriticalSideEffectEdges - Look for critical edges with a PHI value that may trap on it...
unsigned getNumOperands() const
Return the number of values used by this operation.
std::string & str()
Flushes the stream contents to the target string and returns the string's reference.
Definition: raw_ostream.h:498
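A small, self-contained usage sketch (the function name and message text are arbitrary):

#include "llvm/Support/raw_ostream.h"
#include <string>
using namespace llvm;

std::string formatCount(unsigned N) {
  std::string Buf;
  raw_string_ostream OS(Buf);
  OS << "selected " << N << " blocks";
  return OS.str();  // flushes into Buf and returns a reference to it
}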
#define llvm_unreachable(msg)
Marks that the current location is not supposed to be reachable.
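Typical usage, shown on a made-up enum (Phase and phaseName are hypothetical); the macro documents, and in release builds optimizes on, the assumption that the spot after the switch can never be reached:

#include "llvm/Support/ErrorHandling.h"

enum class Phase { Lower, Select, Schedule };

const char *phaseName(Phase P) {
  switch (P) {
  case Phase::Lower:    return "lower";
  case Phase::Select:   return "select";
  case Phase::Schedule: return "schedule";
  }
  llvm_unreachable("covered switch above");  // reached only if P holds a corrupted value
}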
HANDLENODE node - Used as a handle for various purposes.
Definition: ISDOpcodes.h:754
iterator_range< IterTy > args() const
Definition: CallSite.h:222
const DIExpression * getDebugExpression() const
Return the complex address expression referenced by this DBG_VALUE instruction.
MachineBasicBlock * MBB
MBB - The current block.
virtual bool enableMachineScheduler() const
True if the subtarget should run MachineScheduler after aggressive coalescing.
Triple - Helper class for working with autoconf configuration names.
Definition: Triple.h:43
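A brief sketch of querying a Triple built from a configuration string (the triple string and helper name below are just examples):

#include "llvm/ADT/Triple.h"
using namespace llvm;

bool isLinuxX86_64(const Triple &T) {
  return T.getArch() == Triple::x86_64 && T.isOSLinux();
}
// e.g. isLinuxX86_64(Triple("x86_64-unknown-linux-gnu")) would return true.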
EH_LABEL - Represents a label in mid basic block used to track locations needed for debug and excepti...
Definition: ISDOpcodes.h:676
unsigned first
bool isOrEquivalentToAdd(const SDNode *N) const
TargetIntrinsicInfo - Interface to description of machine instruction set.
static LLVM_ATTRIBUTE_ALWAYS_INLINE bool CheckInteger(const unsigned char *MatcherTable, unsigned &MatcherIndex, SDValue N)
void recomputeInsertPt()
Reset InsertPt to prepare for inserting instructions into the current block.
Definition: FastISel.cpp:530
bool isGCRelocate(const CallBase *Call)
Definition: Statepoint.cpp:36
bool use_empty() const
Return true if there are no uses of this node.
bool SplitCSR
True if part of the CSRs will be handled via explicit copies.
static void mapWasmLandingPadIndex(MachineBasicBlock *MBB, const CatchPadInst *CPI)
TokenFactor - This node takes multiple tokens as input and produces a single token result...
Definition: ISDOpcodes.h:49
bool isBuildVectorAllOnes(const SDNode *N)
Return true if the specified node is a BUILD_VECTOR where all of the elements are ~0 or undef...
void dump() const
Dump this node, for debugging.