SelectionDAGISel.cpp
1//===- SelectionDAGISel.cpp - Implement the SelectionDAGISel class --------===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8//
9// This implements the SelectionDAGISel class.
10//
11//===----------------------------------------------------------------------===//
12
14#include "ScheduleDAGSDNodes.h"
15#include "SelectionDAGBuilder.h"
16#include "llvm/ADT/APInt.h"
17#include "llvm/ADT/DenseMap.h"
19#include "llvm/ADT/STLExtras.h"
22#include "llvm/ADT/Statistic.h"
23#include "llvm/ADT/StringRef.h"
27#include "llvm/Analysis/CFG.h"
64#include "llvm/IR/BasicBlock.h"
65#include "llvm/IR/Constants.h"
66#include "llvm/IR/DataLayout.h"
67#include "llvm/IR/DebugInfo.h"
69#include "llvm/IR/DebugLoc.h"
72#include "llvm/IR/Function.h"
73#include "llvm/IR/InlineAsm.h"
75#include "llvm/IR/Instruction.h"
78#include "llvm/IR/Intrinsics.h"
79#include "llvm/IR/IntrinsicsWebAssembly.h"
80#include "llvm/IR/Metadata.h"
81#include "llvm/IR/Module.h"
82#include "llvm/IR/PrintPasses.h"
83#include "llvm/IR/Statepoint.h"
84#include "llvm/IR/Type.h"
85#include "llvm/IR/User.h"
86#include "llvm/IR/Value.h"
88#include "llvm/MC/MCInstrDesc.h"
89#include "llvm/Pass.h"
95#include "llvm/Support/Debug.h"
98#include "llvm/Support/Timer.h"
104#include <algorithm>
105#include <cassert>
106#include <cstdint>
107#include <iterator>
108#include <limits>
109#include <memory>
110#include <optional>
111#include <string>
112#include <utility>
113#include <vector>
114
115using namespace llvm;
116
117#define DEBUG_TYPE "isel"
118#define ISEL_DUMP_DEBUG_TYPE DEBUG_TYPE "-dump"
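// Note: "isel" (DEBUG_TYPE) enables all of this file's debug output, while
// "isel-dump" enables only the DAG dumps below (via ISEL_DUMP) and honors the
// function print-list filter; see the ISEL_DUMP macro definition further down.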
119
120STATISTIC(NumFastIselFailures, "Number of instructions fast isel failed on");
121STATISTIC(NumFastIselSuccess, "Number of instructions fast isel selected");
122STATISTIC(NumFastIselBlocks, "Number of blocks selected entirely by fast isel");
123STATISTIC(NumDAGBlocks, "Number of blocks selected using DAG");
124STATISTIC(NumDAGIselRetries,"Number of times dag isel has to try another path");
125STATISTIC(NumEntryBlocks, "Number of entry blocks encountered");
126STATISTIC(NumFastIselFailLowerArguments,
127 "Number of entry blocks where fast isel failed to lower arguments");
128
130 "fast-isel-abort", cl::Hidden,
131 cl::desc("Enable abort calls when \"fast\" instruction selection "
132 "fails to lower an instruction: 0 disable the abort, 1 will "
133 "abort but for args, calls and terminators, 2 will also "
134 "abort for argument lowering, and 3 will never fallback "
135 "to SelectionDAG."));
136
138 "fast-isel-report-on-fallback", cl::Hidden,
139 cl::desc("Emit a diagnostic when \"fast\" instruction selection "
140 "falls back to SelectionDAG."));
141
142static cl::opt<bool>
143UseMBPI("use-mbpi",
144 cl::desc("use Machine Branch Probability Info"),
145 cl::init(true), cl::Hidden);
146
147#ifndef NDEBUG
150 cl::desc("Only display the basic block whose name "
151 "matches this for all view-*-dags options"));
152static cl::opt<bool>
153ViewDAGCombine1("view-dag-combine1-dags", cl::Hidden,
154 cl::desc("Pop up a window to show dags before the first "
155 "dag combine pass"));
156static cl::opt<bool>
157ViewLegalizeTypesDAGs("view-legalize-types-dags", cl::Hidden,
158 cl::desc("Pop up a window to show dags before legalize types"));
159static cl::opt<bool>
160 ViewDAGCombineLT("view-dag-combine-lt-dags", cl::Hidden,
161 cl::desc("Pop up a window to show dags before the post "
162 "legalize types dag combine pass"));
163static cl::opt<bool>
164 ViewLegalizeDAGs("view-legalize-dags", cl::Hidden,
165 cl::desc("Pop up a window to show dags before legalize"));
166static cl::opt<bool>
167ViewDAGCombine2("view-dag-combine2-dags", cl::Hidden,
168 cl::desc("Pop up a window to show dags before the second "
169 "dag combine pass"));
170static cl::opt<bool>
171ViewISelDAGs("view-isel-dags", cl::Hidden,
172 cl::desc("Pop up a window to show isel dags as they are selected"));
173static cl::opt<bool>
174ViewSchedDAGs("view-sched-dags", cl::Hidden,
175 cl::desc("Pop up a window to show sched dags as they are processed"));
176static cl::opt<bool>
177ViewSUnitDAGs("view-sunit-dags", cl::Hidden,
178 cl::desc("Pop up a window to show SUnit dags after they are processed"));
179#else
180static const bool ViewDAGCombine1 = false, ViewLegalizeTypesDAGs = false,
181 ViewDAGCombineLT = false, ViewLegalizeDAGs = false,
182 ViewDAGCombine2 = false, ViewISelDAGs = false,
183 ViewSchedDAGs = false, ViewSUnitDAGs = false;
184#endif
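// In release (NDEBUG) builds the view-* flags above are compile-time false, so
// the viewGraph() calls guarded by them later in this file fold away entirely.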
185
186#ifndef NDEBUG
187#define ISEL_DUMP(X) \
188 do { \
189 if (llvm::DebugFlag && \
190 (isCurrentDebugType(DEBUG_TYPE) || \
191 (isCurrentDebugType(ISEL_DUMP_DEBUG_TYPE) && MatchFilterFuncName))) { \
192 X; \
193 } \
194 } while (false)
195#else
196#define ISEL_DUMP(X) do { } while (false)
197#endif
198
199//===---------------------------------------------------------------------===//
200///
201/// RegisterScheduler class - Track the registration of instruction schedulers.
202///
203//===---------------------------------------------------------------------===//
204MachinePassRegistry<RegisterScheduler::FunctionPassCtor>
205 RegisterScheduler::Registry;
206
207//===---------------------------------------------------------------------===//
208///
209/// ISHeuristic command line option for instruction schedulers.
210///
211//===---------------------------------------------------------------------===//
212static cl::opt<RegisterScheduler::FunctionPassCtor, false,
213 RegisterPassParser<RegisterScheduler>>
214ISHeuristic("pre-RA-sched",
215 cl::init(&createDefaultScheduler), cl::Hidden,
216 cl::desc("Instruction schedulers available (before register"
217 " allocation):"));
218
219static RegisterScheduler
220defaultListDAGScheduler("default", "Best scheduler for the target",
221 createDefaultScheduler);
222
223static bool dontUseFastISelFor(const Function &Fn) {
224 // Don't enable FastISel for functions with swiftasync Arguments.
225 // Debug info on those is reliant on good Argument lowering, and FastISel is
226 // not capable of lowering the entire function. Mixing the two selectors tends
227 // to result in poor lowering of Arguments.
228 return any_of(Fn.args(), [](const Argument &Arg) {
229 return Arg.hasAttribute(Attribute::AttrKind::SwiftAsync);
230 });
231}
232
233namespace llvm {
234
235 //===--------------------------------------------------------------------===//
236 /// This class is used by SelectionDAGISel to temporarily override
237 /// the optimization level on a per-function basis.
238 class OptLevelChanger {
239 SelectionDAGISel &IS;
240 CodeGenOptLevel SavedOptLevel;
241 bool SavedFastISel;
242
243 public:
244 OptLevelChanger(SelectionDAGISel &ISel, CodeGenOptLevel NewOptLevel)
245 : IS(ISel) {
246 SavedOptLevel = IS.OptLevel;
247 SavedFastISel = IS.TM.Options.EnableFastISel;
248 if (NewOptLevel != SavedOptLevel) {
249 IS.OptLevel = NewOptLevel;
250 IS.TM.setOptLevel(NewOptLevel);
251 LLVM_DEBUG(dbgs() << "\nChanging optimization level for Function "
252 << IS.MF->getFunction().getName() << "\n");
253 LLVM_DEBUG(dbgs() << "\tBefore: -O" << static_cast<int>(SavedOptLevel)
254 << " ; After: -O" << static_cast<int>(NewOptLevel)
255 << "\n");
256 if (NewOptLevel == CodeGenOptLevel::None)
257 IS.TM.setFastISel(IS.TM.getO0WantsFastISel());
258 }
259 if (dontUseFastISelFor(IS.MF->getFunction()))
260 IS.TM.setFastISel(false);
261 LLVM_DEBUG(
262 dbgs() << "\tFastISel is "
263 << (IS.TM.Options.EnableFastISel ? "enabled" : "disabled")
264 << "\n");
265 }
266
267 ~OptLevelChanger() {
268 if (IS.OptLevel == SavedOptLevel)
269 return;
270 LLVM_DEBUG(dbgs() << "\nRestoring optimization level for Function "
271 << IS.MF->getFunction().getName() << "\n");
272 LLVM_DEBUG(dbgs() << "\tBefore: -O" << static_cast<int>(IS.OptLevel)
273 << " ; After: -O" << static_cast<int>(SavedOptLevel) << "\n");
274 IS.OptLevel = SavedOptLevel;
275 IS.TM.setOptLevel(SavedOptLevel);
276 IS.TM.setFastISel(SavedFastISel);
277 }
278 };
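// OptLevelChanger is used as an RAII guard below: the constructor drops the
// optimization level (and, for optnone/-O0 functions, the FastISel setting)
// for the current function, and the destructor restores the saved values once
// selection of that function has finished.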
279
280 //===--------------------------------------------------------------------===//
281 /// createDefaultScheduler - This creates an instruction scheduler appropriate
282 /// for the target.
283 ScheduleDAGSDNodes *createDefaultScheduler(SelectionDAGISel *IS,
284 CodeGenOptLevel OptLevel) {
285 const TargetLowering *TLI = IS->TLI;
286 const TargetSubtargetInfo &ST = IS->MF->getSubtarget();
287
288 // Try first to see if the Target has its own way of selecting a scheduler
289 if (auto *SchedulerCtor = ST.getDAGScheduler(OptLevel)) {
290 return SchedulerCtor(IS, OptLevel);
291 }
292
293 if (OptLevel == CodeGenOptLevel::None ||
294 (ST.enableMachineScheduler() && ST.enableMachineSchedDefaultSched()) ||
295 TLI->getSchedulingPreference() == Sched::Source)
296 return createSourceListDAGScheduler(IS, OptLevel);
297 if (TLI->getSchedulingPreference() == Sched::RegPressure)
298 return createBURRListDAGScheduler(IS, OptLevel);
299 if (TLI->getSchedulingPreference() == Sched::Hybrid)
300 return createHybridListDAGScheduler(IS, OptLevel);
301 if (TLI->getSchedulingPreference() == Sched::VLIW)
302 return createVLIWDAGScheduler(IS, OptLevel);
303 if (TLI->getSchedulingPreference() == Sched::Fast)
304 return createFastDAGScheduler(IS, OptLevel);
305 if (TLI->getSchedulingPreference() == Sched::Linearize)
306 return createDAGLinearizer(IS, OptLevel);
307 assert(TLI->getSchedulingPreference() == Sched::ILP &&
308 "Unknown sched type!");
309 return createILPListDAGScheduler(IS, OptLevel);
310 }
311
312} // end namespace llvm
313
314MachineBasicBlock *
315TargetLowering::EmitInstrWithCustomInserter(MachineInstr &MI,
316 MachineBasicBlock *MBB) const {
317#ifndef NDEBUG
318 dbgs() << "If a target marks an instruction with "
319 "'usesCustomInserter', it must implement "
320 "TargetLowering::EmitInstrWithCustomInserter!\n";
321#endif
322 llvm_unreachable(nullptr);
323}
324
325void TargetLowering::AdjustInstrPostInstrSelection(MachineInstr &MI,
326 SDNode *Node) const {
327 assert(!MI.hasPostISelHook() &&
328 "If a target marks an instruction with 'hasPostISelHook', "
329 "it must implement TargetLowering::AdjustInstrPostInstrSelection!");
330}
331
332//===----------------------------------------------------------------------===//
333// SelectionDAGISel code
334//===----------------------------------------------------------------------===//
335
336SelectionDAGISelLegacy::SelectionDAGISelLegacy(
337 char &ID, std::unique_ptr<SelectionDAGISel> S)
338 : MachineFunctionPass(ID), Selector(std::move(S)) {
344}
345
346bool SelectionDAGISelLegacy::runOnMachineFunction(MachineFunction &MF) {
347 // If we already selected that function, we do not need to run SDISel.
348 if (MF.getProperties().hasProperty(
349 MachineFunctionProperties::Property::Selected))
350 return false;
351
352 // Do some sanity-checking on the command-line options.
353 if (EnableFastISelAbort && !Selector->TM.Options.EnableFastISel)
354 report_fatal_error("-fast-isel-abort > 0 requires -fast-isel");
355
356 // Decide what flavour of variable location debug-info will be used, before
357 // we change the optimisation level.
358 MF.setUseDebugInstrRef(MF.shouldUseDebugInstrRef());
359
360 // Reset the target options before resetting the optimization
361 // level below.
362 // FIXME: This is a horrible hack and should be processed via
363 // codegen looking at the optimization level explicitly when
364 // it wants to look at it.
365 Selector->TM.resetTargetOptions(MF.getFunction());
366 // Reset OptLevel to None for optnone functions.
367 CodeGenOptLevel NewOptLevel = skipFunction(MF.getFunction())
368 ? CodeGenOptLevel::None
369 : Selector->OptLevel;
370
371 Selector->MF = &MF;
372 OptLevelChanger OLC(*Selector, NewOptLevel);
373 Selector->initializeAnalysisResults(*this);
374 return Selector->runOnMachineFunction(MF);
375}
376
377SelectionDAGISel::SelectionDAGISel(TargetMachine &tm, CodeGenOptLevel OL)
378 : TM(tm), FuncInfo(new FunctionLoweringInfo()),
379 SwiftError(new SwiftErrorValueTracking()),
380 CurDAG(new SelectionDAG(tm, OL)),
381 SDB(std::make_unique<SelectionDAGBuilder>(*CurDAG, *FuncInfo, *SwiftError,
382 OL)),
383 OptLevel(OL) {
389}
390
391SelectionDAGISel::~SelectionDAGISel() {
392 delete CurDAG;
393 delete SwiftError;
394}
395
396void SelectionDAGISelLegacy::getAnalysisUsage(AnalysisUsage &AU) const {
397 CodeGenOptLevel OptLevel = Selector->OptLevel;
398 if (OptLevel != CodeGenOptLevel::None)
404#ifndef NDEBUG
406#endif
408 if (UseMBPI && OptLevel != CodeGenOptLevel::None)
411 // AssignmentTrackingAnalysis only runs if assignment tracking is enabled for
412 // the module.
415 if (OptLevel != CodeGenOptLevel::None)
416 LazyBlockFrequencyInfoPass::getLazyBFIAnalysisUsage(AU);
417 MachineFunctionPass::getAnalysisUsage(AU);
418}
419
420static void computeUsesMSVCFloatingPoint(const Triple &TT, const Function &F,
421 MachineModuleInfo &MMI) {
422 // Only needed for MSVC
423 if (!TT.isWindowsMSVCEnvironment())
424 return;
425
426 // If it's already set, nothing to do.
427 if (MMI.usesMSVCFloatingPoint())
428 return;
429
430 for (const Instruction &I : instructions(F)) {
431 if (I.getType()->isFPOrFPVectorTy()) {
432 MMI.setUsesMSVCFloatingPoint(true);
433 return;
434 }
435 for (const auto &Op : I.operands()) {
436 if (Op->getType()->isFPOrFPVectorTy()) {
437 MMI.setUsesMSVCFloatingPoint(true);
438 return;
439 }
440 }
441 }
442}
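// (Context, not part of this file: AsmPrinter consults usesMSVCFloatingPoint()
// to decide whether the MSVC "_fltused" reference must be emitted.)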
443
444PreservedAnalyses
445SelectionDAGISelPass::run(MachineFunction &MF,
446 MachineFunctionAnalysisManager &MFAM) {
447 // If we already selected that function, we do not need to run SDISel.
448 if (MF.getProperties().hasProperty(
449 MachineFunctionProperties::Property::Selected))
450 return PreservedAnalyses::all();
451
452 // Do some sanity-checking on the command-line options.
453 if (EnableFastISelAbort && !Selector->TM.Options.EnableFastISel)
454 report_fatal_error("-fast-isel-abort > 0 requires -fast-isel");
455
456 // Decide what flavour of variable location debug-info will be used, before
457 // we change the optimisation level.
458 MF.setUseDebugInstrRef(MF.shouldUseDebugInstrRef());
459
460 // Reset the target options before resetting the optimization
461 // level below.
462 // FIXME: This is a horrible hack and should be processed via
463 // codegen looking at the optimization level explicitly when
464 // it wants to look at it.
465 Selector->TM.resetTargetOptions(MF.getFunction());
466 // Reset OptLevel to None for optnone functions.
467 // TODO: Add a function analysis to handle this.
468 Selector->MF = &MF;
469 // Reset OptLevel to None for optnone functions.
470 CodeGenOptLevel NewOptLevel = MF.getFunction().hasOptNone()
471 ? CodeGenOptLevel::None
472 : Selector->OptLevel;
473
474 OptLevelChanger OLC(*Selector, NewOptLevel);
475 Selector->initializeAnalysisResults(MFAM);
476 Selector->runOnMachineFunction(MF);
477
478 return getMachineFunctionPassPreservedAnalyses();
479}
480
484 .getManager();
486 Function &Fn = MF->getFunction();
487#ifndef NDEBUG
488 FuncName = Fn.getName();
489 MatchFilterFuncName = isFunctionInPrintList(FuncName);
490#else
491 (void)MatchFilterFuncName;
492#endif
493
494 TII = MF->getSubtarget().getInstrInfo();
495 TLI = MF->getSubtarget().getTargetLowering();
496 RegInfo = &MF->getRegInfo();
497 LibInfo = &FAM.getResult<TargetLibraryAnalysis>(Fn);
498 GFI = Fn.hasGC() ? &FAM.getResult<GCFunctionAnalysis>(Fn) : nullptr;
499 ORE = std::make_unique<OptimizationRemarkEmitter>(&Fn);
501 auto *PSI = MAMP.getCachedResult<ProfileSummaryAnalysis>(*Fn.getParent());
502 BlockFrequencyInfo *BFI = nullptr;
504 if (PSI && PSI->hasProfileSummary() && OptLevel != CodeGenOptLevel::None)
505 BFI = &FAM.getResult<BlockFrequencyAnalysis>(Fn);
506
507 FunctionVarLocs const *FnVarLocs = nullptr;
510
512 CurDAG->init(*MF, *ORE, MFAM, LibInfo, UA, PSI, BFI, FnVarLocs);
513
514 // Now get the optional analyzes if we want to.
515 // This is based on the possibly changed OptLevel (after optnone is taken
516 // into account). That's unfortunate but OK because it just means we won't
517 // ask for passes that have been required anyway.
518
519 if (UseMBPI && OptLevel != CodeGenOptLevel::None)
520 FuncInfo->BPI = &FAM.getResult<BranchProbabilityAnalysis>(Fn);
521 else
522 FuncInfo->BPI = nullptr;
523
524 if (OptLevel != CodeGenOptLevel::None)
525 AA = &FAM.getResult<AAManager>(Fn);
526 else
527 AA = nullptr;
528
529 SP = &FAM.getResult<SSPLayoutAnalysis>(Fn);
530
531#if !defined(NDEBUG) && LLVM_ENABLE_ABI_BREAKING_CHECKS
533#endif
534}
535
536void SelectionDAGISel::initializeAnalysisResults(MachineFunctionPass &MFP) {
537 Function &Fn = MF->getFunction();
538#ifndef NDEBUG
539 FuncName = Fn.getName();
540 MatchFilterFuncName = isFunctionInPrintList(FuncName);
541#else
542 (void)MatchFilterFuncName;
543#endif
544
545 TII = MF->getSubtarget().getInstrInfo();
546 TLI = MF->getSubtarget().getTargetLowering();
547 RegInfo = &MF->getRegInfo();
548 LibInfo = &MFP.getAnalysis<TargetLibraryInfoWrapperPass>().getTLI(Fn);
549 GFI = Fn.hasGC() ? &MFP.getAnalysis<GCModuleInfo>().getFunctionInfo(Fn)
550 : nullptr;
551 ORE = std::make_unique<OptimizationRemarkEmitter>(&Fn);
552 AC = &MFP.getAnalysis<AssumptionCacheTracker>().getAssumptionCache(Fn);
553 auto *PSI = &MFP.getAnalysis<ProfileSummaryInfoWrapperPass>().getPSI();
554 BlockFrequencyInfo *BFI = nullptr;
555 if (PSI && PSI->hasProfileSummary() && OptLevel != CodeGenOptLevel::None)
556 BFI = &MFP.getAnalysis<LazyBlockFrequencyInfoPass>().getBFI();
557
558 FunctionVarLocs const *FnVarLocs = nullptr;
559 if (isAssignmentTrackingEnabled(*Fn.getParent()))
560 FnVarLocs = MFP.getAnalysis<AssignmentTrackingAnalysis>().getResults();
561
562 UniformityInfo *UA = nullptr;
563 if (auto *UAPass = MFP.getAnalysisIfAvailable<UniformityInfoWrapperPass>())
564 UA = &UAPass->getUniformityInfo();
565 CurDAG->init(*MF, *ORE, &MFP, LibInfo, UA, PSI, BFI, FnVarLocs);
566
567 // Now get the optional analyzes if we want to.
568 // This is based on the possibly changed OptLevel (after optnone is taken
569 // into account). That's unfortunate but OK because it just means we won't
570 // ask for passes that have been required anyway.
571
572 if (UseMBPI && OptLevel != CodeGenOptLevel::None)
573 FuncInfo->BPI =
574 &MFP.getAnalysis<BranchProbabilityInfoWrapperPass>().getBPI();
575 else
576 FuncInfo->BPI = nullptr;
577
578 if (OptLevel != CodeGenOptLevel::None)
579 AA = &MFP.getAnalysis<AAResultsWrapperPass>().getAAResults();
580 else
581 AA = nullptr;
582
583 SP = &MFP.getAnalysis<StackProtector>().getLayoutInfo();
584
585#if !defined(NDEBUG) && LLVM_ENABLE_ABI_BREAKING_CHECKS
587#endif
588}
589
590bool SelectionDAGISel::runOnMachineFunction(MachineFunction &mf) {
591 SwiftError->setFunction(mf);
592 const Function &Fn = mf.getFunction();
593
594 bool InstrRef = mf.shouldUseDebugInstrRef();
595
596 FuncInfo->set(MF->getFunction(), *MF, CurDAG);
597
598 ISEL_DUMP(dbgs() << "\n\n\n=== " << FuncName << '\n');
599
600 SDB->init(GFI, AA, AC, LibInfo);
601
602 MF->setHasInlineAsm(false);
603
604 FuncInfo->SplitCSR = false;
605
606 // We split CSR if the target supports it for the given function
607 // and the function has only return exits.
608 if (TLI->supportSplitCSR(MF)) {
609 FuncInfo->SplitCSR = true;
610
611 // Collect all the return blocks.
612 for (const BasicBlock &BB : Fn) {
613 if (!succ_empty(&BB))
614 continue;
615
616 const Instruction *Term = BB.getTerminator();
617 if (isa<UnreachableInst>(Term) || isa<ReturnInst>(Term))
618 continue;
619
620 // Bail out if the exit block is not Return nor Unreachable.
621 FuncInfo->SplitCSR = false;
622 break;
623 }
624 }
625
626 MachineBasicBlock *EntryMBB = &MF->front();
627 if (FuncInfo->SplitCSR)
628 // This performs initialization so lowering for SplitCSR will be correct.
629 TLI->initializeSplitCSR(EntryMBB);
630
631 SelectAllBasicBlocks(Fn);
632 if (FastISelFailed && EnableFastISelFallbackReport) {
633 DiagnosticInfoISelFallback DiagFallback(Fn);
634 Fn.getContext().diagnose(DiagFallback);
635 }
636
637 // Replace forward-declared registers with the registers containing
638 // the desired value.
639 // Note: it is important that this happens **before** the call to
640 // EmitLiveInCopies, since implementations can skip copies of unused
641 // registers. If we don't apply the reg fixups before, some registers may
642 // appear as unused and will be skipped, resulting in bad MI.
643 MachineRegisterInfo &MRI = MF->getRegInfo();
644 for (DenseMap<Register, Register>::iterator I = FuncInfo->RegFixups.begin(),
645 E = FuncInfo->RegFixups.end();
646 I != E; ++I) {
647 Register From = I->first;
648 Register To = I->second;
649 // If To is also scheduled to be replaced, find what its ultimate
650 // replacement is.
651 while (true) {
652 DenseMap<Register, Register>::iterator J = FuncInfo->RegFixups.find(To);
653 if (J == E)
654 break;
655 To = J->second;
656 }
657 // Make sure the new register has a sufficiently constrained register class.
658 if (From.isVirtual() && To.isVirtual())
659 MRI.constrainRegClass(To, MRI.getRegClass(From));
660 // Replace it.
661
662 // Replacing one register with another won't touch the kill flags.
663 // We need to conservatively clear the kill flags as a kill on the old
664 // register might dominate existing uses of the new register.
665 if (!MRI.use_empty(To))
666 MRI.clearKillFlags(From);
667 MRI.replaceRegWith(From, To);
668 }
669
670 // If the first basic block in the function has live ins that need to be
671 // copied into vregs, emit the copies into the top of the block before
672 // emitting the code for the block.
673 const TargetRegisterInfo &TRI = *MF->getSubtarget().getRegisterInfo();
674 RegInfo->EmitLiveInCopies(EntryMBB, TRI, *TII);
675
676 // Insert copies in the entry block and the return blocks.
677 if (FuncInfo->SplitCSR) {
678 SmallVector<MachineBasicBlock*, 4> Returns;
679 // Collect all the return blocks.
680 for (MachineBasicBlock &MBB : mf) {
681 if (!MBB.succ_empty())
682 continue;
683
684 MachineBasicBlock::iterator Term = MBB.getFirstTerminator();
685 if (Term != MBB.end() && Term->isReturn()) {
686 Returns.push_back(&MBB);
687 continue;
688 }
689 }
690 TLI->insertCopiesSplitCSR(EntryMBB, Returns);
691 }
692
693 DenseMap<unsigned, unsigned> LiveInMap;
694 if (!FuncInfo->ArgDbgValues.empty())
695 for (std::pair<unsigned, unsigned> LI : RegInfo->liveins())
696 if (LI.second)
697 LiveInMap.insert(LI);
698
699 // Insert DBG_VALUE instructions for function arguments to the entry block.
700 for (unsigned i = 0, e = FuncInfo->ArgDbgValues.size(); i != e; ++i) {
701 MachineInstr *MI = FuncInfo->ArgDbgValues[e - i - 1];
702 assert(MI->getOpcode() != TargetOpcode::DBG_VALUE_LIST &&
703 "Function parameters should not be described by DBG_VALUE_LIST.");
704 bool hasFI = MI->getDebugOperand(0).isFI();
705 Register Reg =
706 hasFI ? TRI.getFrameRegister(*MF) : MI->getDebugOperand(0).getReg();
707 if (Reg.isPhysical())
708 EntryMBB->insert(EntryMBB->begin(), MI);
709 else {
710 MachineInstr *Def = RegInfo->getVRegDef(Reg);
711 if (Def) {
712 MachineBasicBlock::iterator InsertPos = Def;
713 // FIXME: VR def may not be in entry block.
714 Def->getParent()->insert(std::next(InsertPos), MI);
715 } else
716 LLVM_DEBUG(dbgs() << "Dropping debug info for dead vreg"
717 << Register::virtReg2Index(Reg) << "\n");
718 }
719
720 // Don't try and extend through copies in instruction referencing mode.
721 if (InstrRef)
722 continue;
723
724 // If Reg is live-in then update debug info to track its copy in a vreg.
725 DenseMap<unsigned, unsigned>::iterator LDI = LiveInMap.find(Reg);
726 if (LDI != LiveInMap.end()) {
727 assert(!hasFI && "There's no handling of frame pointer updating here yet "
728 "- add if needed");
729 MachineInstr *Def = RegInfo->getVRegDef(LDI->second);
730 MachineBasicBlock::iterator InsertPos = Def;
731 const MDNode *Variable = MI->getDebugVariable();
732 const MDNode *Expr = MI->getDebugExpression();
733 DebugLoc DL = MI->getDebugLoc();
734 bool IsIndirect = MI->isIndirectDebugValue();
735 if (IsIndirect)
736 assert(MI->getDebugOffset().getImm() == 0 &&
737 "DBG_VALUE with nonzero offset");
738 assert(cast<DILocalVariable>(Variable)->isValidLocationForIntrinsic(DL) &&
739 "Expected inlined-at fields to agree");
740 assert(MI->getOpcode() != TargetOpcode::DBG_VALUE_LIST &&
741 "Didn't expect to see a DBG_VALUE_LIST here");
742 // Def is never a terminator here, so it is ok to increment InsertPos.
743 BuildMI(*EntryMBB, ++InsertPos, DL, TII->get(TargetOpcode::DBG_VALUE),
744 IsIndirect, LDI->second, Variable, Expr);
745
746 // If this vreg is directly copied into an exported register then
747 // that COPY instructions also need DBG_VALUE, if it is the only
748 // user of LDI->second.
749 MachineInstr *CopyUseMI = nullptr;
750 for (MachineInstr &UseMI : RegInfo->use_instructions(LDI->second)) {
751 if (UseMI.isDebugValue())
752 continue;
753 if (UseMI.isCopy() && !CopyUseMI && UseMI.getParent() == EntryMBB) {
754 CopyUseMI = &UseMI;
755 continue;
756 }
757 // Otherwise this is another use or second copy use.
758 CopyUseMI = nullptr;
759 break;
760 }
761 if (CopyUseMI &&
762 TRI.getRegSizeInBits(LDI->second, MRI) ==
763 TRI.getRegSizeInBits(CopyUseMI->getOperand(0).getReg(), MRI)) {
764 // Use MI's debug location, which describes where Variable was
765 // declared, rather than whatever is attached to CopyUseMI.
766 MachineInstr *NewMI =
767 BuildMI(*MF, DL, TII->get(TargetOpcode::DBG_VALUE), IsIndirect,
768 CopyUseMI->getOperand(0).getReg(), Variable, Expr);
769 MachineBasicBlock::iterator Pos = CopyUseMI;
770 EntryMBB->insertAfter(Pos, NewMI);
771 }
772 }
773 }
774
775 // For debug-info, in instruction referencing mode, we need to perform some
776 // post-isel maintenance.
777 if (MF->useDebugInstrRef())
778 MF->finalizeDebugInstrRefs();
779
780 // Determine if there are any calls in this machine function.
781 MachineFrameInfo &MFI = MF->getFrameInfo();
782 for (const auto &MBB : *MF) {
783 if (MFI.hasCalls() && MF->hasInlineAsm())
784 break;
785
786 for (const auto &MI : MBB) {
787 const MCInstrDesc &MCID = TII->get(MI.getOpcode());
788 if ((MCID.isCall() && !MCID.isReturn()) ||
789 MI.isStackAligningInlineAsm()) {
790 MFI.setHasCalls(true);
791 }
792 if (MI.isInlineAsm()) {
793 MF->setHasInlineAsm(true);
794 }
795 }
796 }
797
798 // Determine if floating point is used for MSVC.
799 computeUsesMSVCFloatingPoint(TM.getTargetTriple(), Fn, MF->getMMI());
800
801 // Release function-specific state. SDB and CurDAG are already cleared
802 // at this point.
803 FuncInfo->clear();
804
805 ISEL_DUMP(dbgs() << "*** MachineFunction at end of ISel ***\n");
806 ISEL_DUMP(MF->print(dbgs()));
807
808 return true;
809}
810
811static void reportFastISelFailure(MachineFunction &MF,
812 OptimizationRemarkEmitter &ORE,
813 OptimizationRemarkMissed &R,
814 bool ShouldAbort) {
815 // Print the function name explicitly if we don't have a debug location (which
816 // makes the diagnostic less useful) or if we're going to emit a raw error.
817 if (!R.getLocation().isValid() || ShouldAbort)
818 R << (" (in function: " + MF.getName() + ")").str();
819
820 if (ShouldAbort)
821 report_fatal_error(Twine(R.getMsg()));
822
823 ORE.emit(R);
824 LLVM_DEBUG(dbgs() << R.getMsg() << "\n");
825}
826
827void SelectionDAGISel::SelectBasicBlock(BasicBlock::const_iterator Begin,
828 BasicBlock::const_iterator End,
829 bool &HadTailCall) {
830 // Allow creating illegal types during DAG building for the basic block.
831 CurDAG->NewNodesMustHaveLegalTypes = false;
832
833 // Lower the instructions. If a call is emitted as a tail call, cease emitting
834 // nodes for this block. If an instruction is elided, don't emit it, but do
835 // handle any debug-info attached to it.
836 for (BasicBlock::const_iterator I = Begin; I != End && !SDB->HasTailCall; ++I) {
837 if (!ElidedArgCopyInstrs.count(&*I))
838 SDB->visit(*I);
839 else
840 SDB->visitDbgInfo(*I);
841 }
842
843 // Make sure the root of the DAG is up-to-date.
844 CurDAG->setRoot(SDB->getControlRoot());
845 HadTailCall = SDB->HasTailCall;
846 SDB->resolveOrClearDbgInfo();
847 SDB->clear();
848
849 // Final step, emit the lowered DAG as machine code.
850 CodeGenAndEmitDAG();
851}
852
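// ComputeLiveOutVRegInfo walks the just-built DAG from the root along chain
// operands and, for every CopyToReg into a virtual register of integer type,
// records the known sign-bit count and KnownBits in FunctionLoweringInfo so
// that later blocks can reuse the information.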
853void SelectionDAGISel::ComputeLiveOutVRegInfo() {
854 SmallPtrSet<SDNode *, 16> Added;
855 SmallVector<SDNode *, 128> Worklist;
856
857 Worklist.push_back(CurDAG->getRoot().getNode());
858 Added.insert(CurDAG->getRoot().getNode());
859
860 KnownBits Known;
861
862 do {
863 SDNode *N = Worklist.pop_back_val();
864
865 // Otherwise, add all chain operands to the worklist.
866 for (const SDValue &Op : N->op_values())
867 if (Op.getValueType() == MVT::Other && Added.insert(Op.getNode()).second)
868 Worklist.push_back(Op.getNode());
869
870 // If this is a CopyToReg with a vreg dest, process it.
871 if (N->getOpcode() != ISD::CopyToReg)
872 continue;
873
874 unsigned DestReg = cast<RegisterSDNode>(N->getOperand(1))->getReg();
875 if (!Register::isVirtualRegister(DestReg))
876 continue;
877
878 // Ignore non-integer values.
879 SDValue Src = N->getOperand(2);
880 EVT SrcVT = Src.getValueType();
881 if (!SrcVT.isInteger())
882 continue;
883
884 unsigned NumSignBits = CurDAG->ComputeNumSignBits(Src);
885 Known = CurDAG->computeKnownBits(Src);
886 FuncInfo->AddLiveOutRegInfo(DestReg, NumSignBits, Known);
887 } while (!Worklist.empty());
888}
889
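// CodeGenAndEmitDAG runs the per-block DAG pipeline: combine, type
// legalization, optional vector legalization (with re-legalization and
// re-combining), DAG legalization, a final combine, instruction selection,
// scheduling, and finally emission of the scheduled nodes into the
// MachineBasicBlock.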
890void SelectionDAGISel::CodeGenAndEmitDAG() {
891 StringRef GroupName = "sdag";
892 StringRef GroupDescription = "Instruction Selection and Scheduling";
893 std::string BlockName;
894 bool MatchFilterBB = false;
895 (void)MatchFilterBB;
896
897 // Pre-type legalization allows creation of any node types.
898 CurDAG->NewNodesMustHaveLegalTypes = false;
899
900#ifndef NDEBUG
901 MatchFilterBB = (FilterDAGBasicBlockName.empty() ||
902 FilterDAGBasicBlockName ==
903 FuncInfo->MBB->getBasicBlock()->getName());
904#endif
905#ifdef NDEBUG
906 if (ViewDAGCombine1 || ViewLegalizeTypesDAGs || ViewDAGCombineLT ||
907 ViewLegalizeDAGs || ViewDAGCombine2 || ViewISelDAGs || ViewSchedDAGs ||
908 ViewSUnitDAGs)
909#endif
910 {
911 BlockName =
912 (MF->getName() + ":" + FuncInfo->MBB->getBasicBlock()->getName()).str();
913 }
914 ISEL_DUMP(dbgs() << "\nInitial selection DAG: "
915 << printMBBReference(*FuncInfo->MBB) << " '" << BlockName
916 << "'\n";
917 CurDAG->dump());
918
919#if LLVM_ENABLE_ABI_BREAKING_CHECKS
920 if (TTI->hasBranchDivergence())
921 CurDAG->VerifyDAGDivergence();
922#endif
923
924 if (ViewDAGCombine1 && MatchFilterBB)
925 CurDAG->viewGraph("dag-combine1 input for " + BlockName);
926
927 // Run the DAG combiner in pre-legalize mode.
928 {
929 NamedRegionTimer T("combine1", "DAG Combining 1", GroupName,
930 GroupDescription, TimePassesIsEnabled);
931 CurDAG->Combine(BeforeLegalizeTypes, AA, OptLevel);
932 }
933
934 ISEL_DUMP(dbgs() << "\nOptimized lowered selection DAG: "
935 << printMBBReference(*FuncInfo->MBB) << " '" << BlockName
936 << "'\n";
937 CurDAG->dump());
938
939#if LLVM_ENABLE_ABI_BREAKING_CHECKS
940 if (TTI->hasBranchDivergence())
941 CurDAG->VerifyDAGDivergence();
942#endif
943
944 // Second step, hack on the DAG until it only uses operations and types that
945 // the target supports.
946 if (ViewLegalizeTypesDAGs && MatchFilterBB)
947 CurDAG->viewGraph("legalize-types input for " + BlockName);
948
949 bool Changed;
950 {
951 NamedRegionTimer T("legalize_types", "Type Legalization", GroupName,
952 GroupDescription, TimePassesIsEnabled);
953 Changed = CurDAG->LegalizeTypes();
954 }
955
956 ISEL_DUMP(dbgs() << "\nType-legalized selection DAG: "
957 << printMBBReference(*FuncInfo->MBB) << " '" << BlockName
958 << "'\n";
959 CurDAG->dump());
960
961#if LLVM_ENABLE_ABI_BREAKING_CHECKS
962 if (TTI->hasBranchDivergence())
963 CurDAG->VerifyDAGDivergence();
964#endif
965
966 // Only allow creation of legal node types.
968
969 if (Changed) {
970 if (ViewDAGCombineLT && MatchFilterBB)
971 CurDAG->viewGraph("dag-combine-lt input for " + BlockName);
972
973 // Run the DAG combiner in post-type-legalize mode.
974 {
975 NamedRegionTimer T("combine_lt", "DAG Combining after legalize types",
976 GroupName, GroupDescription, TimePassesIsEnabled);
977 CurDAG->Combine(AfterLegalizeTypes, AA, OptLevel);
978 }
979
980 ISEL_DUMP(dbgs() << "\nOptimized type-legalized selection DAG: "
981 << printMBBReference(*FuncInfo->MBB) << " '" << BlockName
982 << "'\n";
983 CurDAG->dump());
984
985#if LLVM_ENABLE_ABI_BREAKING_CHECKS
986 if (TTI->hasBranchDivergence())
987 CurDAG->VerifyDAGDivergence();
988#endif
989 }
990
991 {
992 NamedRegionTimer T("legalize_vec", "Vector Legalization", GroupName,
993 GroupDescription, TimePassesIsEnabled);
994 Changed = CurDAG->LegalizeVectors();
995 }
996
997 if (Changed) {
998 ISEL_DUMP(dbgs() << "\nVector-legalized selection DAG: "
999 << printMBBReference(*FuncInfo->MBB) << " '" << BlockName
1000 << "'\n";
1001 CurDAG->dump());
1002
1003#if LLVM_ENABLE_ABI_BREAKING_CHECKS
1004 if (TTI->hasBranchDivergence())
1005 CurDAG->VerifyDAGDivergence();
1006#endif
1007
1008 {
1009 NamedRegionTimer T("legalize_types2", "Type Legalization 2", GroupName,
1010 GroupDescription, TimePassesIsEnabled);
1011 CurDAG->LegalizeTypes();
1012 }
1013
1014 ISEL_DUMP(dbgs() << "\nVector/type-legalized selection DAG: "
1015 << printMBBReference(*FuncInfo->MBB) << " '" << BlockName
1016 << "'\n";
1017 CurDAG->dump());
1018
1019#if LLVM_ENABLE_ABI_BREAKING_CHECKS
1020 if (TTI->hasBranchDivergence())
1021 CurDAG->VerifyDAGDivergence();
1022#endif
1023
1024 if (ViewDAGCombineLT && MatchFilterBB)
1025 CurDAG->viewGraph("dag-combine-lv input for " + BlockName);
1026
1027 // Run the DAG combiner in post-type-legalize mode.
1028 {
1029 NamedRegionTimer T("combine_lv", "DAG Combining after legalize vectors",
1030 GroupName, GroupDescription, TimePassesIsEnabled);
1031 CurDAG->Combine(AfterLegalizeVectorOps, AA, OptLevel);
1032 }
1033
1034 ISEL_DUMP(dbgs() << "\nOptimized vector-legalized selection DAG: "
1035 << printMBBReference(*FuncInfo->MBB) << " '" << BlockName
1036 << "'\n";
1037 CurDAG->dump());
1038
1039#if LLVM_ENABLE_ABI_BREAKING_CHECKS
1040 if (TTI->hasBranchDivergence())
1041 CurDAG->VerifyDAGDivergence();
1042#endif
1043 }
1044
1045 if (ViewLegalizeDAGs && MatchFilterBB)
1046 CurDAG->viewGraph("legalize input for " + BlockName);
1047
1048 {
1049 NamedRegionTimer T("legalize", "DAG Legalization", GroupName,
1050 GroupDescription, TimePassesIsEnabled);
1051 CurDAG->Legalize();
1052 }
1053
1054 ISEL_DUMP(dbgs() << "\nLegalized selection DAG: "
1055 << printMBBReference(*FuncInfo->MBB) << " '" << BlockName
1056 << "'\n";
1057 CurDAG->dump());
1058
1059#if LLVM_ENABLE_ABI_BREAKING_CHECKS
1060 if (TTI->hasBranchDivergence())
1061 CurDAG->VerifyDAGDivergence();
1062#endif
1063
1064 if (ViewDAGCombine2 && MatchFilterBB)
1065 CurDAG->viewGraph("dag-combine2 input for " + BlockName);
1066
1067 // Run the DAG combiner in post-legalize mode.
1068 {
1069 NamedRegionTimer T("combine2", "DAG Combining 2", GroupName,
1070 GroupDescription, TimePassesIsEnabled);
1071 CurDAG->Combine(AfterLegalizeDAG, AA, OptLevel);
1072 }
1073
1074 ISEL_DUMP(dbgs() << "\nOptimized legalized selection DAG: "
1075 << printMBBReference(*FuncInfo->MBB) << " '" << BlockName
1076 << "'\n";
1077 CurDAG->dump());
1078
1079#if LLVM_ENABLE_ABI_BREAKING_CHECKS
1080 if (TTI->hasBranchDivergence())
1081 CurDAG->VerifyDAGDivergence();
1082#endif
1083
1084 if (OptLevel != CodeGenOptLevel::None)
1085 ComputeLiveOutVRegInfo();
1086
1087 if (ViewISelDAGs && MatchFilterBB)
1088 CurDAG->viewGraph("isel input for " + BlockName);
1089
1090 // Third, instruction select all of the operations to machine code, adding the
1091 // code to the MachineBasicBlock.
1092 {
1093 NamedRegionTimer T("isel", "Instruction Selection", GroupName,
1094 GroupDescription, TimePassesIsEnabled);
1095 DoInstructionSelection();
1096 }
1097
1098 ISEL_DUMP(dbgs() << "\nSelected selection DAG: "
1099 << printMBBReference(*FuncInfo->MBB) << " '" << BlockName
1100 << "'\n";
1101 CurDAG->dump());
1102
1103 if (ViewSchedDAGs && MatchFilterBB)
1104 CurDAG->viewGraph("scheduler input for " + BlockName);
1105
1106 // Schedule machine code.
1107 ScheduleDAGSDNodes *Scheduler = CreateScheduler();
1108 {
1109 NamedRegionTimer T("sched", "Instruction Scheduling", GroupName,
1110 GroupDescription, TimePassesIsEnabled);
1111 Scheduler->Run(CurDAG, FuncInfo->MBB);
1112 }
1113
1114 if (ViewSUnitDAGs && MatchFilterBB)
1115 Scheduler->viewGraph();
1116
1117 // Emit machine code to BB. This can change 'BB' to the last block being
1118 // inserted into.
1119 MachineBasicBlock *FirstMBB = FuncInfo->MBB, *LastMBB;
1120 {
1121 NamedRegionTimer T("emit", "Instruction Creation", GroupName,
1122 GroupDescription, TimePassesIsEnabled);
1123
1124 // FuncInfo->InsertPt is passed by reference and set to the end of the
1125 // scheduled instructions.
1126 LastMBB = FuncInfo->MBB = Scheduler->EmitSchedule(FuncInfo->InsertPt);
1127 }
1128
1129 // If the block was split, make sure we update any references that are used to
1130 // update PHI nodes later on.
1131 if (FirstMBB != LastMBB)
1132 SDB->UpdateSplitBlock(FirstMBB, LastMBB);
1133
1134 // Free the scheduler state.
1135 {
1136 NamedRegionTimer T("cleanup", "Instruction Scheduling Cleanup", GroupName,
1137 GroupDescription, TimePassesIsEnabled);
1138 delete Scheduler;
1139 }
1140
1141 // Free the SelectionDAG state, now that we're finished with it.
1142 CurDAG->clear();
1143}
1144
1145namespace {
1146
1147/// ISelUpdater - helper class to handle updates of the instruction selection
1148/// graph.
1149class ISelUpdater : public SelectionDAG::DAGUpdateListener {
1150 SelectionDAG::allnodes_iterator &ISelPosition;
1151
1152public:
1153 ISelUpdater(SelectionDAG &DAG, SelectionDAG::allnodes_iterator &isp)
1154 : SelectionDAG::DAGUpdateListener(DAG), ISelPosition(isp) {}
1155
1156 /// NodeDeleted - Handle nodes deleted from the graph. If the node being
1157 /// deleted is the current ISelPosition node, update ISelPosition.
1158 ///
1159 void NodeDeleted(SDNode *N, SDNode *E) override {
1160 if (ISelPosition == SelectionDAG::allnodes_iterator(N))
1161 ++ISelPosition;
1162 }
1163
1164 /// NodeInserted - Handle new nodes inserted into the graph: propagate
1165 /// metadata from root nodes that also applies to new nodes, in case the root
1166 /// is later deleted.
1167 void NodeInserted(SDNode *N) override {
1168 SDNode *CurNode = &*ISelPosition;
1169 if (MDNode *MD = DAG.getPCSections(CurNode))
1170 DAG.addPCSections(N, MD);
1171 if (MDNode *MMRA = DAG.getMMRAMetadata(CurNode))
1172 DAG.addMMRAMetadata(N, MMRA);
1173 }
1174};
1175
1176} // end anonymous namespace
1177
1178// This function is used to enforce the topological node id property
1179// leveraged during instruction selection. Before the selection process all
1180// nodes are given a non-negative id such that all nodes have a greater id than
1181// their operands. As this holds transitively we can prune checks that a node N
1182 // is a predecessor of another node M by not recursively checking through M's
1183// operands if N's ID is larger than M's ID. This significantly improves
1184// performance of various legality checks (e.g. IsLegalToFold / UpdateChains).
1185
1186// However, when we fuse multiple nodes into a single node during the
1187// selection we may induce a predecessor relationship between inputs and
1188// outputs of distinct nodes being merged, violating the topological property.
1189// Should a fused node have a successor which has yet to be selected,
1190// our legality checks would be incorrect. To avoid this we mark all unselected
1191// successor nodes, i.e. id != -1, as invalid for pruning by bit-negating (x =>
1192// (-(x+1))) the ids and modify our pruning check to ignore negative Ids of M.
1193// We use bit-negation to more clearly enforce that node id -1 can only be
1194 // achieved by selected nodes. As the conversion is reversible to the original
1195// Id, topological pruning can still be leveraged when looking for unselected
1196// nodes. This method is called internally in all ISel replacement related
1197// functions.
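// For example: a node whose id is 5 is marked invalid as -(5 + 1) == -6;
// getUninvalidatedNodeId() recovers 5 via -(-6 + 1), and a node id of
// exactly -1 still uniquely means "already selected".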
1198void SelectionDAGISel::EnforceNodeIdInvariant(SDNode *Node) {
1199 SmallVector<SDNode *, 4> Nodes;
1200 Nodes.push_back(Node);
1201
1202 while (!Nodes.empty()) {
1203 SDNode *N = Nodes.pop_back_val();
1204 for (auto *U : N->uses()) {
1205 auto UId = U->getNodeId();
1206 if (UId > 0) {
1207 InvalidateNodeId(U);
1208 Nodes.push_back(U);
1209 }
1210 }
1211 }
1212}
1213
1214// InvalidateNodeId - As explained in EnforceNodeIdInvariant, mark a
1215// NodeId with the equivalent node id which is invalid for topological
1216// pruning.
1217void SelectionDAGISel::InvalidateNodeId(SDNode *N) {
1218 int InvalidId = -(N->getNodeId() + 1);
1219 N->setNodeId(InvalidId);
1220}
1221
1222// getUninvalidatedNodeId - get original uninvalidated node id.
1223int SelectionDAGISel::getUninvalidatedNodeId(SDNode *N) {
1224 int Id = N->getNodeId();
1225 if (Id < -1)
1226 return -(Id + 1);
1227 return Id;
1228}
1229
1230void SelectionDAGISel::DoInstructionSelection() {
1231 LLVM_DEBUG(dbgs() << "===== Instruction selection begins: "
1232 << printMBBReference(*FuncInfo->MBB) << " '"
1233 << FuncInfo->MBB->getName() << "'\n");
1234
1235 PreprocessISelDAG();
1236
1237 // Select target instructions for the DAG.
1238 {
1239 // Number all nodes with a topological order and set DAGSize.
1240 CurDAG->AssignTopologicalOrder();
1241
1242 // Create a dummy node (which is not added to allnodes), that adds
1243 // a reference to the root node, preventing it from being deleted,
1244 // and tracking any changes of the root.
1245 HandleSDNode Dummy(CurDAG->getRoot());
1246 SelectionDAG::allnodes_iterator ISelPosition(CurDAG->getRoot().getNode());
1247 ++ISelPosition;
1248
1249 // Make sure that ISelPosition gets properly updated when nodes are deleted
1250 // in calls made from this function. New nodes inherit relevant metadata.
1251 ISelUpdater ISU(*CurDAG, ISelPosition);
1252
1253 // The AllNodes list is now topological-sorted. Visit the
1254 // nodes by starting at the end of the list (the root of the
1255 // graph) and preceding back toward the beginning (the entry
1256 // node).
1257 while (ISelPosition != CurDAG->allnodes_begin()) {
1258 SDNode *Node = &*--ISelPosition;
1259 // Skip dead nodes. DAGCombiner is expected to eliminate all dead nodes,
1260 // but there are currently some corner cases that it misses. Also, this
1261 // makes it theoretically possible to disable the DAGCombiner.
1262 if (Node->use_empty())
1263 continue;
1264
1265#ifndef NDEBUG
1266 SmallVector<SDNode *, 4> Nodes;
1267 Nodes.push_back(Node);
1268
1269 while (!Nodes.empty()) {
1270 auto N = Nodes.pop_back_val();
1271 if (N->getOpcode() == ISD::TokenFactor || N->getNodeId() < 0)
1272 continue;
1273 for (const SDValue &Op : N->op_values()) {
1274 if (Op->getOpcode() == ISD::TokenFactor)
1275 Nodes.push_back(Op.getNode());
1276 else {
1277 // We rely on topological ordering of node ids for checking for
1278 // cycles when fusing nodes during selection. All unselected nodes
1279 // successors of an already selected node should have a negative id.
1280 // This assertion will catch such cases. If this assertion triggers
1281 // it is likely that you are using DAG-level Value/Node replacement functions
1282 // (versus equivalent ISEL replacement) in backend-specific
1283 // selections. See comment in EnforceNodeIdInvariant for more
1284 // details.
1285 assert(Op->getNodeId() != -1 &&
1286 "Node has already selected predecessor node");
1287 }
1288 }
1289 }
1290#endif
1291
1292 // When we are using non-default rounding modes or FP exception behavior
1293 // FP operations are represented by StrictFP pseudo-operations. For
1294 // targets that do not (yet) understand strict FP operations directly,
1295 // we convert them to normal FP opcodes instead at this point. This
1296 // will allow them to be handled by existing target-specific instruction
1297 // selectors.
1298 if (!TLI->isStrictFPEnabled() && Node->isStrictFPOpcode()) {
1299 // For some opcodes, we need to call TLI->getOperationAction using
1300 // the first operand type instead of the result type. Note that this
1301 // must match what SelectionDAGLegalize::LegalizeOp is doing.
1302 EVT ActionVT;
1303 switch (Node->getOpcode()) {
1304 case ISD::STRICT_FP_TO_SINT:
1305 case ISD::STRICT_FP_TO_UINT:
1306 case ISD::STRICT_LRINT:
1307 case ISD::STRICT_LLRINT:
1308 case ISD::STRICT_LROUND:
1309 case ISD::STRICT_LLROUND:
1310 case ISD::STRICT_FSETCC:
1311 case ISD::STRICT_FSETCCS:
1312 ActionVT = Node->getOperand(1).getValueType();
1313 break;
1314 default:
1315 ActionVT = Node->getValueType(0);
1316 break;
1317 }
1318 if (TLI->getOperationAction(Node->getOpcode(), ActionVT)
1319 == TargetLowering::Expand)
1320 Node = CurDAG->mutateStrictFPToFP(Node);
1321 }
1322
1323 LLVM_DEBUG(dbgs() << "\nISEL: Starting selection on root node: ";
1324 Node->dump(CurDAG));
1325
1326 Select(Node);
1327 }
1328
1329 CurDAG->setRoot(Dummy.getValue());
1330 }
1331
1332 LLVM_DEBUG(dbgs() << "\n===== Instruction selection ends:\n");
1333
1334 PostprocessISelDAG();
1335}
1336
1337static bool hasExceptionPointerOrCodeUser(const CatchPadInst *CPI) {
1338 for (const User *U : CPI->users()) {
1339 if (const IntrinsicInst *EHPtrCall = dyn_cast<IntrinsicInst>(U)) {
1340 Intrinsic::ID IID = EHPtrCall->getIntrinsicID();
1341 if (IID == Intrinsic::eh_exceptionpointer ||
1342 IID == Intrinsic::eh_exceptioncode)
1343 return true;
1344 }
1345 }
1346 return false;
1347}
1348
1349// wasm.landingpad.index intrinsic is for associating a landing pad index number
1350// with a catchpad instruction. Retrieve the landing pad index in the intrinsic
1351// and store the mapping in the function.
1352static void mapWasmLandingPadIndex(MachineBasicBlock *MBB,
1353 const CatchPadInst *CPI) {
1354 MachineFunction *MF = MBB->getParent();
1355 // In case of single catch (...), we don't emit LSDA, so we don't need
1356 // this information.
1357 bool IsSingleCatchAllClause =
1358 CPI->arg_size() == 1 &&
1359 cast<Constant>(CPI->getArgOperand(0))->isNullValue();
1360 // catchpads for longjmp use an empty type list, e.g. catchpad within %0 []
1361 // and they don't need LSDA info
1362 bool IsCatchLongjmp = CPI->arg_size() == 0;
1363 if (!IsSingleCatchAllClause && !IsCatchLongjmp) {
1364 // Create a mapping from landing pad label to landing pad index.
1365 bool IntrFound = false;
1366 for (const User *U : CPI->users()) {
1367 if (const auto *Call = dyn_cast<IntrinsicInst>(U)) {
1368 Intrinsic::ID IID = Call->getIntrinsicID();
1369 if (IID == Intrinsic::wasm_landingpad_index) {
1370 Value *IndexArg = Call->getArgOperand(1);
1371 int Index = cast<ConstantInt>(IndexArg)->getZExtValue();
1372 MF->setWasmLandingPadIndex(MBB, Index);
1373 IntrFound = true;
1374 break;
1375 }
1376 }
1377 }
1378 assert(IntrFound && "wasm.landingpad.index intrinsic not found!");
1379 (void)IntrFound;
1380 }
1381}
1382
1383/// PrepareEHLandingPad - Emit an EH_LABEL, set up live-in registers, and
1384/// do other setup for EH landing-pad blocks.
1385bool SelectionDAGISel::PrepareEHLandingPad() {
1386 MachineBasicBlock *MBB = FuncInfo->MBB;
1387 const Constant *PersonalityFn = FuncInfo->Fn->getPersonalityFn();
1388 const BasicBlock *LLVMBB = MBB->getBasicBlock();
1389 const TargetRegisterClass *PtrRC =
1390 TLI->getRegClassFor(TLI->getPointerTy(CurDAG->getDataLayout()));
1391
1392 auto Pers = classifyEHPersonality(PersonalityFn);
1393
1394 // Catchpads have one live-in register, which typically holds the exception
1395 // pointer or code.
1396 if (isFuncletEHPersonality(Pers)) {
1397 if (const auto *CPI = dyn_cast<CatchPadInst>(LLVMBB->getFirstNonPHI())) {
1398 if (hasExceptionPointerOrCodeUser(CPI)) {
1399 // Get or create the virtual register to hold the pointer or code. Mark
1400 // the live in physreg and copy into the vreg.
1401 MCPhysReg EHPhysReg = TLI->getExceptionPointerRegister(PersonalityFn);
1402 assert(EHPhysReg && "target lacks exception pointer register");
1403 MBB->addLiveIn(EHPhysReg);
1404 unsigned VReg = FuncInfo->getCatchPadExceptionPointerVReg(CPI, PtrRC);
1405 BuildMI(*MBB, FuncInfo->InsertPt, SDB->getCurDebugLoc(),
1406 TII->get(TargetOpcode::COPY), VReg)
1407 .addReg(EHPhysReg, RegState::Kill);
1408 }
1409 }
1410 return true;
1411 }
1412
1413 // Add a label to mark the beginning of the landing pad. Deletion of the
1414 // landing pad can thus be detected via the MachineModuleInfo.
1415 MCSymbol *Label = MF->addLandingPad(MBB);
1416
1417 const MCInstrDesc &II = TII->get(TargetOpcode::EH_LABEL);
1418 BuildMI(*MBB, FuncInfo->InsertPt, SDB->getCurDebugLoc(), II)
1419 .addSym(Label);
1420
1421 // If the unwinder does not preserve all registers, ensure that the
1422 // function marks the clobbered registers as used.
1423 const TargetRegisterInfo &TRI = *MF->getSubtarget().getRegisterInfo();
1424 if (auto *RegMask = TRI.getCustomEHPadPreservedMask(*MF))
1425 MF->getRegInfo().addPhysRegsUsedFromRegMask(RegMask);
1426
1427 if (Pers == EHPersonality::Wasm_CXX) {
1428 if (const auto *CPI = dyn_cast<CatchPadInst>(LLVMBB->getFirstNonPHI()))
1429 mapWasmLandingPadIndex(MBB, CPI);
1430 } else {
1431 // Assign the call site to the landing pad's begin label.
1432 MF->setCallSiteLandingPad(Label, SDB->LPadToCallSiteMap[MBB]);
1433 // Mark exception register as live in.
1434 if (unsigned Reg = TLI->getExceptionPointerRegister(PersonalityFn))
1435 FuncInfo->ExceptionPointerVirtReg = MBB->addLiveIn(Reg, PtrRC);
1436 // Mark exception selector register as live in.
1437 if (unsigned Reg = TLI->getExceptionSelectorRegister(PersonalityFn))
1438 FuncInfo->ExceptionSelectorVirtReg = MBB->addLiveIn(Reg, PtrRC);
1439 }
1440
1441 return true;
1442}
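// Summary: for funclet-style personalities the catchpad gets a COPY of the
// exception pointer register into a vreg (when that value is actually used);
// for all other personalities we emit an EH_LABEL and mark the exception
// pointer and selector registers as block live-ins.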
1443
1444// Mark and Report IPToState for each Block under IsEHa
1445void SelectionDAGISel::reportIPToStateForBlocks(MachineFunction *MF) {
1446 MachineModuleInfo &MMI = MF->getMMI();
1447 WinEHFuncInfo *EHInfo = MF->getWinEHFuncInfo();
1448 if (!EHInfo)
1449 return;
1450 for (MachineBasicBlock &MBB : *MF) {
1451 const BasicBlock *BB = MBB.getBasicBlock();
1452 int State = EHInfo->BlockToStateMap[BB];
1453 if (BB->getFirstMayFaultInst()) {
1454 // Report IP range only for blocks with Faulty inst
1455 auto MBBb = MBB.getFirstNonPHI();
1456 MachineInstr *MIb = &*MBBb;
1457 if (MIb->isTerminator())
1458 continue;
1459
1460 // Insert EH Labels
1461 MCSymbol *BeginLabel = MMI.getContext().createTempSymbol();
1462 MCSymbol *EndLabel = MMI.getContext().createTempSymbol();
1463 EHInfo->addIPToStateRange(State, BeginLabel, EndLabel);
1464 BuildMI(MBB, MBBb, SDB->getCurDebugLoc(),
1465 TII->get(TargetOpcode::EH_LABEL))
1466 .addSym(BeginLabel);
1467 auto MBBe = MBB.instr_end();
1468 MachineInstr *MIe = &*(--MBBe);
1469 // insert before (possible multiple) terminators
1470 while (MIe->isTerminator())
1471 MIe = &*(--MBBe);
1472 ++MBBe;
1473 BuildMI(MBB, MBBe, SDB->getCurDebugLoc(),
1474 TII->get(TargetOpcode::EH_LABEL))
1475 .addSym(EndLabel);
1476 }
1477 }
1478}
1479
1480/// isFoldedOrDeadInstruction - Return true if the specified instruction is
1481/// side-effect free and is either dead or folded into a generated instruction.
1482/// Return false if it needs to be emitted.
1483static bool isFoldedOrDeadInstruction(const Instruction *I,
1484 const FunctionLoweringInfo &FuncInfo) {
1485 return !I->mayWriteToMemory() && // Side-effecting instructions aren't folded.
1486 !I->isTerminator() && // Terminators aren't folded.
1487 !isa<DbgInfoIntrinsic>(I) && // Debug instructions aren't folded.
1488 !I->isEHPad() && // EH pad instructions aren't folded.
1489 !FuncInfo.isExportedInst(I); // Exported instrs must be computed.
1490}
1491
1492static bool processIfEntryValueDbgDeclare(FunctionLoweringInfo &FuncInfo,
1493 const Value *Arg, DIExpression *Expr,
1494 DILocalVariable *Var,
1495 DebugLoc DbgLoc) {
1496 if (!Expr->isEntryValue() || !isa<Argument>(Arg))
1497 return false;
1498
1499 auto ArgIt = FuncInfo.ValueMap.find(Arg);
1500 if (ArgIt == FuncInfo.ValueMap.end())
1501 return false;
1502 Register ArgVReg = ArgIt->getSecond();
1503
1504 // Find the corresponding livein physical register to this argument.
1505 for (auto [PhysReg, VirtReg] : FuncInfo.RegInfo->liveins())
1506 if (VirtReg == ArgVReg) {
1507 // Append an op deref to account for the fact that this is a dbg_declare.
1508 Expr = DIExpression::append(Expr, dwarf::DW_OP_deref);
1509 FuncInfo.MF->setVariableDbgInfo(Var, Expr, PhysReg, DbgLoc);
1510 LLVM_DEBUG(dbgs() << "processDbgDeclare: setVariableDbgInfo Var=" << *Var
1511 << ", Expr=" << *Expr << ", MCRegister=" << PhysReg
1512 << ", DbgLoc=" << DbgLoc << "\n");
1513 return true;
1514 }
1515 return false;
1516}
1517
1518static bool processDbgDeclare(FunctionLoweringInfo &FuncInfo,
1519 const Value *Address, DIExpression *Expr,
1520 DILocalVariable *Var, DebugLoc DbgLoc) {
1521 if (!Address) {
1522 LLVM_DEBUG(dbgs() << "processDbgDeclares skipping " << *Var
1523 << " (bad address)\n");
1524 return false;
1525 }
1526
1527 if (processIfEntryValueDbgDeclare(FuncInfo, Address, Expr, Var, DbgLoc))
1528 return true;
1529
1530 MachineFunction *MF = FuncInfo.MF;
1531 const DataLayout &DL = MF->getDataLayout();
1532
1533 assert(Var && "Missing variable");
1534 assert(DbgLoc && "Missing location");
1535
1536 // Look through casts and constant offset GEPs. These mostly come from
1537 // inalloca.
1538 APInt Offset(DL.getTypeSizeInBits(Address->getType()), 0);
1539 Address = Address->stripAndAccumulateInBoundsConstantOffsets(DL, Offset);
1540
1541 // Check if the variable is a static alloca or a byval or inalloca
1542 // argument passed in memory. If it is not, then we will ignore this
1543 // intrinsic and handle this during isel like dbg.value.
1544 int FI = std::numeric_limits<int>::max();
1545 if (const auto *AI = dyn_cast<AllocaInst>(Address)) {
1546 auto SI = FuncInfo.StaticAllocaMap.find(AI);
1547 if (SI != FuncInfo.StaticAllocaMap.end())
1548 FI = SI->second;
1549 } else if (const auto *Arg = dyn_cast<Argument>(Address))
1550 FI = FuncInfo.getArgumentFrameIndex(Arg);
1551
1552 if (FI == std::numeric_limits<int>::max())
1553 return false;
1554
1555 if (Offset.getBoolValue())
1556 Expr = DIExpression::prepend(Expr, DIExpression::ApplyOffset,
1557 Offset.getZExtValue());
1558
1559 LLVM_DEBUG(dbgs() << "processDbgDeclare: setVariableDbgInfo Var=" << *Var
1560 << ", Expr=" << *Expr << ", FI=" << FI
1561 << ", DbgLoc=" << DbgLoc << "\n");
1562 MF->setVariableDbgInfo(Var, Expr, FI, DbgLoc);
1563 return true;
1564}
1565
1566/// Collect llvm.dbg.declare information. This is done after argument lowering
1567/// in case the declarations refer to arguments.
1568static void processDbgDeclares(FunctionLoweringInfo &FuncInfo) {
1569 for (const auto &I : instructions(*FuncInfo.Fn)) {
1570 const auto *DI = dyn_cast<DbgDeclareInst>(&I);
1571 if (DI && processDbgDeclare(FuncInfo, DI->getAddress(), DI->getExpression(),
1572 DI->getVariable(), DI->getDebugLoc()))
1573 FuncInfo.PreprocessedDbgDeclares.insert(DI);
1574 for (const DbgVariableRecord &DVR : filterDbgVars(I.getDbgRecordRange())) {
1575 if (DVR.Type == DbgVariableRecord::LocationType::Declare &&
1576 processDbgDeclare(FuncInfo, DVR.getVariableLocationOp(0),
1577 DVR.getExpression(), DVR.getVariable(),
1578 DVR.getDebugLoc()))
1579 FuncInfo.PreprocessedDVRDeclares.insert(&DVR);
1580 }
1581 }
1582}
1583
1584/// Collect single location variable information generated with assignment
1585/// tracking. This is done after argument lowering in case the declarations
1586/// refer to arguments.
1587static void processSingleLocVars(FunctionLoweringInfo &FuncInfo,
1588 FunctionVarLocs const *FnVarLocs) {
1589 for (auto It = FnVarLocs->single_locs_begin(),
1590 End = FnVarLocs->single_locs_end();
1591 It != End; ++It) {
1592 assert(!It->Values.hasArgList() && "Single loc variadic ops not supported");
1593 processDbgDeclare(FuncInfo, It->Values.getVariableLocationOp(0), It->Expr,
1594 FnVarLocs->getDILocalVariable(It->VariableID), It->DL);
1595 }
1596}
1597
1598void SelectionDAGISel::SelectAllBasicBlocks(const Function &Fn) {
1599 FastISelFailed = false;
1600 // Initialize the Fast-ISel state, if needed.
1601 FastISel *FastIS = nullptr;
1602 if (TM.Options.EnableFastISel) {
1603 LLVM_DEBUG(dbgs() << "Enabling fast-isel\n");
1604 FastIS = TLI->createFastISel(*FuncInfo, LibInfo);
1605 }
1606
1607 ReversePostOrderTraversal<const Function*> RPOT(&Fn);
1608
1609 // Lower arguments up front. An RPO iteration always visits the entry block
1610 // first.
1611 assert(*RPOT.begin() == &Fn.getEntryBlock());
1612 ++NumEntryBlocks;
1613
1614 // Set up FuncInfo for ISel. Entry blocks never have PHIs.
1615 FuncInfo->MBB = FuncInfo->MBBMap[&Fn.getEntryBlock()];
1616 FuncInfo->InsertPt = FuncInfo->MBB->begin();
1617
1618 CurDAG->setFunctionLoweringInfo(FuncInfo.get());
1619
1620 if (!FastIS) {
1621 LowerArguments(Fn);
1622 } else {
1623 // See if fast isel can lower the arguments.
1624 FastIS->startNewBlock();
1625 if (!FastIS->lowerArguments()) {
1626 FastISelFailed = true;
1627 // Fast isel failed to lower these arguments
1628 ++NumFastIselFailLowerArguments;
1629
1630 OptimizationRemarkMissed R("sdagisel", "FastISelFailure",
1631 Fn.getSubprogram(),
1632 &Fn.getEntryBlock());
1633 R << "FastISel didn't lower all arguments: "
1634 << ore::NV("Prototype", Fn.getFunctionType());
1635 reportFastISelFailure(*MF, *ORE, R, EnableFastISelAbort > 1);
1636
1637 // Use SelectionDAG argument lowering
1638 LowerArguments(Fn);
1639 CurDAG->setRoot(SDB->getControlRoot());
1640 SDB->clear();
1641 CodeGenAndEmitDAG();
1642 }
1643
1644 // If we inserted any instructions at the beginning, make a note of
1645 // where they are, so we can be sure to emit subsequent instructions
1646 // after them.
1647 if (FuncInfo->InsertPt != FuncInfo->MBB->begin())
1648 FastIS->setLastLocalValue(&*std::prev(FuncInfo->InsertPt));
1649 else
1650 FastIS->setLastLocalValue(nullptr);
1651 }
1652
1653 bool Inserted = SwiftError->createEntriesInEntryBlock(SDB->getCurDebugLoc());
1654
1655 if (FastIS && Inserted)
1656 FastIS->setLastLocalValue(&*std::prev(FuncInfo->InsertPt));
1657
1660 "expected AssignmentTrackingAnalysis pass results");
1661 processSingleLocVars(*FuncInfo, CurDAG->getFunctionVarLocs());
1662 } else {
1663 processDbgDeclares(*FuncInfo);
1664 }
1665
1666 // Iterate over all basic blocks in the function.
1667 for (const BasicBlock *LLVMBB : RPOT) {
1668 if (OptLevel != CodeGenOptLevel::None) {
1669 bool AllPredsVisited = true;
1670 for (const BasicBlock *Pred : predecessors(LLVMBB)) {
1671 if (!FuncInfo->VisitedBBs.count(Pred)) {
1672 AllPredsVisited = false;
1673 break;
1674 }
1675 }
1676
1677 if (AllPredsVisited) {
1678 for (const PHINode &PN : LLVMBB->phis())
1679 FuncInfo->ComputePHILiveOutRegInfo(&PN);
1680 } else {
1681 for (const PHINode &PN : LLVMBB->phis())
1682 FuncInfo->InvalidatePHILiveOutRegInfo(&PN);
1683 }
1684
1685 FuncInfo->VisitedBBs.insert(LLVMBB);
1686 }
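// PHI live-out register info can only be computed once every predecessor has
// been visited; with the reverse post-order walk above, that is the common
// case except around loop back-edges, where the info is invalidated instead.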
1687
1688 BasicBlock::const_iterator const Begin =
1689 LLVMBB->getFirstNonPHI()->getIterator();
1690 BasicBlock::const_iterator const End = LLVMBB->end();
1691 BasicBlock::const_iterator BI = End;
1692
1693 FuncInfo->MBB = FuncInfo->MBBMap[LLVMBB];
1694 if (!FuncInfo->MBB)
1695 continue; // Some blocks like catchpads have no code or MBB.
1696
1697 // Insert new instructions after any phi or argument setup code.
1698 FuncInfo->InsertPt = FuncInfo->MBB->end();
1699
1700 // Setup an EH landing-pad block.
1701 FuncInfo->ExceptionPointerVirtReg = 0;
1702 FuncInfo->ExceptionSelectorVirtReg = 0;
1703 if (LLVMBB->isEHPad())
1704 if (!PrepareEHLandingPad())
1705 continue;
1706
1707 // Before doing SelectionDAG ISel, see if FastISel has been requested.
1708 if (FastIS) {
1709 if (LLVMBB != &Fn.getEntryBlock())
1710 FastIS->startNewBlock();
1711
1712 unsigned NumFastIselRemaining = std::distance(Begin, End);
1713
1714 // Pre-assign swifterror vregs.
1715 SwiftError->preassignVRegs(FuncInfo->MBB, Begin, End);
1716
1717 // Do FastISel on as many instructions as possible.
1718 for (; BI != Begin; --BI) {
1719 const Instruction *Inst = &*std::prev(BI);
1720
1721 // If we no longer require this instruction, skip it.
1722 if (isFoldedOrDeadInstruction(Inst, *FuncInfo) ||
1723 ElidedArgCopyInstrs.count(Inst)) {
1724 --NumFastIselRemaining;
1725 FastIS->handleDbgInfo(Inst);
1726 continue;
1727 }
1728
1729 // Bottom-up: reset the insert pos at the top, after any local-value
1730 // instructions.
1731 FastIS->recomputeInsertPt();
1732
1733 // Try to select the instruction with FastISel.
1734 if (FastIS->selectInstruction(Inst)) {
1735 --NumFastIselRemaining;
1736 ++NumFastIselSuccess;
1737
1738 FastIS->handleDbgInfo(Inst);
1739 // If fast isel succeeded, skip over all the folded instructions, and
1740 // then see if there is a load right before the selected instructions.
1741 // Try to fold the load if so.
1742 const Instruction *BeforeInst = Inst;
1743 while (BeforeInst != &*Begin) {
1744 BeforeInst = &*std::prev(BasicBlock::const_iterator(BeforeInst));
1745 if (!isFoldedOrDeadInstruction(BeforeInst, *FuncInfo))
1746 break;
1747 }
1748 if (BeforeInst != Inst && isa<LoadInst>(BeforeInst) &&
1749 BeforeInst->hasOneUse() &&
1750 FastIS->tryToFoldLoad(cast<LoadInst>(BeforeInst), Inst)) {
1751 // If we succeeded, don't re-select the load.
1753 << "FastISel folded load: " << *BeforeInst << "\n");
1754 FastIS->handleDbgInfo(BeforeInst);
1755 BI = std::next(BasicBlock::const_iterator(BeforeInst));
1756 --NumFastIselRemaining;
1757 ++NumFastIselSuccess;
1758 }
1759 continue;
1760 }
1761
1762 FastISelFailed = true;
1763
1764 // Then handle certain instructions as single-LLVM-Instruction blocks.
1765 // We cannot separate out GCrelocates to their own blocks since we need
1766 // to keep track of gc-relocates for a particular gc-statepoint. This is
1767 // done by SelectionDAGBuilder::LowerAsSTATEPOINT, called before
1768 // visitGCRelocate.
1769 if (isa<CallInst>(Inst) && !isa<GCStatepointInst>(Inst) &&
1770 !isa<GCRelocateInst>(Inst) && !isa<GCResultInst>(Inst)) {
1771 OptimizationRemarkMissed R("sdagisel", "FastISelFailure",
1772 Inst->getDebugLoc(), LLVMBB);
1773
1774 R << "FastISel missed call";
1775
1776 if (R.isEnabled() || EnableFastISelAbort) {
1777 std::string InstStrStorage;
1778 raw_string_ostream InstStr(InstStrStorage);
1779 InstStr << *Inst;
1780
1781 R << ": " << InstStrStorage;
1782 }
1783
1784 reportFastISelFailure(*MF, *ORE, R, EnableFastISelAbort > 2);
1785
1786 if (!Inst->getType()->isVoidTy() && !Inst->getType()->isTokenTy() &&
1787 !Inst->use_empty()) {
1788 Register &R = FuncInfo->ValueMap[Inst];
1789 if (!R)
1790 R = FuncInfo->CreateRegs(Inst);
1791 }
1792
1793 bool HadTailCall = false;
1794 MachineBasicBlock::iterator SavedInsertPt = FuncInfo->InsertPt;
1795 SelectBasicBlock(Inst->getIterator(), BI, HadTailCall);
1796
1797 // If the call was emitted as a tail call, we're done with the block.
1798 // We also need to delete any previously emitted instructions.
1799 if (HadTailCall) {
1800 FastIS->removeDeadCode(SavedInsertPt, FuncInfo->MBB->end());
1801 --BI;
1802 break;
1803 }
1804
1805 // Recompute NumFastIselRemaining as Selection DAG instruction
1806 // selection may have handled the call, input args, etc.
1807 unsigned RemainingNow = std::distance(Begin, BI);
1808 NumFastIselFailures += NumFastIselRemaining - RemainingNow;
1809 NumFastIselRemaining = RemainingNow;
1810 continue;
1811 }
1812
1813 OptimizationRemarkMissed R("sdagisel", "FastISelFailure",
1814 Inst->getDebugLoc(), LLVMBB);
1815
1816 bool ShouldAbort = EnableFastISelAbort;
1817 if (Inst->isTerminator()) {
1818 // Use a different message for terminator misses.
1819 R << "FastISel missed terminator";
1820 // Don't abort for terminator unless the level is really high
1821 ShouldAbort = (EnableFastISelAbort > 2);
1822 } else {
1823 R << "FastISel missed";
1824 }
1825
1826 if (R.isEnabled() || EnableFastISelAbort) {
1827 std::string InstStrStorage;
1828 raw_string_ostream InstStr(InstStrStorage);
1829 InstStr << *Inst;
1830 R << ": " << InstStrStorage;
1831 }
1832
1833 reportFastISelFailure(*MF, *ORE, R, ShouldAbort);
1834
1835 NumFastIselFailures += NumFastIselRemaining;
1836 break;
1837 }
1838
1839 FastIS->recomputeInsertPt();
1840 }
1841
1842 if (SP->shouldEmitSDCheck(*LLVMBB)) {
1843 bool FunctionBasedInstrumentation =
1844          TLI->getSSPStackGuardCheck(*Fn.getParent());
1845      SDB->SPDescriptor.initialize(LLVMBB, FuncInfo->MBBMap[LLVMBB],
1846 FunctionBasedInstrumentation);
1847 }
1848
1849 if (Begin != BI)
1850 ++NumDAGBlocks;
1851 else
1852 ++NumFastIselBlocks;
1853
1854 if (Begin != BI) {
1855 // Run SelectionDAG instruction selection on the remainder of the block
1856 // not handled by FastISel. If FastISel is not run, this is the entire
1857 // block.
1858 bool HadTailCall;
1859 SelectBasicBlock(Begin, BI, HadTailCall);
1860
1861 // But if FastISel was run, we already selected some of the block.
1862 // If we emitted a tail-call, we need to delete any previously emitted
1863 // instruction that follows it.
1864 if (FastIS && HadTailCall && FuncInfo->InsertPt != FuncInfo->MBB->end())
1865 FastIS->removeDeadCode(FuncInfo->InsertPt, FuncInfo->MBB->end());
1866 }
1867
1868 if (FastIS)
1869 FastIS->finishBasicBlock();
1870 FinishBasicBlock();
1871 FuncInfo->PHINodesToUpdate.clear();
1872 ElidedArgCopyInstrs.clear();
1873 }
1874
1875 // AsynchEH: Report Block State under -AsynchEH
1876 if (Fn.getParent()->getModuleFlag("eh-asynch"))
1877 reportIPToStateForBlocks(MF);
1878
1879  SP->copyToMachineFrameInfo(MF->getFrameInfo());
1880
1881  SwiftError->propagateVRegs();
1882
1883 delete FastIS;
1884 SDB->clearDanglingDebugInfo();
1885 SDB->SPDescriptor.resetPerFunctionState();
1886}
1887
1888void
1889SelectionDAGISel::FinishBasicBlock() {
1890 LLVM_DEBUG(dbgs() << "Total amount of phi nodes to update: "
1891 << FuncInfo->PHINodesToUpdate.size() << "\n";
1892 for (unsigned i = 0, e = FuncInfo->PHINodesToUpdate.size(); i != e;
1893 ++i) dbgs()
1894 << "Node " << i << " : (" << FuncInfo->PHINodesToUpdate[i].first
1895 << ", " << FuncInfo->PHINodesToUpdate[i].second << ")\n");
1896
1897 // Next, now that we know what the last MBB the LLVM BB expanded is, update
1898 // PHI nodes in successors.
1899 for (unsigned i = 0, e = FuncInfo->PHINodesToUpdate.size(); i != e; ++i) {
1900 MachineInstrBuilder PHI(*MF, FuncInfo->PHINodesToUpdate[i].first);
1901 assert(PHI->isPHI() &&
1902 "This is not a machine PHI node that we are updating!");
1903 if (!FuncInfo->MBB->isSuccessor(PHI->getParent()))
1904 continue;
1905 PHI.addReg(FuncInfo->PHINodesToUpdate[i].second).addMBB(FuncInfo->MBB);
1906 }
1907
1908 // Handle stack protector.
1909 if (SDB->SPDescriptor.shouldEmitFunctionBasedCheckStackProtector()) {
1910 // The target provides a guard check function. There is no need to
1911 // generate error handling code or to split current basic block.
1912 MachineBasicBlock *ParentMBB = SDB->SPDescriptor.getParentMBB();
1913
1914 // Add load and check to the basicblock.
1915 FuncInfo->MBB = ParentMBB;
1916 FuncInfo->InsertPt =
1917        findSplitPointForStackProtector(ParentMBB, *TII);
1918    SDB->visitSPDescriptorParent(SDB->SPDescriptor, ParentMBB);
1919 CurDAG->setRoot(SDB->getRoot());
1920 SDB->clear();
1921 CodeGenAndEmitDAG();
1922
1923 // Clear the Per-BB State.
1924 SDB->SPDescriptor.resetPerBBState();
1925 } else if (SDB->SPDescriptor.shouldEmitStackProtector()) {
1926 MachineBasicBlock *ParentMBB = SDB->SPDescriptor.getParentMBB();
1927 MachineBasicBlock *SuccessMBB = SDB->SPDescriptor.getSuccessMBB();
1928
1929 // Find the split point to split the parent mbb. At the same time copy all
1930 // physical registers used in the tail of parent mbb into virtual registers
1931 // before the split point and back into physical registers after the split
1932 // point. This prevents us needing to deal with Live-ins and many other
1933 // register allocation issues caused by us splitting the parent mbb. The
1934 // register allocator will clean up said virtual copies later on.
1935 MachineBasicBlock::iterator SplitPoint =
1936        findSplitPointForStackProtector(ParentMBB, *TII);
1937
1938 // Splice the terminator of ParentMBB into SuccessMBB.
1939 SuccessMBB->splice(SuccessMBB->end(), ParentMBB,
1940 SplitPoint,
1941 ParentMBB->end());
1942
1943 // Add compare/jump on neq/jump to the parent BB.
1944 FuncInfo->MBB = ParentMBB;
1945 FuncInfo->InsertPt = ParentMBB->end();
1946 SDB->visitSPDescriptorParent(SDB->SPDescriptor, ParentMBB);
1947 CurDAG->setRoot(SDB->getRoot());
1948 SDB->clear();
1949 CodeGenAndEmitDAG();
1950
1951 // CodeGen Failure MBB if we have not codegened it yet.
1952 MachineBasicBlock *FailureMBB = SDB->SPDescriptor.getFailureMBB();
1953 if (FailureMBB->empty()) {
1954 FuncInfo->MBB = FailureMBB;
1955 FuncInfo->InsertPt = FailureMBB->end();
1956 SDB->visitSPDescriptorFailure(SDB->SPDescriptor);
1957 CurDAG->setRoot(SDB->getRoot());
1958 SDB->clear();
1959 CodeGenAndEmitDAG();
1960 }
1961
1962 // Clear the Per-BB State.
1963 SDB->SPDescriptor.resetPerBBState();
1964 }
1965
1966 // Lower each BitTestBlock.
1967 for (auto &BTB : SDB->SL->BitTestCases) {
1968 // Lower header first, if it wasn't already lowered
1969 if (!BTB.Emitted) {
1970 // Set the current basic block to the mbb we wish to insert the code into
1971 FuncInfo->MBB = BTB.Parent;
1972 FuncInfo->InsertPt = FuncInfo->MBB->end();
1973 // Emit the code
1974 SDB->visitBitTestHeader(BTB, FuncInfo->MBB);
1975 CurDAG->setRoot(SDB->getRoot());
1976 SDB->clear();
1977 CodeGenAndEmitDAG();
1978 }
1979
1980 BranchProbability UnhandledProb = BTB.Prob;
1981 for (unsigned j = 0, ej = BTB.Cases.size(); j != ej; ++j) {
1982 UnhandledProb -= BTB.Cases[j].ExtraProb;
1983 // Set the current basic block to the mbb we wish to insert the code into
1984 FuncInfo->MBB = BTB.Cases[j].ThisBB;
1985 FuncInfo->InsertPt = FuncInfo->MBB->end();
1986 // Emit the code
1987
1988 // If all cases cover a contiguous range, it is not necessary to jump to
1989 // the default block after the last bit test fails. This is because the
1990 // range check during bit test header creation has guaranteed that every
1991 // case here doesn't go outside the range. In this case, there is no need
1992 // to perform the last bit test, as it will always be true. Instead, make
1993 // the second-to-last bit-test fall through to the target of the last bit
1994 // test, and delete the last bit test.
1995
1996 MachineBasicBlock *NextMBB;
1997 if ((BTB.ContiguousRange || BTB.FallthroughUnreachable) && j + 2 == ej) {
1998 // Second-to-last bit-test with contiguous range or omitted range
1999 // check: fall through to the target of the final bit test.
2000 NextMBB = BTB.Cases[j + 1].TargetBB;
2001 } else if (j + 1 == ej) {
2002 // For the last bit test, fall through to Default.
2003 NextMBB = BTB.Default;
2004 } else {
2005 // Otherwise, fall through to the next bit test.
2006 NextMBB = BTB.Cases[j + 1].ThisBB;
2007 }
2008
2009 SDB->visitBitTestCase(BTB, NextMBB, UnhandledProb, BTB.Reg, BTB.Cases[j],
2010 FuncInfo->MBB);
2011
2012 CurDAG->setRoot(SDB->getRoot());
2013 SDB->clear();
2014 CodeGenAndEmitDAG();
2015
2016 if ((BTB.ContiguousRange || BTB.FallthroughUnreachable) && j + 2 == ej) {
2017 // Since we're not going to use the final bit test, remove it.
2018 BTB.Cases.pop_back();
2019 break;
2020 }
2021 }
2022
2023 // Update PHI Nodes
2024 for (const std::pair<MachineInstr *, unsigned> &P :
2025 FuncInfo->PHINodesToUpdate) {
2026 MachineInstrBuilder PHI(*MF, P.first);
2027 MachineBasicBlock *PHIBB = PHI->getParent();
2028 assert(PHI->isPHI() &&
2029 "This is not a machine PHI node that we are updating!");
2030 // This is "default" BB. We have two jumps to it. From "header" BB and
2031 // from last "case" BB, unless the latter was skipped.
2032 if (PHIBB == BTB.Default) {
2033 PHI.addReg(P.second).addMBB(BTB.Parent);
2034 if (!BTB.ContiguousRange) {
2035 PHI.addReg(P.second).addMBB(BTB.Cases.back().ThisBB);
2036 }
2037 }
2038 // One of "cases" BB.
2039 for (const SwitchCG::BitTestCase &BT : BTB.Cases) {
2040 MachineBasicBlock* cBB = BT.ThisBB;
2041 if (cBB->isSuccessor(PHIBB))
2042 PHI.addReg(P.second).addMBB(cBB);
2043 }
2044 }
2045 }
2046 SDB->SL->BitTestCases.clear();
2047
2048 // If the JumpTable record is filled in, then we need to emit a jump table.
2049 // Updating the PHI nodes is tricky in this case, since we need to determine
2050 // whether the PHI is a successor of the range check MBB or the jump table MBB
2051 for (unsigned i = 0, e = SDB->SL->JTCases.size(); i != e; ++i) {
2052 // Lower header first, if it wasn't already lowered
2053 if (!SDB->SL->JTCases[i].first.Emitted) {
2054 // Set the current basic block to the mbb we wish to insert the code into
2055 FuncInfo->MBB = SDB->SL->JTCases[i].first.HeaderBB;
2056 FuncInfo->InsertPt = FuncInfo->MBB->end();
2057 // Emit the code
2058 SDB->visitJumpTableHeader(SDB->SL->JTCases[i].second,
2059 SDB->SL->JTCases[i].first, FuncInfo->MBB);
2060 CurDAG->setRoot(SDB->getRoot());
2061 SDB->clear();
2062 CodeGenAndEmitDAG();
2063 }
2064
2065 // Set the current basic block to the mbb we wish to insert the code into
2066 FuncInfo->MBB = SDB->SL->JTCases[i].second.MBB;
2067 FuncInfo->InsertPt = FuncInfo->MBB->end();
2068 // Emit the code
2069 SDB->visitJumpTable(SDB->SL->JTCases[i].second);
2070 CurDAG->setRoot(SDB->getRoot());
2071 SDB->clear();
2072 CodeGenAndEmitDAG();
2073
2074 // Update PHI Nodes
2075 for (unsigned pi = 0, pe = FuncInfo->PHINodesToUpdate.size();
2076 pi != pe; ++pi) {
2077 MachineInstrBuilder PHI(*MF, FuncInfo->PHINodesToUpdate[pi].first);
2078 MachineBasicBlock *PHIBB = PHI->getParent();
2079 assert(PHI->isPHI() &&
2080 "This is not a machine PHI node that we are updating!");
2081 // "default" BB. We can go there only from header BB.
2082 if (PHIBB == SDB->SL->JTCases[i].second.Default)
2083 PHI.addReg(FuncInfo->PHINodesToUpdate[pi].second)
2084 .addMBB(SDB->SL->JTCases[i].first.HeaderBB);
2085 // JT BB. Just iterate over successors here
2086 if (FuncInfo->MBB->isSuccessor(PHIBB))
2087 PHI.addReg(FuncInfo->PHINodesToUpdate[pi].second).addMBB(FuncInfo->MBB);
2088 }
2089 }
2090 SDB->SL->JTCases.clear();
2091
2092 // If we generated any switch lowering information, build and codegen any
2093 // additional DAGs necessary.
2094 for (unsigned i = 0, e = SDB->SL->SwitchCases.size(); i != e; ++i) {
2095 // Set the current basic block to the mbb we wish to insert the code into
2096 FuncInfo->MBB = SDB->SL->SwitchCases[i].ThisBB;
2097 FuncInfo->InsertPt = FuncInfo->MBB->end();
2098
2099 // Determine the unique successors.
2101 Succs.push_back(SDB->SL->SwitchCases[i].TrueBB);
2102 if (SDB->SL->SwitchCases[i].TrueBB != SDB->SL->SwitchCases[i].FalseBB)
2103 Succs.push_back(SDB->SL->SwitchCases[i].FalseBB);
2104
2105 // Emit the code. Note that this could result in FuncInfo->MBB being split.
2106 SDB->visitSwitchCase(SDB->SL->SwitchCases[i], FuncInfo->MBB);
2107 CurDAG->setRoot(SDB->getRoot());
2108 SDB->clear();
2109 CodeGenAndEmitDAG();
2110
2111 // Remember the last block, now that any splitting is done, for use in
2112 // populating PHI nodes in successors.
2113 MachineBasicBlock *ThisBB = FuncInfo->MBB;
2114
2115 // Handle any PHI nodes in successors of this chunk, as if we were coming
2116 // from the original BB before switch expansion. Note that PHI nodes can
2117 // occur multiple times in PHINodesToUpdate. We have to be very careful to
2118 // handle them the right number of times.
2119 for (MachineBasicBlock *Succ : Succs) {
2120 FuncInfo->MBB = Succ;
2121 FuncInfo->InsertPt = FuncInfo->MBB->end();
2122 // FuncInfo->MBB may have been removed from the CFG if a branch was
2123 // constant folded.
2124 if (ThisBB->isSuccessor(FuncInfo->MBB)) {
2125        for (MachineBasicBlock::iterator
2126             MBBI = FuncInfo->MBB->begin(), MBBE = FuncInfo->MBB->end();
2127 MBBI != MBBE && MBBI->isPHI(); ++MBBI) {
2128          MachineInstrBuilder PHI(*MF, MBBI);
2129          // This value for this PHI node is recorded in PHINodesToUpdate.
2130 for (unsigned pn = 0; ; ++pn) {
2131 assert(pn != FuncInfo->PHINodesToUpdate.size() &&
2132 "Didn't find PHI entry!");
2133 if (FuncInfo->PHINodesToUpdate[pn].first == PHI) {
2134 PHI.addReg(FuncInfo->PHINodesToUpdate[pn].second).addMBB(ThisBB);
2135 break;
2136 }
2137 }
2138 }
2139 }
2140 }
2141 }
2142 SDB->SL->SwitchCases.clear();
2143}
2144
2145/// Create the scheduler. If a specific scheduler was specified
2146/// via the SchedulerRegistry, use it, otherwise select the
2147/// one preferred by the target.
2148///
2149ScheduleDAGSDNodes *SelectionDAGISel::CreateScheduler() {
2150 return ISHeuristic(this, OptLevel);
2151}
2152
2153//===----------------------------------------------------------------------===//
2154// Helper functions used by the generated instruction selector.
2155//===----------------------------------------------------------------------===//
2156// Calls to these methods are generated by tblgen.
2157
2158/// CheckAndMask - The isel is trying to match something like (and X, 255). If
2159/// the dag combiner simplified the 255, we still want to match. RHS is the
2160/// actual value in the DAG on the RHS of an AND, and DesiredMaskS is the value
2161/// specified in the .td file (e.g. 255).
2162bool SelectionDAGISel::CheckAndMask(SDValue LHS, ConstantSDNode *RHS,
2163                                    int64_t DesiredMaskS) const {
2164 const APInt &ActualMask = RHS->getAPIntValue();
2165 const APInt &DesiredMask = APInt(LHS.getValueSizeInBits(), DesiredMaskS);
2166
2167 // If the actual mask exactly matches, success!
2168 if (ActualMask == DesiredMask)
2169 return true;
2170
2171 // If the actual AND mask is allowing unallowed bits, this doesn't match.
2172 if (!ActualMask.isSubsetOf(DesiredMask))
2173 return false;
2174
2175 // Otherwise, the DAG Combiner may have proven that the value coming in is
2176 // either already zero or is not demanded. Check for known zero input bits.
2177 APInt NeededMask = DesiredMask & ~ActualMask;
2178 if (CurDAG->MaskedValueIsZero(LHS, NeededMask))
2179 return true;
2180
2181 // TODO: check to see if missing bits are just not demanded.
2182
2183 // Otherwise, this pattern doesn't match.
2184 return false;
2185}
2186
2187/// CheckOrMask - The isel is trying to match something like (or X, 255). If
2188/// the dag combiner simplified the 255, we still want to match. RHS is the
2189/// actual value in the DAG on the RHS of an OR, and DesiredMaskS is the value
2190/// specified in the .td file (e.g. 255).
2191bool SelectionDAGISel::CheckOrMask(SDValue LHS, ConstantSDNode *RHS,
2192                                   int64_t DesiredMaskS) const {
2193 const APInt &ActualMask = RHS->getAPIntValue();
2194 const APInt &DesiredMask = APInt(LHS.getValueSizeInBits(), DesiredMaskS);
2195
2196 // If the actual mask exactly matches, success!
2197 if (ActualMask == DesiredMask)
2198 return true;
2199
2200 // If the actual AND mask is allowing unallowed bits, this doesn't match.
2201 if (!ActualMask.isSubsetOf(DesiredMask))
2202 return false;
2203
2204 // Otherwise, the DAG Combiner may have proven that the value coming in is
2205 // either already zero or is not demanded. Check for known zero input bits.
2206 APInt NeededMask = DesiredMask & ~ActualMask;
2207  KnownBits Known = CurDAG->computeKnownBits(LHS);
2208
2209 // If all the missing bits in the or are already known to be set, match!
2210 if (NeededMask.isSubsetOf(Known.One))
2211 return true;
2212
2213 // TODO: check to see if missing bits are just not demanded.
2214
2215 // Otherwise, this pattern doesn't match.
2216 return false;
2217}
2218
2219/// SelectInlineAsmMemoryOperands - Calls to this are automatically generated
2220/// by tblgen. Others should not call it.
2221void SelectionDAGISel::SelectInlineAsmMemoryOperands(std::vector<SDValue> &Ops,
2222                                                     const SDLoc &DL) {
2223 std::vector<SDValue> InOps;
2224 std::swap(InOps, Ops);
2225
2226 Ops.push_back(InOps[InlineAsm::Op_InputChain]); // 0
2227 Ops.push_back(InOps[InlineAsm::Op_AsmString]); // 1
2228 Ops.push_back(InOps[InlineAsm::Op_MDNode]); // 2, !srcloc
2229 Ops.push_back(InOps[InlineAsm::Op_ExtraInfo]); // 3 (SideEffect, AlignStack)
2230
2231 unsigned i = InlineAsm::Op_FirstOperand, e = InOps.size();
2232 if (InOps[e-1].getValueType() == MVT::Glue)
2233 --e; // Don't process a glue operand if it is here.
2234
2235 while (i != e) {
2236 InlineAsm::Flag Flags(InOps[i]->getAsZExtVal());
2237 if (!Flags.isMemKind() && !Flags.isFuncKind()) {
2238 // Just skip over this operand, copying the operands verbatim.
2239 Ops.insert(Ops.end(), InOps.begin() + i,
2240 InOps.begin() + i + Flags.getNumOperandRegisters() + 1);
2241 i += Flags.getNumOperandRegisters() + 1;
2242 } else {
2243 assert(Flags.getNumOperandRegisters() == 1 &&
2244 "Memory operand with multiple values?");
2245
2246 unsigned TiedToOperand;
2247 if (Flags.isUseOperandTiedToDef(TiedToOperand)) {
2248 // We need the constraint ID from the operand this is tied to.
2249 unsigned CurOp = InlineAsm::Op_FirstOperand;
2250 Flags = InlineAsm::Flag(InOps[CurOp]->getAsZExtVal());
2251 for (; TiedToOperand; --TiedToOperand) {
2252 CurOp += Flags.getNumOperandRegisters() + 1;
2253 Flags = InlineAsm::Flag(InOps[CurOp]->getAsZExtVal());
2254 }
2255 }
2256
2257 // Otherwise, this is a memory operand. Ask the target to select it.
2258 std::vector<SDValue> SelOps;
2259 const InlineAsm::ConstraintCode ConstraintID =
2260 Flags.getMemoryConstraintID();
2261 if (SelectInlineAsmMemoryOperand(InOps[i+1], ConstraintID, SelOps))
2262 report_fatal_error("Could not match memory address. Inline asm"
2263 " failure!");
2264
2265 // Add this to the output node.
2266 Flags = InlineAsm::Flag(Flags.isMemKind() ? InlineAsm::Kind::Mem
2267                                                : InlineAsm::Kind::Func,
2268                              SelOps.size());
2269 Flags.setMemConstraint(ConstraintID);
2270 Ops.push_back(CurDAG->getTargetConstant(Flags, DL, MVT::i32));
2271 llvm::append_range(Ops, SelOps);
2272 i += 2;
2273 }
2274 }
2275
2276 // Add the glue input back if present.
2277 if (e != InOps.size())
2278 Ops.push_back(InOps.back());
2279}
2280
2281/// findGlueUse - Return use of MVT::Glue value produced by the specified
2282/// SDNode.
2283///
2284static SDNode *findGlueUse(SDNode *N) {
2285  unsigned FlagResNo = N->getNumValues()-1;
2286 for (SDNode::use_iterator I = N->use_begin(), E = N->use_end(); I != E; ++I) {
2287 SDUse &Use = I.getUse();
2288 if (Use.getResNo() == FlagResNo)
2289 return Use.getUser();
2290 }
2291 return nullptr;
2292}
2293
2294/// findNonImmUse - Return true if "Def" is a predecessor of "Root" via a path
2295/// beyond "ImmedUse". We may ignore chains as they are checked separately.
2296static bool findNonImmUse(SDNode *Root, SDNode *Def, SDNode *ImmedUse,
2297 bool IgnoreChains) {
2298  SmallPtrSet<const SDNode *, 16> Visited;
2299  SmallVector<const SDNode *, 16> WorkList;
2300  // Only check if we have non-immediate uses of Def.
2301 if (ImmedUse->isOnlyUserOf(Def))
2302 return false;
2303
2304 // We don't care about paths to Def that go through ImmedUse so mark it
2305 // visited and mark non-def operands as used.
2306 Visited.insert(ImmedUse);
2307 for (const SDValue &Op : ImmedUse->op_values()) {
2308 SDNode *N = Op.getNode();
2309 // Ignore chain deps (they are validated by
2310 // HandleMergeInputChains) and immediate uses
2311 if ((Op.getValueType() == MVT::Other && IgnoreChains) || N == Def)
2312 continue;
2313 if (!Visited.insert(N).second)
2314 continue;
2315 WorkList.push_back(N);
2316 }
2317
2318 // Initialize worklist to operands of Root.
2319 if (Root != ImmedUse) {
2320 for (const SDValue &Op : Root->op_values()) {
2321 SDNode *N = Op.getNode();
2322 // Ignore chains (they are validated by HandleMergeInputChains)
2323 if ((Op.getValueType() == MVT::Other && IgnoreChains) || N == Def)
2324 continue;
2325 if (!Visited.insert(N).second)
2326 continue;
2327 WorkList.push_back(N);
2328 }
2329 }
2330
2331 return SDNode::hasPredecessorHelper(Def, Visited, WorkList, 0, true);
2332}
2333
2334/// IsProfitableToFold - Returns true if it's profitable to fold the specific
2335/// operand node N of U during instruction selection that starts at Root.
2336bool SelectionDAGISel::IsProfitableToFold(SDValue N, SDNode *U,
2337                                          SDNode *Root) const {
2338  if (OptLevel == CodeGenOptLevel::None)
2339    return false;
2340 return N.hasOneUse();
2341}
2342
2343/// IsLegalToFold - Returns true if the specific operand node N of
2344/// U can be folded during instruction selection that starts at Root.
2345bool SelectionDAGISel::IsLegalToFold(SDValue N, SDNode *U, SDNode *Root,
2346                                     CodeGenOptLevel OptLevel,
2347                                     bool IgnoreChains) {
2348  if (OptLevel == CodeGenOptLevel::None)
2349    return false;
2350
2351 // If Root use can somehow reach N through a path that doesn't contain
2352 // U then folding N would create a cycle. e.g. In the following
2353 // diagram, Root can reach N through X. If N is folded into Root, then
2354 // X is both a predecessor and a successor of U.
2355 //
2356 // [N*] //
2357 // ^ ^ //
2358 // / \ //
2359 // [U*] [X]? //
2360 // ^ ^ //
2361 // \ / //
2362 // \ / //
2363 // [Root*] //
2364 //
2365 // * indicates nodes to be folded together.
2366 //
2367 // If Root produces glue, then it gets (even more) interesting. Since it
2368 // will be "glued" together with its glue use in the scheduler, we need to
2369 // check if it might reach N.
2370 //
2371 // [N*] //
2372 // ^ ^ //
2373 // / \ //
2374 // [U*] [X]? //
2375 // ^ ^ //
2376 // \ \ //
2377 // \ | //
2378 // [Root*] | //
2379 // ^ | //
2380 // f | //
2381 // | / //
2382 // [Y] / //
2383 // ^ / //
2384 // f / //
2385 // | / //
2386 // [GU] //
2387 //
2388 // If GU (glue use) indirectly reaches N (the load), and Root folds N
2389 // (call it Fold), then X is a predecessor of GU and a successor of
2390 // Fold. But since Fold and GU are glued together, this will create
2391 // a cycle in the scheduling graph.
2392
2393 // If the node has glue, walk down the graph to the "lowest" node in the
2394  // glued set.
2395 EVT VT = Root->getValueType(Root->getNumValues()-1);
2396 while (VT == MVT::Glue) {
2397 SDNode *GU = findGlueUse(Root);
2398 if (!GU)
2399 break;
2400 Root = GU;
2401 VT = Root->getValueType(Root->getNumValues()-1);
2402
2403 // If our query node has a glue result with a use, we've walked up it. If
2404 // the user (which has already been selected) has a chain or indirectly uses
2405 // the chain, HandleMergeInputChains will not consider it. Because of
2406 // this, we cannot ignore chains in this predicate.
2407 IgnoreChains = false;
2408 }
2409
2410 return !findNonImmUse(Root, N.getNode(), U, IgnoreChains);
2411}
2412
2413void SelectionDAGISel::Select_INLINEASM(SDNode *N) {
2414 SDLoc DL(N);
2415
2416 std::vector<SDValue> Ops(N->op_begin(), N->op_end());
2417  SelectInlineAsmMemoryOperands(Ops, DL);
2418
2419 const EVT VTs[] = {MVT::Other, MVT::Glue};
2420 SDValue New = CurDAG->getNode(N->getOpcode(), DL, VTs, Ops);
2421 New->setNodeId(-1);
2422 ReplaceUses(N, New.getNode());
2423  CurDAG->RemoveDeadNode(N);
2424}
2425
2426void SelectionDAGISel::Select_READ_REGISTER(SDNode *Op) {
2427 SDLoc dl(Op);
2428 MDNodeSDNode *MD = cast<MDNodeSDNode>(Op->getOperand(1));
2429 const MDString *RegStr = cast<MDString>(MD->getMD()->getOperand(0));
2430
2431 EVT VT = Op->getValueType(0);
2432 LLT Ty = VT.isSimple() ? getLLTForMVT(VT.getSimpleVT()) : LLT();
2433 Register Reg =
2434 TLI->getRegisterByName(RegStr->getString().data(), Ty,
2435                             CurDAG->getMachineFunction());
2436  SDValue New = CurDAG->getCopyFromReg(
2437                        Op->getOperand(0), dl, Reg, Op->getValueType(0));
2438 New->setNodeId(-1);
2439 ReplaceUses(Op, New.getNode());
2440  CurDAG->RemoveDeadNode(Op);
2441}
2442
2443void SelectionDAGISel::Select_WRITE_REGISTER(SDNode *Op) {
2444 SDLoc dl(Op);
2445 MDNodeSDNode *MD = cast<MDNodeSDNode>(Op->getOperand(1));
2446 const MDString *RegStr = cast<MDString>(MD->getMD()->getOperand(0));
2447
2448 EVT VT = Op->getOperand(2).getValueType();
2449 LLT Ty = VT.isSimple() ? getLLTForMVT(VT.getSimpleVT()) : LLT();
2450
2451 Register Reg = TLI->getRegisterByName(RegStr->getString().data(), Ty,
2452                                        CurDAG->getMachineFunction());
2453  SDValue New = CurDAG->getCopyToReg(
2454                        Op->getOperand(0), dl, Reg, Op->getOperand(2));
2455 New->setNodeId(-1);
2456 ReplaceUses(Op, New.getNode());
2457  CurDAG->RemoveDeadNode(Op);
2458}
2459
2460void SelectionDAGISel::Select_UNDEF(SDNode *N) {
2461 CurDAG->SelectNodeTo(N, TargetOpcode::IMPLICIT_DEF, N->getValueType(0));
2462}
2463
2464void SelectionDAGISel::Select_FREEZE(SDNode *N) {
2465 // TODO: We don't have FREEZE pseudo-instruction in MachineInstr-level now.
2466 // If FREEZE instruction is added later, the code below must be changed as
2467 // well.
2468 CurDAG->SelectNodeTo(N, TargetOpcode::COPY, N->getValueType(0),
2469 N->getOperand(0));
2470}
2471
2472void SelectionDAGISel::Select_ARITH_FENCE(SDNode *N) {
2473 CurDAG->SelectNodeTo(N, TargetOpcode::ARITH_FENCE, N->getValueType(0),
2474 N->getOperand(0));
2475}
2476
2477void SelectionDAGISel::Select_MEMBARRIER(SDNode *N) {
2478 CurDAG->SelectNodeTo(N, TargetOpcode::MEMBARRIER, N->getValueType(0),
2479 N->getOperand(0));
2480}
2481
2482void SelectionDAGISel::Select_CONVERGENCECTRL_ANCHOR(SDNode *N) {
2483 CurDAG->SelectNodeTo(N, TargetOpcode::CONVERGENCECTRL_ANCHOR,
2484 N->getValueType(0));
2485}
2486
2487void SelectionDAGISel::Select_CONVERGENCECTRL_ENTRY(SDNode *N) {
2488 CurDAG->SelectNodeTo(N, TargetOpcode::CONVERGENCECTRL_ENTRY,
2489 N->getValueType(0));
2490}
2491
2492void SelectionDAGISel::Select_CONVERGENCECTRL_LOOP(SDNode *N) {
2493 CurDAG->SelectNodeTo(N, TargetOpcode::CONVERGENCECTRL_LOOP,
2494 N->getValueType(0), N->getOperand(0));
2495}
2496
2497void SelectionDAGISel::pushStackMapLiveVariable(SmallVectorImpl<SDValue> &Ops,
2498 SDValue OpVal, SDLoc DL) {
2499 SDNode *OpNode = OpVal.getNode();
2500
2501 // FrameIndex nodes should have been directly emitted to TargetFrameIndex
2502 // nodes at DAG-construction time.
2503 assert(OpNode->getOpcode() != ISD::FrameIndex);
2504
2505 if (OpNode->getOpcode() == ISD::Constant) {
2506 Ops.push_back(
2507 CurDAG->getTargetConstant(StackMaps::ConstantOp, DL, MVT::i64));
2508    Ops.push_back(CurDAG->getTargetConstant(OpVal->getAsZExtVal(), DL,
2509                                            OpVal.getValueType()));
2510 } else {
2511 Ops.push_back(OpVal);
2512 }
2513}
2514
2515void SelectionDAGISel::Select_STACKMAP(SDNode *N) {
2516  SmallVector<SDValue, 32> Ops;
2517  auto *It = N->op_begin();
2518 SDLoc DL(N);
2519
2520 // Stash the chain and glue operands so we can move them to the end.
2521 SDValue Chain = *It++;
2522 SDValue InGlue = *It++;
2523
2524 // <id> operand.
2525 SDValue ID = *It++;
2526 assert(ID.getValueType() == MVT::i64);
2527 Ops.push_back(ID);
2528
2529 // <numShadowBytes> operand.
2530 SDValue Shad = *It++;
2531 assert(Shad.getValueType() == MVT::i32);
2532 Ops.push_back(Shad);
2533
2534 // Live variable operands.
2535 for (; It != N->op_end(); It++)
2536 pushStackMapLiveVariable(Ops, *It, DL);
2537
2538 Ops.push_back(Chain);
2539 Ops.push_back(InGlue);
2540
2541 SDVTList NodeTys = CurDAG->getVTList(MVT::Other, MVT::Glue);
2542 CurDAG->SelectNodeTo(N, TargetOpcode::STACKMAP, NodeTys, Ops);
2543}
2544
2545void SelectionDAGISel::Select_PATCHPOINT(SDNode *N) {
2546  SmallVector<SDValue, 32> Ops;
2547  auto *It = N->op_begin();
2548 SDLoc DL(N);
2549
2550 // Cache arguments that will be moved to the end in the target node.
2551 SDValue Chain = *It++;
2552 std::optional<SDValue> Glue;
2553 if (It->getValueType() == MVT::Glue)
2554 Glue = *It++;
2555 SDValue RegMask = *It++;
2556
2557 // <id> operand.
2558 SDValue ID = *It++;
2559 assert(ID.getValueType() == MVT::i64);
2560 Ops.push_back(ID);
2561
2562 // <numShadowBytes> operand.
2563 SDValue Shad = *It++;
2564 assert(Shad.getValueType() == MVT::i32);
2565 Ops.push_back(Shad);
2566
2567 // Add the callee.
2568 Ops.push_back(*It++);
2569
2570 // Add <numArgs>.
2571 SDValue NumArgs = *It++;
2572 assert(NumArgs.getValueType() == MVT::i32);
2573 Ops.push_back(NumArgs);
2574
2575 // Calling convention.
2576 Ops.push_back(*It++);
2577
2578 // Push the args for the call.
2579 for (uint64_t I = NumArgs->getAsZExtVal(); I != 0; I--)
2580 Ops.push_back(*It++);
2581
2582 // Now push the live variables.
2583 for (; It != N->op_end(); It++)
2584 pushStackMapLiveVariable(Ops, *It, DL);
2585
2586 // Finally, the regmask, chain and (if present) glue are moved to the end.
2587 Ops.push_back(RegMask);
2588 Ops.push_back(Chain);
2589 if (Glue.has_value())
2590 Ops.push_back(*Glue);
2591
2592 SDVTList NodeTys = N->getVTList();
2593 CurDAG->SelectNodeTo(N, TargetOpcode::PATCHPOINT, NodeTys, Ops);
2594}
2595
2596/// GetVBR - decode a vbr encoding whose top bit is set.
2597LLVM_ATTRIBUTE_ALWAYS_INLINE static uint64_t
2598GetVBR(uint64_t Val, const unsigned char *MatcherTable, unsigned &Idx) {
2599 assert(Val >= 128 && "Not a VBR");
2600 Val &= 127; // Remove first vbr bit.
2601
2602 unsigned Shift = 7;
2603 uint64_t NextBits;
2604 do {
2605 NextBits = MatcherTable[Idx++];
2606 Val |= (NextBits&127) << Shift;
2607 Shift += 7;
2608 } while (NextBits & 128);
2609
2610 return Val;
2611}
2612
2613void SelectionDAGISel::Select_JUMP_TABLE_DEBUG_INFO(SDNode *N) {
2614 SDLoc dl(N);
2615 CurDAG->SelectNodeTo(N, TargetOpcode::JUMP_TABLE_DEBUG_INFO, MVT::Glue,
2616 CurDAG->getTargetConstant(N->getConstantOperandVal(1),
2617 dl, MVT::i64, true));
2618}
2619
2620/// When a match is complete, this method updates uses of interior chain results
2621/// to use the new results.
2622void SelectionDAGISel::UpdateChains(
2623 SDNode *NodeToMatch, SDValue InputChain,
2624 SmallVectorImpl<SDNode *> &ChainNodesMatched, bool isMorphNodeTo) {
2625 SmallVector<SDNode*, 4> NowDeadNodes;
2626
2627 // Now that all the normal results are replaced, we replace the chain and
2628 // glue results if present.
2629 if (!ChainNodesMatched.empty()) {
2630 assert(InputChain.getNode() &&
2631 "Matched input chains but didn't produce a chain");
2632 // Loop over all of the nodes we matched that produced a chain result.
2633 // Replace all the chain results with the final chain we ended up with.
2634 for (unsigned i = 0, e = ChainNodesMatched.size(); i != e; ++i) {
2635 SDNode *ChainNode = ChainNodesMatched[i];
2636 // If ChainNode is null, it's because we replaced it on a previous
2637 // iteration and we cleared it out of the map. Just skip it.
2638 if (!ChainNode)
2639 continue;
2640
2641 assert(ChainNode->getOpcode() != ISD::DELETED_NODE &&
2642 "Deleted node left in chain");
2643
2644 // Don't replace the results of the root node if we're doing a
2645 // MorphNodeTo.
2646 if (ChainNode == NodeToMatch && isMorphNodeTo)
2647 continue;
2648
2649 SDValue ChainVal = SDValue(ChainNode, ChainNode->getNumValues()-1);
2650 if (ChainVal.getValueType() == MVT::Glue)
2651 ChainVal = ChainVal.getValue(ChainVal->getNumValues()-2);
2652 assert(ChainVal.getValueType() == MVT::Other && "Not a chain?");
2653      SelectionDAG::DAGNodeDeletedListener NDL(
2654          *CurDAG, [&](SDNode *N, SDNode *E) {
2655 std::replace(ChainNodesMatched.begin(), ChainNodesMatched.end(), N,
2656 static_cast<SDNode *>(nullptr));
2657 });
2658 if (ChainNode->getOpcode() != ISD::TokenFactor)
2659 ReplaceUses(ChainVal, InputChain);
2660
2661 // If the node became dead and we haven't already seen it, delete it.
2662 if (ChainNode != NodeToMatch && ChainNode->use_empty() &&
2663 !llvm::is_contained(NowDeadNodes, ChainNode))
2664 NowDeadNodes.push_back(ChainNode);
2665 }
2666 }
2667
2668 if (!NowDeadNodes.empty())
2669 CurDAG->RemoveDeadNodes(NowDeadNodes);
2670
2671 LLVM_DEBUG(dbgs() << "ISEL: Match complete!\n");
2672}
2673
2674/// HandleMergeInputChains - This implements the OPC_EmitMergeInputChains
2675/// operation for when the pattern matched at least one node with chains. The
2676/// input vector contains a list of all of the chained nodes that we match. We
2677/// must determine if this is a valid thing to cover (i.e. matching it won't
2678/// induce cycles in the DAG) and, if so, creates a TokenFactor node that will
2679/// be used as the input node chain for the generated nodes.
2680static SDValue
2681HandleMergeInputChains(SmallVectorImpl<SDNode*> &ChainNodesMatched,
2682                       SelectionDAG *CurDAG) {
2683
2684  SmallPtrSet<const SDNode *, 16> Visited;
2685  SmallVector<const SDNode *, 16> Worklist;
2686  SmallVector<SDValue, 3> InputChains;
2687 unsigned int Max = 8192;
2688
2689 // Quick exit on trivial merge.
2690 if (ChainNodesMatched.size() == 1)
2691 return ChainNodesMatched[0]->getOperand(0);
2692
2693 // Add chains that aren't already added (internal). Peek through
2694 // token factors.
2695 std::function<void(const SDValue)> AddChains = [&](const SDValue V) {
2696 if (V.getValueType() != MVT::Other)
2697 return;
2698 if (V->getOpcode() == ISD::EntryToken)
2699 return;
2700 if (!Visited.insert(V.getNode()).second)
2701 return;
2702 if (V->getOpcode() == ISD::TokenFactor) {
2703 for (const SDValue &Op : V->op_values())
2704 AddChains(Op);
2705 } else
2706 InputChains.push_back(V);
2707 };
2708
2709 for (auto *N : ChainNodesMatched) {
2710 Worklist.push_back(N);
2711 Visited.insert(N);
2712 }
2713
2714 while (!Worklist.empty())
2715 AddChains(Worklist.pop_back_val()->getOperand(0));
2716
2717 // Skip the search if there are no chain dependencies.
2718 if (InputChains.size() == 0)
2719 return CurDAG->getEntryNode();
2720
2721 // If one of these chains is a successor of input, we must have a
2722 // node that is both the predecessor and successor of the
2723 // to-be-merged nodes. Fail.
2724 Visited.clear();
2725 for (SDValue V : InputChains)
2726 Worklist.push_back(V.getNode());
2727
2728 for (auto *N : ChainNodesMatched)
2729 if (SDNode::hasPredecessorHelper(N, Visited, Worklist, Max, true))
2730 return SDValue();
2731
2732 // Return merged chain.
2733 if (InputChains.size() == 1)
2734 return InputChains[0];
2735 return CurDAG->getNode(ISD::TokenFactor, SDLoc(ChainNodesMatched[0]),
2736 MVT::Other, InputChains);
2737}
2738
2739/// MorphNode - Handle morphing a node in place for the selector.
2740SDNode *SelectionDAGISel::
2741MorphNode(SDNode *Node, unsigned TargetOpc, SDVTList VTList,
2742 ArrayRef<SDValue> Ops, unsigned EmitNodeInfo) {
2743 // It is possible we're using MorphNodeTo to replace a node with no
2744 // normal results with one that has a normal result (or we could be
2745 // adding a chain) and the input could have glue and chains as well.
2746 // In this case we need to shift the operands down.
2747 // FIXME: This is a horrible hack and broken in obscure cases, no worse
2748 // than the old isel though.
2749 int OldGlueResultNo = -1, OldChainResultNo = -1;
2750
2751 unsigned NTMNumResults = Node->getNumValues();
2752 if (Node->getValueType(NTMNumResults-1) == MVT::Glue) {
2753 OldGlueResultNo = NTMNumResults-1;
2754 if (NTMNumResults != 1 &&
2755 Node->getValueType(NTMNumResults-2) == MVT::Other)
2756 OldChainResultNo = NTMNumResults-2;
2757 } else if (Node->getValueType(NTMNumResults-1) == MVT::Other)
2758 OldChainResultNo = NTMNumResults-1;
2759
2760 // Call the underlying SelectionDAG routine to do the transmogrification. Note
2761 // that this deletes operands of the old node that become dead.
2762 SDNode *Res = CurDAG->MorphNodeTo(Node, ~TargetOpc, VTList, Ops);
2763
2764 // MorphNodeTo can operate in two ways: if an existing node with the
2765 // specified operands exists, it can just return it. Otherwise, it
2766 // updates the node in place to have the requested operands.
2767 if (Res == Node) {
2768 // If we updated the node in place, reset the node ID. To the isel,
2769 // this should be just like a newly allocated machine node.
2770 Res->setNodeId(-1);
2771 }
2772
2773 unsigned ResNumResults = Res->getNumValues();
2774 // Move the glue if needed.
2775 if ((EmitNodeInfo & OPFL_GlueOutput) && OldGlueResultNo != -1 &&
2776 static_cast<unsigned>(OldGlueResultNo) != ResNumResults - 1)
2777 ReplaceUses(SDValue(Node, OldGlueResultNo),
2778 SDValue(Res, ResNumResults - 1));
2779
2780 if ((EmitNodeInfo & OPFL_GlueOutput) != 0)
2781 --ResNumResults;
2782
2783 // Move the chain reference if needed.
2784 if ((EmitNodeInfo & OPFL_Chain) && OldChainResultNo != -1 &&
2785 static_cast<unsigned>(OldChainResultNo) != ResNumResults - 1)
2786 ReplaceUses(SDValue(Node, OldChainResultNo),
2787 SDValue(Res, ResNumResults - 1));
2788
2789 // Otherwise, no replacement happened because the node already exists. Replace
2790 // Uses of the old node with the new one.
2791 if (Res != Node) {
2792 ReplaceNode(Node, Res);
2793 } else {
2794    CurDAG->RemoveDeadNode(Node);
2795  }
2796
2797 return Res;
2798}
2799
2800/// CheckSame - Implements OP_CheckSame.
2801LLVM_ATTRIBUTE_ALWAYS_INLINE static bool
2802CheckSame(const unsigned char *MatcherTable, unsigned &MatcherIndex, SDValue N,
2803 const SmallVectorImpl<std::pair<SDValue, SDNode *>> &RecordedNodes) {
2804 // Accept if it is exactly the same as a previously recorded node.
2805 unsigned RecNo = MatcherTable[MatcherIndex++];
2806 assert(RecNo < RecordedNodes.size() && "Invalid CheckSame");
2807 return N == RecordedNodes[RecNo].first;
2808}
2809
2810/// CheckChildSame - Implements OP_CheckChildXSame.
2811LLVM_ATTRIBUTE_ALWAYS_INLINE static bool CheckChildSame(
2812    const unsigned char *MatcherTable, unsigned &MatcherIndex, SDValue N,
2813 const SmallVectorImpl<std::pair<SDValue, SDNode *>> &RecordedNodes,
2814 unsigned ChildNo) {
2815 if (ChildNo >= N.getNumOperands())
2816 return false; // Match fails if out of range child #.
2817 return ::CheckSame(MatcherTable, MatcherIndex, N.getOperand(ChildNo),
2818 RecordedNodes);
2819}
2820
2821/// CheckPatternPredicate - Implements OP_CheckPatternPredicate.
2822LLVM_ATTRIBUTE_ALWAYS_INLINE static bool
2823CheckPatternPredicate(unsigned Opcode, const unsigned char *MatcherTable,
2824 unsigned &MatcherIndex, const SelectionDAGISel &SDISel) {
2825  bool TwoBytePredNo =
2826      Opcode == SelectionDAGISel::OPC_CheckPatternPredicateTwoByte;
2827  unsigned PredNo =
2828      TwoBytePredNo || Opcode == SelectionDAGISel::OPC_CheckPatternPredicate
2829          ? MatcherTable[MatcherIndex++]
2830          : Opcode - SelectionDAGISel::OPC_CheckPatternPredicate0;
2831  if (TwoBytePredNo)
2832 PredNo |= MatcherTable[MatcherIndex++] << 8;
2833 return SDISel.CheckPatternPredicate(PredNo);
2834}
2835
2836/// CheckNodePredicate - Implements OP_CheckNodePredicate.
2837LLVM_ATTRIBUTE_ALWAYS_INLINE static bool
2838CheckNodePredicate(unsigned Opcode, const unsigned char *MatcherTable,
2839 unsigned &MatcherIndex, const SelectionDAGISel &SDISel,
2840 SDNode *N) {
2841 unsigned PredNo = Opcode == SelectionDAGISel::OPC_CheckPredicate
2842 ? MatcherTable[MatcherIndex++]
2843                        : Opcode - SelectionDAGISel::OPC_CheckPredicate0;
2844  return SDISel.CheckNodePredicate(N, PredNo);
2845}
2846
2847LLVM_ATTRIBUTE_ALWAYS_INLINE static bool
2848CheckOpcode(const unsigned char *MatcherTable, unsigned &MatcherIndex,
2849 SDNode *N) {
2850 uint16_t Opc = MatcherTable[MatcherIndex++];
2851 Opc |= static_cast<uint16_t>(MatcherTable[MatcherIndex++]) << 8;
2852 return N->getOpcode() == Opc;
2853}
2854
2855LLVM_ATTRIBUTE_ALWAYS_INLINE static bool CheckType(MVT::SimpleValueType VT,
2856                                                   SDValue N,
2857 const TargetLowering *TLI,
2858 const DataLayout &DL) {
2859 if (N.getValueType() == VT)
2860 return true;
2861
2862 // Handle the case when VT is iPTR.
2863 return VT == MVT::iPTR && N.getValueType() == TLI->getPointerTy(DL);
2864}
2865
2866LLVM_ATTRIBUTE_ALWAYS_INLINE static bool
2867CheckChildType(MVT::SimpleValueType VT, SDValue N, const TargetLowering *TLI,
2868               const DataLayout &DL, unsigned ChildNo) {
2869 if (ChildNo >= N.getNumOperands())
2870 return false; // Match fails if out of range child #.
2871 return ::CheckType(VT, N.getOperand(ChildNo), TLI, DL);
2872}
2873
2874LLVM_ATTRIBUTE_ALWAYS_INLINE static bool
2875CheckCondCode(const unsigned char *MatcherTable, unsigned &MatcherIndex,
2876 SDValue N) {
2877 return cast<CondCodeSDNode>(N)->get() ==
2878 static_cast<ISD::CondCode>(MatcherTable[MatcherIndex++]);
2879}
2880
2881LLVM_ATTRIBUTE_ALWAYS_INLINE static bool
2882CheckChild2CondCode(const unsigned char *MatcherTable, unsigned &MatcherIndex,
2883 SDValue N) {
2884 if (2 >= N.getNumOperands())
2885 return false;
2886 return ::CheckCondCode(MatcherTable, MatcherIndex, N.getOperand(2));
2887}
2888
2889LLVM_ATTRIBUTE_ALWAYS_INLINE static bool
2890CheckValueType(const unsigned char *MatcherTable, unsigned &MatcherIndex,
2891 SDValue N, const TargetLowering *TLI, const DataLayout &DL) {
2892  MVT::SimpleValueType VT =
2893      static_cast<MVT::SimpleValueType>(MatcherTable[MatcherIndex++]);
2894 if (cast<VTSDNode>(N)->getVT() == VT)
2895 return true;
2896
2897 // Handle the case when VT is iPTR.
2898 return VT == MVT::iPTR && cast<VTSDNode>(N)->getVT() == TLI->getPointerTy(DL);
2899}
2900
2901// Bit 0 stores the sign of the immediate. The upper bits contain the magnitude
2902// shifted left by 1.
2903static uint64_t decodeSignRotatedValue(uint64_t V) {
2904  if ((V & 1) == 0)
2905 return V >> 1;
2906 if (V != 1)
2907 return -(V >> 1);
2908 // There is no such thing as -0 with integers. "-0" really means MININT.
2909 return 1ULL << 63;
2910}
2911
2912LLVM_ATTRIBUTE_ALWAYS_INLINE static bool
2913CheckInteger(const unsigned char *MatcherTable, unsigned &MatcherIndex,
2914 SDValue N) {
2915 int64_t Val = MatcherTable[MatcherIndex++];
2916 if (Val & 128)
2917 Val = GetVBR(Val, MatcherTable, MatcherIndex);
2918
2919 Val = decodeSignRotatedValue(Val);
2920
2921 ConstantSDNode *C = dyn_cast<ConstantSDNode>(N);
2922 return C && C->getAPIntValue().trySExtValue() == Val;
2923}
2924
2925LLVM_ATTRIBUTE_ALWAYS_INLINE static bool
2926CheckChildInteger(const unsigned char *MatcherTable, unsigned &MatcherIndex,
2927 SDValue N, unsigned ChildNo) {
2928 if (ChildNo >= N.getNumOperands())
2929 return false; // Match fails if out of range child #.
2930 return ::CheckInteger(MatcherTable, MatcherIndex, N.getOperand(ChildNo));
2931}
2932
2933LLVM_ATTRIBUTE_ALWAYS_INLINE static bool
2934CheckAndImm(const unsigned char *MatcherTable, unsigned &MatcherIndex,
2935 SDValue N, const SelectionDAGISel &SDISel) {
2936 int64_t Val = MatcherTable[MatcherIndex++];
2937 if (Val & 128)
2938 Val = GetVBR(Val, MatcherTable, MatcherIndex);
2939
2940 if (N->getOpcode() != ISD::AND) return false;
2941
2942 ConstantSDNode *C = dyn_cast<ConstantSDNode>(N->getOperand(1));
2943 return C && SDISel.CheckAndMask(N.getOperand(0), C, Val);
2944}
2945
2946LLVM_ATTRIBUTE_ALWAYS_INLINE static bool
2947CheckOrImm(const unsigned char *MatcherTable, unsigned &MatcherIndex, SDValue N,
2948 const SelectionDAGISel &SDISel) {
2949 int64_t Val = MatcherTable[MatcherIndex++];
2950 if (Val & 128)
2951 Val = GetVBR(Val, MatcherTable, MatcherIndex);
2952
2953 if (N->getOpcode() != ISD::OR) return false;
2954
2955 ConstantSDNode *C = dyn_cast<ConstantSDNode>(N->getOperand(1));
2956 return C && SDISel.CheckOrMask(N.getOperand(0), C, Val);
2957}
2958
2959/// IsPredicateKnownToFail - If we know how and can do so without pushing a
2960/// scope, evaluate the current node. If the current predicate is known to
2961/// fail, set Result=true and return anything. If the current predicate is
2962/// known to pass, set Result=false and return the MatcherIndex to continue
2963/// with. If the current predicate is unknown, set Result=false and return the
2964/// MatcherIndex to continue with.
2965static unsigned IsPredicateKnownToFail(const unsigned char *Table,
2966 unsigned Index, SDValue N,
2967 bool &Result,
2968 const SelectionDAGISel &SDISel,
2969 SmallVectorImpl<std::pair<SDValue, SDNode*>> &RecordedNodes) {
2970 unsigned Opcode = Table[Index++];
2971 switch (Opcode) {
2972 default:
2973 Result = false;
2974 return Index-1; // Could not evaluate this predicate.
2975  case SelectionDAGISel::OPC_CheckSame:
2976    Result = !::CheckSame(Table, Index, N, RecordedNodes);
2977 return Index;
2978  case SelectionDAGISel::OPC_CheckChild0Same:
2979  case SelectionDAGISel::OPC_CheckChild1Same:
2980  case SelectionDAGISel::OPC_CheckChild2Same:
2981  case SelectionDAGISel::OPC_CheckChild3Same:
2982    Result = !::CheckChildSame(Table, Index, N, RecordedNodes,
2983                               Table[Index - 1] - SelectionDAGISel::OPC_CheckChild0Same);
2984    return Index;
2985  case SelectionDAGISel::OPC_CheckPatternPredicate:
2986  case SelectionDAGISel::OPC_CheckPatternPredicate0:
2987  case SelectionDAGISel::OPC_CheckPatternPredicate1:
2988  case SelectionDAGISel::OPC_CheckPatternPredicate2:
2989  case SelectionDAGISel::OPC_CheckPatternPredicate3:
2990  case SelectionDAGISel::OPC_CheckPatternPredicate4:
2991  case SelectionDAGISel::OPC_CheckPatternPredicate5:
2992  case SelectionDAGISel::OPC_CheckPatternPredicate6:
2993  case SelectionDAGISel::OPC_CheckPatternPredicate7:
2994  case SelectionDAGISel::OPC_CheckPatternPredicateTwoByte:
2995    Result = !::CheckPatternPredicate(Opcode, Table, Index, SDISel);
2996 return Index;
2997  case SelectionDAGISel::OPC_CheckPredicate0:
2998  case SelectionDAGISel::OPC_CheckPredicate1:
2999  case SelectionDAGISel::OPC_CheckPredicate2:
3000  case SelectionDAGISel::OPC_CheckPredicate3:
3001  case SelectionDAGISel::OPC_CheckPredicate4:
3002  case SelectionDAGISel::OPC_CheckPredicate5:
3003  case SelectionDAGISel::OPC_CheckPredicate6:
3004  case SelectionDAGISel::OPC_CheckPredicate7:
3005  case SelectionDAGISel::OPC_CheckPredicate:
3006    Result = !::CheckNodePredicate(Opcode, Table, Index, SDISel, N.getNode());
3007 return Index;
3008  case SelectionDAGISel::OPC_CheckOpcode:
3009    Result = !::CheckOpcode(Table, Index, N.getNode());
3010 return Index;
3011  case SelectionDAGISel::OPC_CheckType:
3012  case SelectionDAGISel::OPC_CheckTypeI32:
3013  case SelectionDAGISel::OPC_CheckTypeI64: {
3014    MVT::SimpleValueType VT;
3015    switch (Opcode) {
3016    case SelectionDAGISel::OPC_CheckTypeI32:
3017      VT = MVT::i32;
3018 break;
3019    case SelectionDAGISel::OPC_CheckTypeI64:
3020      VT = MVT::i64;
3021 break;
3022 default:
3023 VT = static_cast<MVT::SimpleValueType>(Table[Index++]);
3024 break;
3025 }
3026 Result = !::CheckType(VT, N, SDISel.TLI, SDISel.CurDAG->getDataLayout());
3027 return Index;
3028 }
3029  case SelectionDAGISel::OPC_CheckTypeRes: {
3030    unsigned Res = Table[Index++];
3031 Result = !::CheckType(static_cast<MVT::SimpleValueType>(Table[Index++]),
3032 N.getValue(Res), SDISel.TLI,
3033 SDISel.CurDAG->getDataLayout());
3034 return Index;
3035 }
3061 unsigned ChildNo;
3062    if (Opcode >= SelectionDAGISel::OPC_CheckChild0TypeI32 &&
3063        Opcode <= SelectionDAGISel::OPC_CheckChild7TypeI32) {
3064      VT = MVT::i32;
3065      ChildNo = Opcode - SelectionDAGISel::OPC_CheckChild0TypeI32;
3066    } else if (Opcode >= SelectionDAGISel::OPC_CheckChild0TypeI64 &&
3067               Opcode <= SelectionDAGISel::OPC_CheckChild7TypeI64) {
3068      VT = MVT::i64;
3069      ChildNo = Opcode - SelectionDAGISel::OPC_CheckChild0TypeI64;
3070    } else {
3071 VT = static_cast<MVT::SimpleValueType>(Table[Index++]);
3072 ChildNo = Opcode - SelectionDAGISel::OPC_CheckChild0Type;
3073 }
3074 Result = !::CheckChildType(VT, N, SDISel.TLI,
3075 SDISel.CurDAG->getDataLayout(), ChildNo);
3076 return Index;
3077 }
3078  case SelectionDAGISel::OPC_CheckCondCode:
3079    Result = !::CheckCondCode(Table, Index, N);
3080 return Index;
3081  case SelectionDAGISel::OPC_CheckChild2CondCode:
3082    Result = !::CheckChild2CondCode(Table, Index, N);
3083 return Index;
3084  case SelectionDAGISel::OPC_CheckValueType:
3085    Result = !::CheckValueType(Table, Index, N, SDISel.TLI,
3086 SDISel.CurDAG->getDataLayout());
3087 return Index;
3088  case SelectionDAGISel::OPC_CheckInteger:
3089    Result = !::CheckInteger(Table, Index, N);
3090 return Index;
3091  case SelectionDAGISel::OPC_CheckChild0Integer:
3092  case SelectionDAGISel::OPC_CheckChild1Integer:
3093  case SelectionDAGISel::OPC_CheckChild2Integer:
3094  case SelectionDAGISel::OPC_CheckChild3Integer:
3095  case SelectionDAGISel::OPC_CheckChild4Integer:
3096    Result = !::CheckChildInteger(Table, Index, N,
3097                                  Opcode - SelectionDAGISel::OPC_CheckChild0Integer);
3098    return Index;
3099  case SelectionDAGISel::OPC_CheckAndImm:
3100    Result = !::CheckAndImm(Table, Index, N, SDISel);
3101 return Index;
3102  case SelectionDAGISel::OPC_CheckOrImm:
3103    Result = !::CheckOrImm(Table, Index, N, SDISel);
3104 return Index;
3105 }
3106}
3107
3108namespace {
3109
3110struct MatchScope {
3111 /// FailIndex - If this match fails, this is the index to continue with.
3112 unsigned FailIndex;
3113
3114 /// NodeStack - The node stack when the scope was formed.
3115 SmallVector<SDValue, 4> NodeStack;
3116
3117 /// NumRecordedNodes - The number of recorded nodes when the scope was formed.
3118 unsigned NumRecordedNodes;
3119
3120 /// NumMatchedMemRefs - The number of matched memref entries.
3121 unsigned NumMatchedMemRefs;
3122
3123 /// InputChain/InputGlue - The current chain/glue
3124 SDValue InputChain, InputGlue;
3125
3126 /// HasChainNodesMatched - True if the ChainNodesMatched list is non-empty.
3127 bool HasChainNodesMatched;
3128};
3129
3130/// A DAG update listener to keep the matching state
3131/// (i.e. RecordedNodes and MatchScope) up to date if the target is allowed to
3132/// change the DAG while matching. X86 addressing mode matcher is an example
3133/// for this.
3134class MatchStateUpdater : public SelectionDAG::DAGUpdateListener
3135{
3136 SDNode **NodeToMatch;
3137  SmallVectorImpl<std::pair<SDValue, SDNode *>> &RecordedNodes;
3138  SmallVectorImpl<MatchScope> &MatchScopes;
3139
3140public:
3141 MatchStateUpdater(SelectionDAG &DAG, SDNode **NodeToMatch,
3142 SmallVectorImpl<std::pair<SDValue, SDNode *>> &RN,
3143                    SmallVectorImpl<MatchScope> &MS)
3144      : SelectionDAG::DAGUpdateListener(DAG), NodeToMatch(NodeToMatch),
3145 RecordedNodes(RN), MatchScopes(MS) {}
3146
3147 void NodeDeleted(SDNode *N, SDNode *E) override {
3148 // Some early-returns here to avoid the search if we deleted the node or
3149 // if the update comes from MorphNodeTo (MorphNodeTo is the last thing we
3150 // do, so it's unnecessary to update matching state at that point).
3151 // Neither of these can occur currently because we only install this
3152    // update listener while matching a complex pattern.
3153 if (!E || E->isMachineOpcode())
3154 return;
3155 // Check if NodeToMatch was updated.
3156 if (N == *NodeToMatch)
3157 *NodeToMatch = E;
3158 // Performing linear search here does not matter because we almost never
3159 // run this code. You'd have to have a CSE during complex pattern
3160 // matching.
3161 for (auto &I : RecordedNodes)
3162 if (I.first.getNode() == N)
3163 I.first.setNode(E);
3164
3165 for (auto &I : MatchScopes)
3166 for (auto &J : I.NodeStack)
3167 if (J.getNode() == N)
3168 J.setNode(E);
3169 }
3170};
3171
3172} // end anonymous namespace
3173
3174void SelectionDAGISel::SelectCodeCommon(SDNode *NodeToMatch,
3175                                        const unsigned char *MatcherTable,
3176 unsigned TableSize) {
3177 // FIXME: Should these even be selected? Handle these cases in the caller?
3178 switch (NodeToMatch->getOpcode()) {
3179 default:
3180 break;
3181 case ISD::EntryToken: // These nodes remain the same.
3182 case ISD::BasicBlock:
3183 case ISD::Register:
3184 case ISD::RegisterMask:
3185 case ISD::HANDLENODE:
3186 case ISD::MDNODE_SDNODE:
3187  case ISD::TargetConstant:
3188  case ISD::TargetConstantFP:
3189  case ISD::TargetConstantPool:
3190  case ISD::TargetFrameIndex:
3191  case ISD::TargetExternalSymbol:
3192  case ISD::MCSymbol:
3193  case ISD::TargetBlockAddress:
3194  case ISD::TargetJumpTable:
3195  case ISD::TargetGlobalTLSAddress:
3196  case ISD::TargetGlobalAddress:
3197  case ISD::TokenFactor:
3198 case ISD::CopyFromReg:
3199 case ISD::CopyToReg:
3200 case ISD::EH_LABEL:
3201  case ISD::ANNOTATION_LABEL:
3202  case ISD::LIFETIME_START:
3203  case ISD::LIFETIME_END:
3204 case ISD::PSEUDO_PROBE:
3205 NodeToMatch->setNodeId(-1); // Mark selected.
3206 return;
3207 case ISD::AssertSext:
3208 case ISD::AssertZext:
3209 case ISD::AssertAlign:
3210 ReplaceUses(SDValue(NodeToMatch, 0), NodeToMatch->getOperand(0));
3211 CurDAG->RemoveDeadNode(NodeToMatch);
3212 return;
3213 case ISD::INLINEASM:
3214 case ISD::INLINEASM_BR:
3215 Select_INLINEASM(NodeToMatch);
3216 return;
3217 case ISD::READ_REGISTER:
3218 Select_READ_REGISTER(NodeToMatch);
3219 return;
3220  case ISD::WRITE_REGISTER:
3221    Select_WRITE_REGISTER(NodeToMatch);
3222 return;
3223 case ISD::UNDEF:
3224 Select_UNDEF(NodeToMatch);
3225 return;
3226 case ISD::FREEZE:
3227 Select_FREEZE(NodeToMatch);
3228 return;
3229 case ISD::ARITH_FENCE:
3230 Select_ARITH_FENCE(NodeToMatch);
3231 return;
3232 case ISD::MEMBARRIER:
3233 Select_MEMBARRIER(NodeToMatch);
3234 return;
3235 case ISD::STACKMAP:
3236 Select_STACKMAP(NodeToMatch);
3237 return;
3238 case ISD::PATCHPOINT:
3239 Select_PATCHPOINT(NodeToMatch);
3240 return;
3241  case ISD::JUMP_TABLE_DEBUG_INFO:
3242    Select_JUMP_TABLE_DEBUG_INFO(NodeToMatch);
3243 return;
3244  case ISD::CONVERGENCECTRL_ANCHOR:
3245    Select_CONVERGENCECTRL_ANCHOR(NodeToMatch);
3246 return;
3247  case ISD::CONVERGENCECTRL_ENTRY:
3248    Select_CONVERGENCECTRL_ENTRY(NodeToMatch);
3249 return;
3250  case ISD::CONVERGENCECTRL_LOOP:
3251    Select_CONVERGENCECTRL_LOOP(NodeToMatch);
3252 return;
3253 }
3254
3255 assert(!NodeToMatch->isMachineOpcode() && "Node already selected!");
3256
3257 // Set up the node stack with NodeToMatch as the only node on the stack.
3258 SmallVector<SDValue, 8> NodeStack;
3259 SDValue N = SDValue(NodeToMatch, 0);
3260 NodeStack.push_back(N);
3261
3262 // MatchScopes - Scopes used when matching, if a match failure happens, this
3263 // indicates where to continue checking.
3264 SmallVector<MatchScope, 8> MatchScopes;
3265
3266 // RecordedNodes - This is the set of nodes that have been recorded by the
3267 // state machine. The second value is the parent of the node, or null if the
3268 // root is recorded.
3269  SmallVector<std::pair<SDValue, SDNode *>, 8> RecordedNodes;
3270
3271 // MatchedMemRefs - This is the set of MemRef's we've seen in the input
3272 // pattern.
3273  SmallVector<MachineMemOperand *, 2> MatchedMemRefs;
3274
3275 // These are the current input chain and glue for use when generating nodes.
3276 // Various Emit operations change these. For example, emitting a copytoreg
3277 // uses and updates these.
3278 SDValue InputChain, InputGlue;
3279
3280 // ChainNodesMatched - If a pattern matches nodes that have input/output
3281 // chains, the OPC_EmitMergeInputChains operation is emitted which indicates
3282 // which ones they are. The result is captured into this list so that we can
3283 // update the chain results when the pattern is complete.
3284 SmallVector<SDNode*, 3> ChainNodesMatched;
3285
3286 LLVM_DEBUG(dbgs() << "ISEL: Starting pattern match\n");
3287
3288 // Determine where to start the interpreter. Normally we start at opcode #0,
3289 // but if the state machine starts with an OPC_SwitchOpcode, then we
3290 // accelerate the first lookup (which is guaranteed to be hot) with the
3291 // OpcodeOffset table.
3292 unsigned MatcherIndex = 0;
3293
3294 if (!OpcodeOffset.empty()) {
3295 // Already computed the OpcodeOffset table, just index into it.
3296 if (N.getOpcode() < OpcodeOffset.size())
3297 MatcherIndex = OpcodeOffset[N.getOpcode()];
3298 LLVM_DEBUG(dbgs() << " Initial Opcode index to " << MatcherIndex << "\n");
3299
3300 } else if (MatcherTable[0] == OPC_SwitchOpcode) {
3301 // Otherwise, the table isn't computed, but the state machine does start
3302 // with an OPC_SwitchOpcode instruction. Populate the table now, since this
3303 // is the first time we're selecting an instruction.
3304 unsigned Idx = 1;
3305 while (true) {
3306 // Get the size of this case.
3307 unsigned CaseSize = MatcherTable[Idx++];
3308 if (CaseSize & 128)
3309 CaseSize = GetVBR(CaseSize, MatcherTable, Idx);
3310 if (CaseSize == 0) break;
3311
3312 // Get the opcode, add the index to the table.
3313 uint16_t Opc = MatcherTable[Idx++];
3314 Opc |= static_cast<uint16_t>(MatcherTable[Idx++]) << 8;
3315 if (Opc >= OpcodeOffset.size())
3316 OpcodeOffset.resize((Opc+1)*2);
3317 OpcodeOffset[Opc] = Idx;
3318 Idx += CaseSize;
3319 }
3320
3321 // Okay, do the lookup for the first opcode.
3322 if (N.getOpcode() < OpcodeOffset.size())
3323 MatcherIndex = OpcodeOffset[N.getOpcode()];
3324 }
3325
3326 while (true) {
3327 assert(MatcherIndex < TableSize && "Invalid index");
3328#ifndef NDEBUG
3329 unsigned CurrentOpcodeIndex = MatcherIndex;
3330#endif
3331 BuiltinOpcodes Opcode =
3332 static_cast<BuiltinOpcodes>(MatcherTable[MatcherIndex++]);
3333 switch (Opcode) {
3334 case OPC_Scope: {
3335 // Okay, the semantics of this operation are that we should push a scope
3336 // then evaluate the first child. However, pushing a scope only to have
3337 // the first check fail (which then pops it) is inefficient. If we can
3338 // determine immediately that the first check (or first several) will
3339 // immediately fail, don't even bother pushing a scope for them.
3340 unsigned FailIndex;
3341
3342 while (true) {
3343 unsigned NumToSkip = MatcherTable[MatcherIndex++];
3344 if (NumToSkip & 128)
3345 NumToSkip = GetVBR(NumToSkip, MatcherTable, MatcherIndex);
3346 // Found the end of the scope with no match.
3347 if (NumToSkip == 0) {
3348 FailIndex = 0;
3349 break;
3350 }
3351
3352 FailIndex = MatcherIndex+NumToSkip;
3353
3354 unsigned MatcherIndexOfPredicate = MatcherIndex;
3355 (void)MatcherIndexOfPredicate; // silence warning.
3356
3357 // If we can't evaluate this predicate without pushing a scope (e.g. if
3358 // it is a 'MoveParent') or if the predicate succeeds on this node, we
3359 // push the scope and evaluate the full predicate chain.
3360 bool Result;
3361 MatcherIndex = IsPredicateKnownToFail(MatcherTable, MatcherIndex, N,
3362 Result, *this, RecordedNodes);
3363 if (!Result)
3364 break;
3365
3366 LLVM_DEBUG(
3367 dbgs() << " Skipped scope entry (due to false predicate) at "
3368 << "index " << MatcherIndexOfPredicate << ", continuing at "
3369 << FailIndex << "\n");
3370 ++NumDAGIselRetries;
3371
3372 // Otherwise, we know that this case of the Scope is guaranteed to fail,
3373 // move to the next case.
3374 MatcherIndex = FailIndex;
3375 }
3376
3377 // If the whole scope failed to match, bail.
3378 if (FailIndex == 0) break;
3379
3380 // Push a MatchScope which indicates where to go if the first child fails
3381 // to match.
3382 MatchScope NewEntry;
3383 NewEntry.FailIndex = FailIndex;
3384 NewEntry.NodeStack.append(NodeStack.begin(), NodeStack.end());
3385 NewEntry.NumRecordedNodes = RecordedNodes.size();
3386 NewEntry.NumMatchedMemRefs = MatchedMemRefs.size();
3387 NewEntry.InputChain = InputChain;
3388 NewEntry.InputGlue = InputGlue;
3389 NewEntry.HasChainNodesMatched = !ChainNodesMatched.empty();
3390 MatchScopes.push_back(NewEntry);
3391 continue;
3392 }
3393 case OPC_RecordNode: {
3394 // Remember this node, it may end up being an operand in the pattern.
3395 SDNode *Parent = nullptr;
3396 if (NodeStack.size() > 1)
3397 Parent = NodeStack[NodeStack.size()-2].getNode();
3398 RecordedNodes.push_back(std::make_pair(N, Parent));
3399 continue;
3400 }
3401
3402    case OPC_RecordChild0: case OPC_RecordChild1:
3403    case OPC_RecordChild2: case OPC_RecordChild3:
3404    case OPC_RecordChild4: case OPC_RecordChild5:
3405    case OPC_RecordChild6: case OPC_RecordChild7: {
3406      unsigned ChildNo = Opcode-OPC_RecordChild0;
3407 if (ChildNo >= N.getNumOperands())
3408 break; // Match fails if out of range child #.
3409
3410 RecordedNodes.push_back(std::make_pair(N->getOperand(ChildNo),
3411 N.getNode()));
3412 continue;
3413 }
3414 case OPC_RecordMemRef:
3415 if (auto *MN = dyn_cast<MemSDNode>(N))
3416 MatchedMemRefs.push_back(MN->getMemOperand());
3417 else {
3418 LLVM_DEBUG(dbgs() << "Expected MemSDNode "; N->dump(CurDAG);
3419 dbgs() << '\n');
3420 }
3421
3422 continue;
3423
3424    case OPC_CaptureGlueInput:
3425      // If the current node has an input glue, capture it in InputGlue.
3426 if (N->getNumOperands() != 0 &&
3427 N->getOperand(N->getNumOperands()-1).getValueType() == MVT::Glue)
3428 InputGlue = N->getOperand(N->getNumOperands()-1);
3429 continue;
3430
3431 case OPC_MoveChild: {
3432 unsigned ChildNo = MatcherTable[MatcherIndex++];
3433 if (ChildNo >= N.getNumOperands())
3434 break; // Match fails if out of range child #.
3435 N = N.getOperand(ChildNo);
3436 NodeStack.push_back(N);
3437 continue;
3438 }
3439
3440 case OPC_MoveChild0: case OPC_MoveChild1:
3441 case OPC_MoveChild2: case OPC_MoveChild3:
3442 case OPC_MoveChild4: case OPC_MoveChild5:
3443 case OPC_MoveChild6: case OPC_MoveChild7: {
3444 unsigned ChildNo = Opcode-OPC_MoveChild0;
3445 if (ChildNo >= N.getNumOperands())
3446 break; // Match fails if out of range child #.
3447 N = N.getOperand(ChildNo);
3448 NodeStack.push_back(N);
3449 continue;
3450 }
3451
3452 case OPC_MoveSibling:
3453 case OPC_MoveSibling0:
3454 case OPC_MoveSibling1:
3455 case OPC_MoveSibling2:
3456 case OPC_MoveSibling3:
3457 case OPC_MoveSibling4:
3458 case OPC_MoveSibling5:
3459 case OPC_MoveSibling6:
3460 case OPC_MoveSibling7: {
3461 // Pop the current node off the NodeStack.
3462 NodeStack.pop_back();
3463 assert(!NodeStack.empty() && "Node stack imbalance!");
3464 N = NodeStack.back();
3465
3466 unsigned SiblingNo = Opcode == OPC_MoveSibling
3467 ? MatcherTable[MatcherIndex++]
3468 : Opcode - OPC_MoveSibling0;
3469 if (SiblingNo >= N.getNumOperands())
3470 break; // Match fails if out of range sibling #.
3471 N = N.getOperand(SiblingNo);
3472 NodeStack.push_back(N);
3473 continue;
3474 }
3475 case OPC_MoveParent:
3476 // Pop the current node off the NodeStack.
3477 NodeStack.pop_back();
3478 assert(!NodeStack.empty() && "Node stack imbalance!");
3479 N = NodeStack.back();
3480 continue;
3481
3482 case OPC_CheckSame:
3483 if (!::CheckSame(MatcherTable, MatcherIndex, N, RecordedNodes)) break;
3484 continue;
3485
3486    case OPC_CheckChild0Same: case OPC_CheckChild1Same:
3487    case OPC_CheckChild2Same: case OPC_CheckChild3Same:
3488      if (!::CheckChildSame(MatcherTable, MatcherIndex, N, RecordedNodes,
3489 Opcode-OPC_CheckChild0Same))
3490 break;
3491 continue;
3492
3493    case OPC_CheckPatternPredicate:
3494    case OPC_CheckPatternPredicate0:
3495    case OPC_CheckPatternPredicate1:
3496    case OPC_CheckPatternPredicate2:
3497    case OPC_CheckPatternPredicate3:
3498    case OPC_CheckPatternPredicate4:
3499    case OPC_CheckPatternPredicate5:
3500    case OPC_CheckPatternPredicate6:
3501    case OPC_CheckPatternPredicate7:
3502    case OPC_CheckPatternPredicateTwoByte:
3503      if (!::CheckPatternPredicate(Opcode, MatcherTable, MatcherIndex, *this))
3504 break;
3505 continue;
3506    case OPC_CheckPredicate0:
3507    case OPC_CheckPredicate1:
3508    case OPC_CheckPredicate2:
3509    case OPC_CheckPredicate3:
3510    case OPC_CheckPredicate4:
3511    case OPC_CheckPredicate5:
3512    case OPC_CheckPredicate6:
3513    case OPC_CheckPredicate7:
3514    case OPC_CheckPredicate:
3515 if (!::CheckNodePredicate(Opcode, MatcherTable, MatcherIndex, *this,
3516 N.getNode()))
3517 break;
3518 continue;
3519    case OPC_CheckPredicateWithOperands: {
3520      unsigned OpNum = MatcherTable[MatcherIndex++];
3521      SmallVector<SDValue, 8> Operands;
3522
3523 for (unsigned i = 0; i < OpNum; ++i)
3524 Operands.push_back(RecordedNodes[MatcherTable[MatcherIndex++]].first);
3525
3526 unsigned PredNo = MatcherTable[MatcherIndex++];
3527 if (!CheckNodePredicateWithOperands(N.getNode(), PredNo, Operands))
3528 break;
3529 continue;
3530 }
3531    case OPC_CheckComplexPat:
3532    case OPC_CheckComplexPat0:
3533    case OPC_CheckComplexPat1:
3534    case OPC_CheckComplexPat2:
3535    case OPC_CheckComplexPat3:
3536    case OPC_CheckComplexPat4:
3537    case OPC_CheckComplexPat5:
3538    case OPC_CheckComplexPat6:
3539    case OPC_CheckComplexPat7: {
3540 unsigned CPNum = Opcode == OPC_CheckComplexPat
3541 ? MatcherTable[MatcherIndex++]
3542 : Opcode - OPC_CheckComplexPat0;
3543 unsigned RecNo = MatcherTable[MatcherIndex++];
3544 assert(RecNo < RecordedNodes.size() && "Invalid CheckComplexPat");
3545
3546 // If target can modify DAG during matching, keep the matching state
3547 // consistent.
3548 std::unique_ptr<MatchStateUpdater> MSU;
3549      if (ComplexPatternFuncMutatesDAG())
3550        MSU.reset(new MatchStateUpdater(*CurDAG, &NodeToMatch, RecordedNodes,
3551 MatchScopes));
3552
3553 if (!CheckComplexPattern(NodeToMatch, RecordedNodes[RecNo].second,
3554 RecordedNodes[RecNo].first, CPNum,
3555 RecordedNodes))
3556 break;
3557 continue;
3558 }
3559 case OPC_CheckOpcode:
3560 if (!::CheckOpcode(MatcherTable, MatcherIndex, N.getNode())) break;
3561 continue;
3562
3563 case OPC_CheckType:
3564 case OPC_CheckTypeI32:
3565 case OPC_CheckTypeI64:
3566      MVT::SimpleValueType VT;
3567      switch (Opcode) {
3568 case OPC_CheckTypeI32:
3569 VT = MVT::i32;
3570 break;
3571 case OPC_CheckTypeI64:
3572 VT = MVT::i64;
3573 break;
3574 default:
3575 VT = static_cast<MVT::SimpleValueType>(MatcherTable[MatcherIndex++]);
3576 break;
3577 }
3578 if (!::CheckType(VT, N, TLI, CurDAG->getDataLayout()))
3579 break;
3580 continue;
3581
3582 case OPC_CheckTypeRes: {
3583 unsigned Res = MatcherTable[MatcherIndex++];
3584 if (!::CheckType(
3585 static_cast<MVT::SimpleValueType>(MatcherTable[MatcherIndex++]),
3586 N.getValue(Res), TLI, CurDAG->getDataLayout()))
3587 break;
3588 continue;
3589 }
3590
3591 case OPC_SwitchOpcode: {
3592 unsigned CurNodeOpcode = N.getOpcode();
3593 unsigned SwitchStart = MatcherIndex-1; (void)SwitchStart;
3594 unsigned CaseSize;
3595 while (true) {
3596 // Get the size of this case.
3597 CaseSize = MatcherTable[MatcherIndex++];
3598 if (CaseSize & 128)
3599 CaseSize = GetVBR(CaseSize, MatcherTable, MatcherIndex);
3600 if (CaseSize == 0) break;
3601
3602 uint16_t Opc = MatcherTable[MatcherIndex++];
3603 Opc |= static_cast<uint16_t>(MatcherTable[MatcherIndex++]) << 8;
3604
3605 // If the opcode matches, then we will execute this case.
3606 if (CurNodeOpcode == Opc)
3607 break;
3608
3609 // Otherwise, skip over this case.
3610 MatcherIndex += CaseSize;
3611 }
3612
3613 // If no cases matched, bail out.
3614 if (CaseSize == 0) break;
3615
3616 // Otherwise, execute the case we found.
3617 LLVM_DEBUG(dbgs() << " OpcodeSwitch from " << SwitchStart << " to "
3618 << MatcherIndex << "\n");
3619 continue;
3620 }
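// Editorial sketch (added for exposition; not part of the upstream file): a
// SwitchOpcode region of the matcher table is a sequence of cases, each
// introduced by a VBR-encoded body size and a two-byte little-endian target
// node opcode, and terminated by a zero size, roughly:
//
//   OPC_SwitchOpcode,
//     CaseSize1, OpcLo1, OpcHi1, <CaseSize1 bytes of matcher code>,
//     CaseSize2, OpcLo2, OpcHi2, <CaseSize2 bytes of matcher code>,
//     0 /*end of switch*/
//
// The loop above adds CaseSize to MatcherIndex for every non-matching opcode
// and falls through into the body of the first case whose opcode equals
// CurNodeOpcode.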
3621
3622 case OPC_SwitchType: {
3623 MVT CurNodeVT = N.getSimpleValueType();
3624 unsigned SwitchStart = MatcherIndex-1; (void)SwitchStart;
3625 unsigned CaseSize;
3626 while (true) {
3627 // Get the size of this case.
3628 CaseSize = MatcherTable[MatcherIndex++];
3629 if (CaseSize & 128)
3630 CaseSize = GetVBR(CaseSize, MatcherTable, MatcherIndex);
3631 if (CaseSize == 0) break;
3632
3633 MVT CaseVT =
3634 static_cast<MVT::SimpleValueType>(MatcherTable[MatcherIndex++]);
3635 if (CaseVT == MVT::iPTR)
3636 CaseVT = TLI->getPointerTy(CurDAG->getDataLayout());
3637
3638 // If the VT matches, then we will execute this case.
3639 if (CurNodeVT == CaseVT)
3640 break;
3641
3642 // Otherwise, skip over this case.
3643 MatcherIndex += CaseSize;
3644 }
3645
3646 // If no cases matched, bail out.
3647 if (CaseSize == 0) break;
3648
3649 // Otherwise, execute the case we found.
3650 LLVM_DEBUG(dbgs() << " TypeSwitch[" << CurNodeVT
3651 << "] from " << SwitchStart << " to " << MatcherIndex
3652 << '\n');
3653 continue;
3654 }
3655 case OPC_CheckChild0Type:
3656 case OPC_CheckChild1Type:
3657 case OPC_CheckChild2Type:
3658 case OPC_CheckChild3Type:
3659 case OPC_CheckChild4Type:
3660 case OPC_CheckChild5Type:
3661 case OPC_CheckChild6Type:
3662 case OPC_CheckChild7Type:
3663 case OPC_CheckChild0TypeI32:
3664 case OPC_CheckChild1TypeI32:
3665 case OPC_CheckChild2TypeI32:
3666 case OPC_CheckChild3TypeI32:
3667 case OPC_CheckChild4TypeI32:
3668 case OPC_CheckChild5TypeI32:
3669 case OPC_CheckChild6TypeI32:
3670 case OPC_CheckChild7TypeI32:
3671 case OPC_CheckChild0TypeI64:
3672 case OPC_CheckChild1TypeI64:
3673 case OPC_CheckChild2TypeI64:
3674 case OPC_CheckChild3TypeI64:
3675 case OPC_CheckChild4TypeI64:
3676 case OPC_CheckChild5TypeI64:
3677 case OPC_CheckChild6TypeI64:
3678 case OPC_CheckChild7TypeI64: {
3679 MVT::SimpleValueType VT;
3680 unsigned ChildNo;
3681 if (Opcode >= SelectionDAGISel::OPC_CheckChild0TypeI32 &&
3682 Opcode <= SelectionDAGISel::OPC_CheckChild7TypeI32) {
3683 VT = MVT::i32;
3684 ChildNo = Opcode - SelectionDAGISel::OPC_CheckChild0TypeI32;
3685 } else if (Opcode >= SelectionDAGISel::OPC_CheckChild0TypeI64 &&
3686 Opcode <= SelectionDAGISel::OPC_CheckChild7TypeI64) {
3687 VT = MVT::i64;
3688 ChildNo = Opcode - SelectionDAGISel::OPC_CheckChild0TypeI64;
3689 } else {
3690 VT = static_cast<MVT::SimpleValueType>(MatcherTable[MatcherIndex++]);
3691 ChildNo = Opcode - SelectionDAGISel::OPC_CheckChild0Type;
3692 }
3693 if (!::CheckChildType(VT, N, TLI, CurDAG->getDataLayout(), ChildNo))
3694 break;
3695 continue;
3696 }
3697 case OPC_CheckCondCode:
3698 if (!::CheckCondCode(MatcherTable, MatcherIndex, N)) break;
3699 continue;
3700 case OPC_CheckChild2CondCode:
3701 if (!::CheckChild2CondCode(MatcherTable, MatcherIndex, N)) break;
3702 continue;
3703 case OPC_CheckValueType:
3704 if (!::CheckValueType(MatcherTable, MatcherIndex, N, TLI,
3705 CurDAG->getDataLayout()))
3706 break;
3707 continue;
3708 case OPC_CheckInteger:
3709 if (!::CheckInteger(MatcherTable, MatcherIndex, N)) break;
3710 continue;
3711 case OPC_CheckChild0Integer: case OPC_CheckChild1Integer:
3712 case OPC_CheckChild2Integer: case OPC_CheckChild3Integer:
3713 case OPC_CheckChild4Integer:
3714 if (!::CheckChildInteger(MatcherTable, MatcherIndex, N,
3715 Opcode-OPC_CheckChild0Integer)) break;
3716 continue;
3717 case OPC_CheckAndImm:
3718 if (!::CheckAndImm(MatcherTable, MatcherIndex, N, *this)) break;
3719 continue;
3720 case OPC_CheckOrImm:
3721 if (!::CheckOrImm(MatcherTable, MatcherIndex, N, *this)) break;
3722 continue;
3723 case OPC_CheckImmAllOnesV:
3724 if (!ISD::isConstantSplatVectorAllOnes(N.getNode()))
3725 break;
3726 continue;
3727 case OPC_CheckImmAllZerosV:
3728 if (!ISD::isConstantSplatVectorAllZeros(N.getNode()))
3729 break;
3730 continue;
3731
3732 case OPC_CheckFoldableChainNode: {
3733 assert(NodeStack.size() != 1 && "No parent node");
3734 // Verify that all intermediate nodes between the root and this one have
3735 // a single use (ignoring chains, which are handled in UpdateChains).
3736 bool HasMultipleUses = false;
3737 for (unsigned i = 1, e = NodeStack.size()-1; i != e; ++i) {
3738 unsigned NNonChainUses = 0;
3739 SDNode *NS = NodeStack[i].getNode();
3740 for (auto UI = NS->use_begin(), UE = NS->use_end(); UI != UE; ++UI)
3741 if (UI.getUse().getValueType() != MVT::Other)
3742 if (++NNonChainUses > 1) {
3743 HasMultipleUses = true;
3744 break;
3745 }
3746 if (HasMultipleUses) break;
3747 }
3748 if (HasMultipleUses) break;
3749
3750 // Check to see that the target thinks this is profitable to fold and that
3751 // we can fold it without inducing cycles in the graph.
3752 if (!IsProfitableToFold(N, NodeStack[NodeStack.size()-2].getNode(),
3753 NodeToMatch) ||
3754 !IsLegalToFold(N, NodeStack[NodeStack.size()-2].getNode(),
3755 NodeToMatch, OptLevel,
3756 true/*We validate our own chains*/))
3757 break;
3758
3759 continue;
3760 }
3761 case OPC_EmitInteger:
3762 case OPC_EmitInteger8:
3763 case OPC_EmitInteger16:
3764 case OPC_EmitInteger32:
3765 case OPC_EmitInteger64:
3766 case OPC_EmitStringInteger:
3767 case OPC_EmitStringInteger32: {
3768 MVT::SimpleValueType VT;
3769 switch (Opcode) {
3770 case OPC_EmitInteger8:
3771 VT = MVT::i8;
3772 break;
3773 case OPC_EmitInteger16:
3774 VT = MVT::i16;
3775 break;
3776 case OPC_EmitInteger32:
3777 case OPC_EmitStringInteger32:
3778 VT = MVT::i32;
3779 break;
3780 case OPC_EmitInteger64:
3781 VT = MVT::i64;
3782 break;
3783 default:
3784 VT = static_cast<MVT::SimpleValueType>(MatcherTable[MatcherIndex++]);
3785 break;
3786 }
3787 int64_t Val = MatcherTable[MatcherIndex++];
3788 if (Val & 128)
3789 Val = GetVBR(Val, MatcherTable, MatcherIndex);
3790 if (Opcode >= OPC_EmitInteger && Opcode <= OPC_EmitInteger64)
3791 Val = decodeSignRotatedValue(Val);
3792 RecordedNodes.push_back(std::pair<SDValue, SDNode *>(
3793 CurDAG->getTargetConstant(Val, SDLoc(NodeToMatch), VT), nullptr));
3794 continue;
3795 }
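// Editorial sketch (added for exposition; not part of the upstream file):
// OPC_EmitInteger payloads are stored "sign rotated" so that small negative
// values keep a short VBR encoding: the sign ends up in bit 0 and the
// magnitude in the upper bits. A standalone decoder with the behaviour this
// code relies on could look like:
//
//   static int64_t decodeSignRotated(uint64_t V) {
//     if ((V & 1) == 0)
//       return static_cast<int64_t>(V >> 1);    // even encoding: non-negative
//     if (V != 1)
//       return -static_cast<int64_t>(V >> 1);   // odd encoding: negative
//     return INT64_MIN;                         // "-0" encodes the minimum
//   }
//
// The OPC_EmitStringInteger forms are not sign rotated, which is why the
// decode above is guarded by the OPC_EmitInteger..OPC_EmitInteger64 range
// check.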
3796 case OPC_EmitRegister:
3797 case OPC_EmitRegisterI32:
3798 case OPC_EmitRegisterI64: {
3799 MVT::SimpleValueType VT;
3800 switch (Opcode) {
3801 case OPC_EmitRegisterI32:
3802 VT = MVT::i32;
3803 break;
3804 case OPC_EmitRegisterI64:
3805 VT = MVT::i64;
3806 break;
3807 default:
3808 VT = static_cast<MVT::SimpleValueType>(MatcherTable[MatcherIndex++]);
3809 break;
3810 }
3811 unsigned RegNo = MatcherTable[MatcherIndex++];
3812 RecordedNodes.push_back(std::pair<SDValue, SDNode *>(
3813 CurDAG->getRegister(RegNo, VT), nullptr));
3814 continue;
3815 }
3816 case OPC_EmitRegister2: {
3817 // For targets w/ more than 256 register names, the register enum
3818 // values are stored in two bytes in the matcher table (just like
3819 // opcodes).
3820 MVT::SimpleValueType VT =
3821 static_cast<MVT::SimpleValueType>(MatcherTable[MatcherIndex++]);
3822 unsigned RegNo = MatcherTable[MatcherIndex++];
3823 RegNo |= MatcherTable[MatcherIndex++] << 8;
3824 RecordedNodes.push_back(std::pair<SDValue, SDNode*>(
3825 CurDAG->getRegister(RegNo, VT), nullptr));
3826 continue;
3827 }
3828
3829 case OPC_EmitConvertToTarget:
3830 case OPC_EmitConvertToTarget0:
3831 case OPC_EmitConvertToTarget1:
3832 case OPC_EmitConvertToTarget2:
3833 case OPC_EmitConvertToTarget3:
3834 case OPC_EmitConvertToTarget4:
3835 case OPC_EmitConvertToTarget5:
3836 case OPC_EmitConvertToTarget6:
3837 case OPC_EmitConvertToTarget7: {
3838 // Convert from IMM/FPIMM to target version.
3839 unsigned RecNo = Opcode == OPC_EmitConvertToTarget
3840 ? MatcherTable[MatcherIndex++]
3841 : Opcode - OPC_EmitConvertToTarget0;
3842 assert(RecNo < RecordedNodes.size() && "Invalid EmitConvertToTarget");
3843 SDValue Imm = RecordedNodes[RecNo].first;
3844
3845 if (Imm->getOpcode() == ISD::Constant) {
3846 const ConstantInt *Val=cast<ConstantSDNode>(Imm)->getConstantIntValue();
3847 Imm = CurDAG->getTargetConstant(*Val, SDLoc(NodeToMatch),
3848 Imm.getValueType());
3849 } else if (Imm->getOpcode() == ISD::ConstantFP) {
3850 const ConstantFP *Val=cast<ConstantFPSDNode>(Imm)->getConstantFPValue();
3851 Imm = CurDAG->getTargetConstantFP(*Val, SDLoc(NodeToMatch),
3852 Imm.getValueType());
3853 }
3854
3855 RecordedNodes.push_back(std::make_pair(Imm, RecordedNodes[RecNo].second));
3856 continue;
3857 }
3858
3859 case OPC_EmitMergeInputChains1_0: // OPC_EmitMergeInputChains, 1, 0
3860 case OPC_EmitMergeInputChains1_1: // OPC_EmitMergeInputChains, 1, 1
3861 case OPC_EmitMergeInputChains1_2: { // OPC_EmitMergeInputChains, 1, 2
3862 // These are space-optimized forms of OPC_EmitMergeInputChains.
3863 assert(!InputChain.getNode() &&
3864 "EmitMergeInputChains should be the first chain producing node");
3865 assert(ChainNodesMatched.empty() &&
3866 "Should only have one EmitMergeInputChains per match");
3867
3868 // Read all of the chained nodes.
3869 unsigned RecNo = Opcode - OPC_EmitMergeInputChains1_0;
3870 assert(RecNo < RecordedNodes.size() && "Invalid EmitMergeInputChains");
3871 ChainNodesMatched.push_back(RecordedNodes[RecNo].first.getNode());
3872
3873 // If the chained node is not the root, we can't fold it if it has
3874 // multiple uses.
3875 // FIXME: What if other value results of the node have uses not matched
3876 // by this pattern?
3877 if (ChainNodesMatched.back() != NodeToMatch &&
3878 !RecordedNodes[RecNo].first.hasOneUse()) {
3879 ChainNodesMatched.clear();
3880 break;
3881 }
3882
3883 // Merge the input chains if they are not intra-pattern references.
3884 InputChain = HandleMergeInputChains(ChainNodesMatched, CurDAG);
3885
3886 if (!InputChain.getNode())
3887 break; // Failed to merge.
3888 continue;
3889 }
3890
3891 case OPC_EmitMergeInputChains: {
3892 assert(!InputChain.getNode() &&
3893 "EmitMergeInputChains should be the first chain producing node");
3894 // This node gets a list of nodes we matched in the input that have
3895 // chains. We want to token factor all of the input chains to these nodes
3896 // together. However, if any of the input chains is actually one of the
3897 // nodes matched in this pattern, then we have an intra-match reference.
3898 // Ignore these because the newly token factored chain should not refer to
3899 // the old nodes.
3900 unsigned NumChains = MatcherTable[MatcherIndex++];
3901 assert(NumChains != 0 && "Can't TF zero chains");
3902
3903 assert(ChainNodesMatched.empty() &&
3904 "Should only have one EmitMergeInputChains per match");
3905
3906 // Read all of the chained nodes.
3907 for (unsigned i = 0; i != NumChains; ++i) {
3908 unsigned RecNo = MatcherTable[MatcherIndex++];
3909 assert(RecNo < RecordedNodes.size() && "Invalid EmitMergeInputChains");
3910 ChainNodesMatched.push_back(RecordedNodes[RecNo].first.getNode());
3911
3912 // If the chained node is not the root, we can't fold it if it has
3913 // multiple uses.
3914 // FIXME: What if other value results of the node have uses not matched
3915 // by this pattern?
3916 if (ChainNodesMatched.back() != NodeToMatch &&
3917 !RecordedNodes[RecNo].first.hasOneUse()) {
3918 ChainNodesMatched.clear();
3919 break;
3920 }
3921 }
3922
3923 // If the inner loop broke out, the match fails.
3924 if (ChainNodesMatched.empty())
3925 break;
3926
3927 // Merge the input chains if they are not intra-pattern references.
3928 InputChain = HandleMergeInputChains(ChainNodesMatched, CurDAG);
3929
3930 if (!InputChain.getNode())
3931 break; // Failed to merge.
3932
3933 continue;
3934 }
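// Editorial note (added for exposition; not part of the upstream file):
// HandleMergeInputChains produces the single chain operand the new machine
// node will consume. When the recorded nodes have distinct input chains this
// is effectively an ISD::TokenFactor of those chains; chains that point back
// into the matched pattern itself are intra-match references and are left
// out, since the emitted node must not depend on nodes that are about to be
// replaced.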
3935
3936 case OPC_EmitCopyToReg:
3937 case OPC_EmitCopyToReg0:
3938 case OPC_EmitCopyToReg1:
3939 case OPC_EmitCopyToReg2:
3940 case OPC_EmitCopyToReg3:
3941 case OPC_EmitCopyToReg4:
3942 case OPC_EmitCopyToReg5:
3943 case OPC_EmitCopyToReg6:
3944 case OPC_EmitCopyToReg7:
3945 case OPC_EmitCopyToRegTwoByte: {
3946 unsigned RecNo =
3947 Opcode >= OPC_EmitCopyToReg0 && Opcode <= OPC_EmitCopyToReg7
3948 ? Opcode - OPC_EmitCopyToReg0
3949 : MatcherTable[MatcherIndex++];
3950 assert(RecNo < RecordedNodes.size() && "Invalid EmitCopyToReg");
3951 unsigned DestPhysReg = MatcherTable[MatcherIndex++];
3952 if (Opcode == OPC_EmitCopyToRegTwoByte)
3953 DestPhysReg |= MatcherTable[MatcherIndex++] << 8;
3954
3955 if (!InputChain.getNode())
3956 InputChain = CurDAG->getEntryNode();
3957
3958 InputChain = CurDAG->getCopyToReg(InputChain, SDLoc(NodeToMatch),
3959 DestPhysReg, RecordedNodes[RecNo].first,
3960 InputGlue);
3961
3962 InputGlue = InputChain.getValue(1);
3963 continue;
3964 }
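// Editorial note (added for exposition; not part of the upstream file): each
// OPC_EmitCopyToReg grows InputChain with a CopyToReg node and captures that
// node's glue result in InputGlue, so a run of physical-register copies and
// the instruction emitted after them stay glued together and cannot be
// rescheduled apart.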
3965
3966 case OPC_EmitNodeXForm: {
3967 unsigned XFormNo = MatcherTable[MatcherIndex++];
3968 unsigned RecNo = MatcherTable[MatcherIndex++];
3969 assert(RecNo < RecordedNodes.size() && "Invalid EmitNodeXForm");
3970 SDValue Res = RunSDNodeXForm(RecordedNodes[RecNo].first, XFormNo);
3971 RecordedNodes.push_back(std::pair<SDValue,SDNode*>(Res, nullptr));
3972 continue;
3973 }
3974 case OPC_Coverage: {
3975 // This is emitted right before MorphNode/EmitNode.
3976 // So it should be safe to assume that this node has been selected.
3977 unsigned index = MatcherTable[MatcherIndex++];
3978 index |= (MatcherTable[MatcherIndex++] << 8);
3979 dbgs() << "COVERED: " << getPatternForIndex(index) << "\n";
3980 dbgs() << "INCLUDED: " << getIncludePathForIndex(index) << "\n";
3981 continue;
3982 }
3983
3984 case OPC_EmitNode:
3985 case OPC_EmitNode0:
3986 case OPC_EmitNode1:
3987 case OPC_EmitNode2:
3988 case OPC_EmitNode0None:
3989 case OPC_EmitNode1None:
3990 case OPC_EmitNode2None:
3991 case OPC_EmitNode0Chain:
3992 case OPC_EmitNode1Chain:
3993 case OPC_EmitNode2Chain:
3994 case OPC_MorphNodeTo:
3995 case OPC_MorphNodeTo0:
3996 case OPC_MorphNodeTo1:
3997 case OPC_MorphNodeTo2:
3998 case OPC_MorphNodeTo0None:
3999 case OPC_MorphNodeTo1None:
4000 case OPC_MorphNodeTo2None:
4001 case OPC_MorphNodeTo0Chain:
4002 case OPC_MorphNodeTo1Chain:
4003 case OPC_MorphNodeTo2Chain:
4004 case OPC_MorphNodeTo0GlueInput:
4005 case OPC_MorphNodeTo1GlueInput:
4006 case OPC_MorphNodeTo2GlueInput:
4007 case OPC_MorphNodeTo0GlueOutput:
4008 case OPC_MorphNodeTo1GlueOutput:
4009 case OPC_MorphNodeTo2GlueOutput: {
4010 uint16_t TargetOpc = MatcherTable[MatcherIndex++];
4011 TargetOpc |= static_cast<uint16_t>(MatcherTable[MatcherIndex++]) << 8;
4012 unsigned EmitNodeInfo;
4013 if (Opcode >= OPC_EmitNode0None && Opcode <= OPC_EmitNode2Chain) {
4014 if (Opcode >= OPC_EmitNode0Chain && Opcode <= OPC_EmitNode2Chain)
4015 EmitNodeInfo = OPFL_Chain;
4016 else
4017 EmitNodeInfo = OPFL_None;
4018 } else if (Opcode >= OPC_MorphNodeTo0None &&
4019 Opcode <= OPC_MorphNodeTo2GlueOutput) {
4020 if (Opcode >= OPC_MorphNodeTo0Chain && Opcode <= OPC_MorphNodeTo2Chain)
4021 EmitNodeInfo = OPFL_Chain;
4022 else if (Opcode >= OPC_MorphNodeTo0GlueInput &&
4023 Opcode <= OPC_MorphNodeTo2GlueInput)
4024 EmitNodeInfo = OPFL_GlueInput;
4025 else if (Opcode >= OPC_MorphNodeTo0GlueOutput &&
4026 Opcode <= OPC_MorphNodeTo2GlueOutput)
4027 EmitNodeInfo = OPFL_GlueOutput;
4028 else
4029 EmitNodeInfo = OPFL_None;
4030 } else
4031 EmitNodeInfo = MatcherTable[MatcherIndex++];
4032 // Get the result VT list.
4033 unsigned NumVTs;
4034 // If this is one of the compressed forms, get the number of VTs based
4035 // on the Opcode. Otherwise read the next byte from the table.
4036 if (Opcode >= OPC_MorphNodeTo0 && Opcode <= OPC_MorphNodeTo2)
4037 NumVTs = Opcode - OPC_MorphNodeTo0;
4038 else if (Opcode >= OPC_MorphNodeTo0None && Opcode <= OPC_MorphNodeTo2None)
4039 NumVTs = Opcode - OPC_MorphNodeTo0None;
4040 else if (Opcode >= OPC_MorphNodeTo0Chain &&
4041 Opcode <= OPC_MorphNodeTo2Chain)
4042 NumVTs = Opcode - OPC_MorphNodeTo0Chain;
4043 else if (Opcode >= OPC_MorphNodeTo0GlueInput &&
4044 Opcode <= OPC_MorphNodeTo2GlueInput)
4045 NumVTs = Opcode - OPC_MorphNodeTo0GlueInput;
4046 else if (Opcode >= OPC_MorphNodeTo0GlueOutput &&
4047 Opcode <= OPC_MorphNodeTo2GlueOutput)
4048 NumVTs = Opcode - OPC_MorphNodeTo0GlueOutput;
4049 else if (Opcode >= OPC_EmitNode0 && Opcode <= OPC_EmitNode2)
4050 NumVTs = Opcode - OPC_EmitNode0;
4051 else if (Opcode >= OPC_EmitNode0None && Opcode <= OPC_EmitNode2None)
4052 NumVTs = Opcode - OPC_EmitNode0None;
4053 else if (Opcode >= OPC_EmitNode0Chain && Opcode <= OPC_EmitNode2Chain)
4054 NumVTs = Opcode - OPC_EmitNode0Chain;
4055 else
4056 NumVTs = MatcherTable[MatcherIndex++];
4057 SmallVector<EVT, 4> VTs;
4058 for (unsigned i = 0; i != NumVTs; ++i) {
4059 MVT::SimpleValueType VT =
4060 static_cast<MVT::SimpleValueType>(MatcherTable[MatcherIndex++]);
4061 if (VT == MVT::iPTR)
4062 VT = TLI->getPointerTy(CurDAG->getDataLayout()).SimpleTy;
4063 VTs.push_back(VT);
4064 }
4065
4066 if (EmitNodeInfo & OPFL_Chain)
4067 VTs.push_back(MVT::Other);
4068 if (EmitNodeInfo & OPFL_GlueOutput)
4069 VTs.push_back(MVT::Glue);
4070
4071 // This is hot code, so optimize the two most common cases of 1 and 2
4072 // results.
4073 SDVTList VTList;
4074 if (VTs.size() == 1)
4075 VTList = CurDAG->getVTList(VTs[0]);
4076 else if (VTs.size() == 2)
4077 VTList = CurDAG->getVTList(VTs[0], VTs[1]);
4078 else
4079 VTList = CurDAG->getVTList(VTs);
4080
4081 // Get the operand list.
4082 unsigned NumOps = MatcherTable[MatcherIndex++];
4083 SmallVector<SDValue, 8> Ops;
4084 for (unsigned i = 0; i != NumOps; ++i) {
4085 unsigned RecNo = MatcherTable[MatcherIndex++];
4086 if (RecNo & 128)
4087 RecNo = GetVBR(RecNo, MatcherTable, MatcherIndex);
4088
4089 assert(RecNo < RecordedNodes.size() && "Invalid EmitNode");
4090 Ops.push_back(RecordedNodes[RecNo].first);
4091 }
4092
4093 // If there are variadic operands to add, handle them now.
4094 if (EmitNodeInfo & OPFL_VariadicInfo) {
4095 // Determine the start index to copy from.
4096 unsigned FirstOpToCopy = getNumFixedFromVariadicInfo(EmitNodeInfo);
4097 FirstOpToCopy += (EmitNodeInfo & OPFL_Chain) ? 1 : 0;
4098 assert(NodeToMatch->getNumOperands() >= FirstOpToCopy &&
4099 "Invalid variadic node");
4100 // Copy all of the variadic operands, not including a potential glue
4101 // input.
4102 for (unsigned i = FirstOpToCopy, e = NodeToMatch->getNumOperands();
4103 i != e; ++i) {
4104 SDValue V = NodeToMatch->getOperand(i);
4105 if (V.getValueType() == MVT::Glue) break;
4106 Ops.push_back(V);
4107 }
4108 }
4109
4110 // If this has chain/glue inputs, add them.
4111 if (EmitNodeInfo & OPFL_Chain)
4112 Ops.push_back(InputChain);
4113 if ((EmitNodeInfo & OPFL_GlueInput) && InputGlue.getNode() != nullptr)
4114 Ops.push_back(InputGlue);
4115
4116 // Check whether any matched node could raise an FP exception. Since all
4117 // such nodes must have a chain, it suffices to check ChainNodesMatched.
4118 // We need to perform this check before potentially modifying one of the
4119 // nodes via MorphNode.
4120 bool MayRaiseFPException =
4121 llvm::any_of(ChainNodesMatched, [this](SDNode *N) {
4122 return mayRaiseFPException(N) && !N->getFlags().hasNoFPExcept();
4123 });
4124
4125 // Create the node.
4126 MachineSDNode *Res = nullptr;
4127 bool IsMorphNodeTo =
4128 Opcode == OPC_MorphNodeTo ||
4129 (Opcode >= OPC_MorphNodeTo0 && Opcode <= OPC_MorphNodeTo2GlueOutput);
4130 if (!IsMorphNodeTo) {
4131 // If this is a normal EmitNode command, just create the new node and
4132 // add the results to the RecordedNodes list.
4133 Res = CurDAG->getMachineNode(TargetOpc, SDLoc(NodeToMatch),
4134 VTList, Ops);
4135
4136 // Add all the non-glue/non-chain results to the RecordedNodes list.
4137 for (unsigned i = 0, e = VTs.size(); i != e; ++i) {
4138 if (VTs[i] == MVT::Other || VTs[i] == MVT::Glue) break;
4139 RecordedNodes.push_back(std::pair<SDValue,SDNode*>(SDValue(Res, i),
4140 nullptr));
4141 }
4142 } else {
4143 assert(NodeToMatch->getOpcode() != ISD::DELETED_NODE &&
4144 "NodeToMatch was removed partway through selection");
4146 SDNode *E) {
4147 CurDAG->salvageDebugInfo(*N);
4148 auto &Chain = ChainNodesMatched;
4149 assert((!E || !is_contained(Chain, N)) &&
4150 "Chain node replaced during MorphNode");
4151 llvm::erase(Chain, N);
4152 });
4153 Res = cast<MachineSDNode>(MorphNode(NodeToMatch, TargetOpc, VTList,
4154 Ops, EmitNodeInfo));
4155 }
4156
4157 // Set the NoFPExcept flag when no original matched node could
4158 // raise an FP exception, but the new node potentially might.
4159 if (!MayRaiseFPException && mayRaiseFPException(Res)) {
4160 SDNodeFlags Flags = Res->getFlags();
4161 Flags.setNoFPExcept(true);
4162 Res->setFlags(Flags);
4163 }
4164
4165 // If the node had chain/glue results, update our notion of the current
4166 // chain and glue.
4167 if (EmitNodeInfo & OPFL_GlueOutput) {
4168 InputGlue = SDValue(Res, VTs.size()-1);
4169 if (EmitNodeInfo & OPFL_Chain)
4170 InputChain = SDValue(Res, VTs.size()-2);
4171 } else if (EmitNodeInfo & OPFL_Chain)
4172 InputChain = SDValue(Res, VTs.size()-1);
4173
4174 // If the OPFL_MemRefs glue is set on this node, slap all of the
4175 // accumulated memrefs onto it.
4176 //
4177 // FIXME: This is vastly incorrect for patterns with multiple-output
4178 // instructions that access memory and for ComplexPatterns that match
4179 // loads.
4180 if (EmitNodeInfo & OPFL_MemRefs) {
4181 // Only attach load or store memory operands if the generated
4182 // instruction may load or store.
4183 const MCInstrDesc &MCID = TII->get(TargetOpc);
4184 bool mayLoad = MCID.mayLoad();
4185 bool mayStore = MCID.mayStore();
4186
4187 // We expect to have relatively few of these so just filter them into a
4188 // temporary buffer so that we can easily add them to the instruction.
4189 SmallVector<MachineMemOperand *, 2> FilteredMemRefs;
4190 for (MachineMemOperand *MMO : MatchedMemRefs) {
4191 if (MMO->isLoad()) {
4192 if (mayLoad)
4193 FilteredMemRefs.push_back(MMO);
4194 } else if (MMO->isStore()) {
4195 if (mayStore)
4196 FilteredMemRefs.push_back(MMO);
4197 } else {
4198 FilteredMemRefs.push_back(MMO);
4199 }
4200 }
4201
4202 CurDAG->setNodeMemRefs(Res, FilteredMemRefs);
4203 }
4204
4205 LLVM_DEBUG(if (!MatchedMemRefs.empty() && Res->memoperands_empty()) dbgs()
4206 << " Dropping mem operands\n";
4207 dbgs() << " " << (IsMorphNodeTo ? "Morphed" : "Created")
4208 << " node: ";
4209 Res->dump(CurDAG););
4210
4211 // If this was a MorphNodeTo then we're completely done!
4212 if (IsMorphNodeTo) {
4213 // Update chain uses.
4214 UpdateChains(Res, InputChain, ChainNodesMatched, true);
4215 return;
4216 }
4217 continue;
4218 }
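// Editorial note (added for exposition; not part of the upstream file): the
// case above covers two families. OPC_EmitNode* creates a fresh machine node
// and records its results so a later OPC_CompleteMatch can rewrite uses of
// NodeToMatch, while OPC_MorphNodeTo* mutates NodeToMatch in place, which is
// why the morph path can return as soon as the chain uses are updated.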
4219
4220 case OPC_CompleteMatch: {
4221 // The match has been completed, and any new nodes that were needed have
4222 // been created. Patch up references to the matched dag to use the newly
4223 // created nodes.
4224 unsigned NumResults = MatcherTable[MatcherIndex++];
4225
4226 for (unsigned i = 0; i != NumResults; ++i) {
4227 unsigned ResSlot = MatcherTable[MatcherIndex++];
4228 if (ResSlot & 128)
4229 ResSlot = GetVBR(ResSlot, MatcherTable, MatcherIndex);
4230
4231 assert(ResSlot < RecordedNodes.size() && "Invalid CompleteMatch");
4232 SDValue Res = RecordedNodes[ResSlot].first;
4233
4234 assert(i < NodeToMatch->getNumValues() &&
4235 NodeToMatch->getValueType(i) != MVT::Other &&
4236 NodeToMatch->getValueType(i) != MVT::Glue &&
4237 "Invalid number of results to complete!");
4238 assert((NodeToMatch->getValueType(i) == Res.getValueType() ||
4239 NodeToMatch->getValueType(i) == MVT::iPTR ||
4240 Res.getValueType() == MVT::iPTR ||
4241 NodeToMatch->getValueType(i).getSizeInBits() ==
4242 Res.getValueSizeInBits()) &&
4243 "invalid replacement");
4244 ReplaceUses(SDValue(NodeToMatch, i), Res);
4245 }
4246
4247 // Update chain uses.
4248 UpdateChains(NodeToMatch, InputChain, ChainNodesMatched, false);
4249
4250 // If the root node defines glue, we need to update it to the glue result.
4251 // TODO: This never happens in our tests and I think it can be removed /
4252 // replaced with an assert, but doing it this way keeps the change
4253 // NFC.
4254 if (NodeToMatch->getValueType(NodeToMatch->getNumValues() - 1) ==
4255 MVT::Glue &&
4256 InputGlue.getNode())
4257 ReplaceUses(SDValue(NodeToMatch, NodeToMatch->getNumValues() - 1),
4258 InputGlue);
4259
4260 assert(NodeToMatch->use_empty() &&
4261 "Didn't replace all uses of the node?");
4262 CurDAG->RemoveDeadNode(NodeToMatch);
4263
4264 return;
4265 }
4266 }
4267
4268 // If the code reached this point, then the match failed. See if there is
4269 // another child to try in the current 'Scope', otherwise pop it until we
4270 // find a case to check.
4271 LLVM_DEBUG(dbgs() << " Match failed at index " << CurrentOpcodeIndex
4272 << "\n");
4273 ++NumDAGIselRetries;
4274 while (true) {
4275 if (MatchScopes.empty()) {
4276 CannotYetSelect(NodeToMatch);
4277 return;
4278 }
4279
4280 // Restore the interpreter state back to the point where the scope was
4281 // formed.
4282 MatchScope &LastScope = MatchScopes.back();
4283 RecordedNodes.resize(LastScope.NumRecordedNodes);
4284 NodeStack.clear();
4285 NodeStack.append(LastScope.NodeStack.begin(), LastScope.NodeStack.end());
4286 N = NodeStack.back();
4287
4288 if (LastScope.NumMatchedMemRefs != MatchedMemRefs.size())
4289 MatchedMemRefs.resize(LastScope.NumMatchedMemRefs);
4290 MatcherIndex = LastScope.FailIndex;
4291
4292 LLVM_DEBUG(dbgs() << " Continuing at " << MatcherIndex << "\n");
4293
4294 InputChain = LastScope.InputChain;
4295 InputGlue = LastScope.InputGlue;
4296 if (!LastScope.HasChainNodesMatched)
4297 ChainNodesMatched.clear();
4298
4299 // Check to see what the offset is at the new MatcherIndex. If it is zero
4300 // we have reached the end of this scope, otherwise we have another child
4301 // in the current scope to try.
4302 unsigned NumToSkip = MatcherTable[MatcherIndex++];
4303 if (NumToSkip & 128)
4304 NumToSkip = GetVBR(NumToSkip, MatcherTable, MatcherIndex);
4305
4306 // If we have another child in this scope to match, update FailIndex and
4307 // try it.
4308 if (NumToSkip != 0) {
4309 LastScope.FailIndex = MatcherIndex+NumToSkip;
4310 break;
4311 }
4312
4313 // End of this scope, pop it and try the next child in the containing
4314 // scope.
4315 MatchScopes.pop_back();
4316 }
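// Editorial sketch (added for exposition; not part of the upstream file):
// NumToSkip above uses the same VBR scheme as the rest of the table: the low
// seven bits of each byte carry data and the high bit means "another byte
// follows". A standalone decoder equivalent in spirit to the GetVBR helper:
//
//   static uint64_t decodeVBR(const unsigned char *Table, unsigned &Idx) {
//     uint64_t Val = Table[Idx++];
//     if ((Val & 128) == 0)
//       return Val;                         // single-byte value
//     Val &= 127;                           // keep the low 7 bits
//     unsigned Shift = 7;
//     uint64_t NextBits;
//     do {
//       NextBits = Table[Idx++];
//       Val |= (NextBits & 127) << Shift;   // splice in 7 more bits
//       Shift += 7;
//     } while (NextBits & 128);
//     return Val;
//   }
//
// A NumToSkip of zero marks the end of an OPC_Scope, so the loop pops that
// scope and retries the next alternative of the enclosing one.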
4317 }
4318}
4319
4320/// Return whether the node may raise an FP exception.
4322 // For machine opcodes, consult the MCID flag.
4323 if (N->isMachineOpcode()) {
4324 const MCInstrDesc &MCID = TII->get(N->getMachineOpcode());
4325 return MCID.mayRaiseFPException();
4326 }
4327
4328 // For ISD opcodes, only StrictFP opcodes may raise an FP
4329 // exception.
4330 if (N->isTargetOpcode())
4331 return N->isTargetStrictFPOpcode();
4332 return N->isStrictFPOpcode();
4333}
4334
4335 bool SelectionDAGISel::isOrEquivalentToAdd(const SDNode *N) const {
4336 assert(N->getOpcode() == ISD::OR && "Unexpected opcode");
4337 auto *C = dyn_cast<ConstantSDNode>(N->getOperand(1));
4338 if (!C)
4339 return false;
4340
4341 // Detect when "or" is used to add an offset to a stack object.
4342 if (auto *FN = dyn_cast<FrameIndexSDNode>(N->getOperand(0))) {
4343 MachineFrameInfo &MFI = MF->getFrameInfo();
4344 Align A = MFI.getObjectAlign(FN->getIndex());
4345 int32_t Off = C->getSExtValue();
4346 // If the alleged offset fits in the zero bits guaranteed by
4347 // the alignment, then this or is really an add.
4348 return (Off >= 0) && (((A.value() - 1) & Off) == unsigned(Off));
4349 }
4350 return false;
4351}
4352
4353void SelectionDAGISel::CannotYetSelect(SDNode *N) {
4354 std::string msg;
4355 raw_string_ostream Msg(msg);
4356 Msg << "Cannot select: ";
4357
4358 if (N->getOpcode() != ISD::INTRINSIC_W_CHAIN &&
4359 N->getOpcode() != ISD::INTRINSIC_WO_CHAIN &&
4360 N->getOpcode() != ISD::INTRINSIC_VOID) {
4361 N->printrFull(Msg, CurDAG);
4362 Msg << "\nIn function: " << MF->getName();
4363 } else {
4364 bool HasInputChain = N->getOperand(0).getValueType() == MVT::Other;
4365 unsigned iid = N->getConstantOperandVal(HasInputChain);
4366 if (iid < Intrinsic::num_intrinsics)
4367 Msg << "intrinsic %" << Intrinsic::getBaseName((Intrinsic::ID)iid);
4368 else if (const TargetIntrinsicInfo *TII = TM.getIntrinsicInfo())
4369 Msg << "target intrinsic %" << TII->getName(iid);
4370 else
4371 Msg << "unknown intrinsic #" << iid;
4372 }
4373 report_fatal_error(Twine(msg));
4374}