LLVM 22.0.0git
WebAssemblyRegStackify.cpp
Go to the documentation of this file.
1//===-- WebAssemblyRegStackify.cpp - Register Stackification --------------===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8///
9/// \file
10/// This file implements a register stacking pass.
11///
12/// This pass reorders instructions to put register uses and defs in an order
13/// such that they form single-use expression trees. Registers fitting this form
14/// are then marked as "stackified", meaning references to them are replaced by
15/// "push" and "pop" from the value stack.
16///
17/// This is primarily a code size optimization, since temporary values on the
18/// value stack don't need to be named.
19///
20//===----------------------------------------------------------------------===//
21
22#include "MCTargetDesc/WebAssemblyMCTargetDesc.h" // for WebAssembly::ARGUMENT_*
23#include "WebAssembly.h"
33#include "llvm/CodeGen/Passes.h"
34#include "llvm/IR/GlobalAlias.h"
35#include "llvm/Support/Debug.h"
37#include <iterator>
38using namespace llvm;
39
40#define DEBUG_TYPE "wasm-reg-stackify"
41
42namespace {
// MachineFunctionPass that performs register stackification: reorders
// instructions and marks single-use vregs as "stackified" so ExplicitLocals
// can emit them as implicit value-stack pushes/pops instead of locals.
43class WebAssemblyRegStackify final : public MachineFunctionPass {
 // True when built at an optimizing CodeGenOptLevel; gates the use of
 // MachineDominatorTree/LiveIntervals and the more aggressive strategies.
44 bool Optimize;
45
46 StringRef getPassName() const override {
47 return "WebAssembly Register Stackify";
48 }
49
50 void getAnalysisUsage(AnalysisUsage &AU) const override {
51 AU.setPreservesCFG();
52 if (Optimize) {
 // NOTE(review): source lines 53-54 are elided in this scrape —
 // presumably AU.addRequired<> calls for the optimize-only analyses
 // (LiveIntervals, MachineDominatorTree per runOnMachineFunction) —
 // confirm against upstream.
55 }
 // NOTE(review): lines 56-61 elided (likely addPreserved<> calls and the
 // MachineFunctionPass::getAnalysisUsage(AU) chain) — confirm upstream.
62 }
63
64 bool runOnMachineFunction(MachineFunction &MF) override;
65
66public:
67 static char ID; // Pass identification, replacement for typeid
 // NOTE(review): line 69 (the constructor's member-initializer list,
 // presumably setting Optimize from OptLevel) is elided in this scrape.
68 WebAssemblyRegStackify(CodeGenOptLevel OptLevel)
70 WebAssemblyRegStackify() : WebAssemblyRegStackify(CodeGenOptLevel::Default) {}
71};
72} // end anonymous namespace
73
74char WebAssemblyRegStackify::ID = 0;
75INITIALIZE_PASS(WebAssemblyRegStackify, DEBUG_TYPE,
76 "Reorder instructions to use the WebAssembly value stack",
77 false, false)
78
// Factory used by the WebAssembly target to create this pass.
// NOTE(review): line 79 (the function signature, presumably
// `FunctionPass *llvm::createWebAssemblyRegStackify(CodeGenOptLevel OptLevel) {`)
// is elided in this scrape — confirm against upstream.
80 return new WebAssemblyRegStackify(OptLevel);
81}
82
83// Decorate the given instruction with implicit operands that enforce the
84// expression stack ordering constraints for an instruction which is on
85// the expression stack.
// The opaque VALUE_STACK physical register is both defined and read so that
// the scheduler/verifier cannot reorder stackified instructions relative to
// one another.
// NOTE(review): line 86 (the signature, `static void
// imposeStackOrdering(MachineInstr *MI)` per the declaration echoed at the
// bottom of this page) is elided in this scrape.
87 // Write the opaque VALUE_STACK register.
88 if (!MI->definesRegister(WebAssembly::VALUE_STACK, /*TRI=*/nullptr))
89 MI->addOperand(MachineOperand::CreateReg(WebAssembly::VALUE_STACK,
90 /*isDef=*/true,
91 /*isImp=*/true));
92
93 // Also read the opaque VALUE_STACK register.
94 if (!MI->readsRegister(WebAssembly::VALUE_STACK, /*TRI=*/nullptr))
95 MI->addOperand(MachineOperand::CreateReg(WebAssembly::VALUE_STACK,
96 /*isDef=*/false,
97 /*isImp=*/true));
98}
99
100// Convert an IMPLICIT_DEF instruction into an instruction which defines
101// a constant zero value.
// The zero constant chosen matches the register class of the def: i32/i64
// immediates, f32/f64 FP immediates, or an all-zero v128 (I64x2 lanes).
// NOTE(review): lines 102-103 (the signature, presumably
// `static void convertImplicitDefToConstZero(MachineInstr *MI,
// MachineRegisterInfo &MRI, ...)`) are elided in this scrape.
104 const TargetInstrInfo *TII,
105 MachineFunction &MF) {
106 assert(MI->getOpcode() == TargetOpcode::IMPLICIT_DEF);
107
108 const auto *RegClass = MRI.getRegClass(MI->getOperand(0).getReg());
109 if (RegClass == &WebAssembly::I32RegClass) {
110 MI->setDesc(TII->get(WebAssembly::CONST_I32));
111 MI->addOperand(MachineOperand::CreateImm(0));
112 } else if (RegClass == &WebAssembly::I64RegClass) {
113 MI->setDesc(TII->get(WebAssembly::CONST_I64));
114 MI->addOperand(MachineOperand::CreateImm(0));
115 } else if (RegClass == &WebAssembly::F32RegClass) {
116 MI->setDesc(TII->get(WebAssembly::CONST_F32));
 // NOTE(review): lines 117-118 (construction of the zero ConstantFP `Val`)
 // are elided in this scrape.
119 MI->addOperand(MachineOperand::CreateFPImm(Val));
120 } else if (RegClass == &WebAssembly::F64RegClass) {
121 MI->setDesc(TII->get(WebAssembly::CONST_F64));
 // NOTE(review): lines 122-123 (construction of the zero ConstantFP `Val`)
 // are elided in this scrape.
124 MI->addOperand(MachineOperand::CreateFPImm(Val));
125 } else if (RegClass == &WebAssembly::V128RegClass) {
126 MI->setDesc(TII->get(WebAssembly::CONST_V128_I64x2));
127 MI->addOperand(MachineOperand::CreateImm(0));
128 MI->addOperand(MachineOperand::CreateImm(0));
129 } else {
130 llvm_unreachable("Unexpected reg class");
131 }
132}
133
134// Determine whether a call to the callee referenced by
135// MI->getOperand(CalleeOpNo) reads memory, writes memory, and/or has side
136// effects.
// Outputs are accumulated into the by-reference flags; when the callee is a
// known Function with memory attributes, the flags may be left tighter than
// the worst case. Non-nothrow callees are treated as having side effects.
137static void queryCallee(const MachineInstr &MI, bool &Read, bool &Write,
138 bool &Effects, bool &StackPointer) {
139 // All calls can use the stack pointer.
140 StackPointer = true;
141
 // NOTE(review): line 142 (the declaration of `MO`, the callee operand of
 // MI) is elided in this scrape — confirm against upstream.
143 if (MO.isGlobal()) {
 // Look through non-interposable aliases to the aliasee so that attribute
 // queries see the real function.
144 const Constant *GV = MO.getGlobal();
145 if (const auto *GA = dyn_cast<GlobalAlias>(GV))
146 if (!GA->isInterposable())
147 GV = GA->getAliasee();
148
149 if (const auto *F = dyn_cast<Function>(GV)) {
150 if (!F->doesNotThrow())
151 Effects = true;
152 if (F->doesNotAccessMemory())
153 return;
154 if (F->onlyReadsMemory()) {
155 Read = true;
156 return;
157 }
158 }
159 }
160
161 // Assume the worst.
162 Write = true;
163 Read = true;
164 Effects = true;
165}
166
167// Determine whether MI reads memory, writes memory, has side effects,
168// and/or uses the stack pointer value.
169static void query(const MachineInstr &MI, bool &Read, bool &Write,
170 bool &Effects, bool &StackPointer) {
171 assert(!MI.isTerminator());
172
173 if (MI.isDebugInstr() || MI.isPosition())
174 return;
175
176 // Check for loads.
177 if (MI.mayLoad() && !MI.isDereferenceableInvariantLoad())
178 Read = true;
179
180 // Check for stores.
181 if (MI.mayStore()) {
182 Write = true;
183 } else if (MI.hasOrderedMemoryRef()) {
184 switch (MI.getOpcode()) {
185 case WebAssembly::DIV_S_I32:
186 case WebAssembly::DIV_S_I64:
187 case WebAssembly::REM_S_I32:
188 case WebAssembly::REM_S_I64:
189 case WebAssembly::DIV_U_I32:
190 case WebAssembly::DIV_U_I64:
191 case WebAssembly::REM_U_I32:
192 case WebAssembly::REM_U_I64:
193 case WebAssembly::I32_TRUNC_S_F32:
194 case WebAssembly::I64_TRUNC_S_F32:
195 case WebAssembly::I32_TRUNC_S_F64:
196 case WebAssembly::I64_TRUNC_S_F64:
197 case WebAssembly::I32_TRUNC_U_F32:
198 case WebAssembly::I64_TRUNC_U_F32:
199 case WebAssembly::I32_TRUNC_U_F64:
200 case WebAssembly::I64_TRUNC_U_F64:
201 // These instruction have hasUnmodeledSideEffects() returning true
202 // because they trap on overflow and invalid so they can't be arbitrarily
203 // moved, however hasOrderedMemoryRef() interprets this plus their lack
204 // of memoperands as having a potential unknown memory reference.
205 break;
206 default:
207 // Record volatile accesses, unless it's a call, as calls are handled
208 // specially below.
209 if (!MI.isCall()) {
210 Write = true;
211 Effects = true;
212 }
213 break;
214 }
215 }
216
217 // Check for side effects.
218 if (MI.hasUnmodeledSideEffects()) {
219 switch (MI.getOpcode()) {
220 case WebAssembly::DIV_S_I32:
221 case WebAssembly::DIV_S_I64:
222 case WebAssembly::REM_S_I32:
223 case WebAssembly::REM_S_I64:
224 case WebAssembly::DIV_U_I32:
225 case WebAssembly::DIV_U_I64:
226 case WebAssembly::REM_U_I32:
227 case WebAssembly::REM_U_I64:
228 case WebAssembly::I32_TRUNC_S_F32:
229 case WebAssembly::I64_TRUNC_S_F32:
230 case WebAssembly::I32_TRUNC_S_F64:
231 case WebAssembly::I64_TRUNC_S_F64:
232 case WebAssembly::I32_TRUNC_U_F32:
233 case WebAssembly::I64_TRUNC_U_F32:
234 case WebAssembly::I32_TRUNC_U_F64:
235 case WebAssembly::I64_TRUNC_U_F64:
236 // These instructions have hasUnmodeledSideEffects() returning true
237 // because they trap on overflow and invalid so they can't be arbitrarily
238 // moved, however in the specific case of register stackifying, it is safe
239 // to move them because overflow and invalid are Undefined Behavior.
240 break;
241 default:
242 Effects = true;
243 break;
244 }
245 }
246
247 // Check for writes to __stack_pointer global.
248 if ((MI.getOpcode() == WebAssembly::GLOBAL_SET_I32 ||
249 MI.getOpcode() == WebAssembly::GLOBAL_SET_I64) &&
250 MI.getOperand(0).isSymbol() &&
251 !strcmp(MI.getOperand(0).getSymbolName(), "__stack_pointer"))
252 StackPointer = true;
253
254 // Analyze calls.
255 if (MI.isCall()) {
256 queryCallee(MI, Read, Write, Effects, StackPointer);
257 }
258}
259
260// Test whether Def is safe and profitable to rematerialize.
261static bool shouldRematerialize(const MachineInstr &Def,
262 const WebAssemblyInstrInfo *TII) {
263 return Def.isAsCheapAsAMove() && TII->isTriviallyReMaterializable(Def);
264}
265
266// Identify the definition for this register at this point. This is a
267// generalization of MachineRegisterInfo::getUniqueVRegDef that uses
268// LiveIntervals to handle complex cases.
// Returns null when neither MRI nor LIS can identify a unique reaching def.
// NOTE(review): line 270 (a parameter line, presumably
// `const MachineRegisterInfo &MRI,`) is elided in this scrape.
269static MachineInstr *getVRegDef(unsigned Reg, const MachineInstr *Insert,
271 const LiveIntervals *LIS) {
272 // Most registers are in SSA form here so we try a quick MRI query first.
273 if (MachineInstr *Def = MRI.getUniqueVRegDef(Reg))
274 return Def;
275
276 // MRI doesn't know what the Def is. Try asking LIS.
277 if (LIS != nullptr) {
 // Use the value number live just before Insert to find the reaching def.
278 SlotIndex InstIndex = LIS->getInstructionIndex(*Insert);
279 if (const VNInfo *ValNo = LIS->getInterval(Reg).getVNInfoBefore(InstIndex))
280 return LIS->getInstructionFromIndex(ValNo->def);
281 }
282
283 return nullptr;
284}
285
286// Test whether Reg, as defined at Def, has exactly one use. This is a
287// generalization of MachineRegisterInfo::hasOneNonDBGUse that uses
288// LiveIntervals to handle complex cases in optimized code.
// NOTE(review): lines 289-290 (the signature, presumably
// `static bool hasSingleUse(unsigned Reg, MachineRegisterInfo &MRI,
// WebAssemblyFunctionInfo &MFI, bool Optimize,`) are elided in this scrape.
291 MachineInstr *Def, LiveIntervals *LIS) {
292 if (!Optimize) {
293 // Using "hasOneUse" instead of "hasOneNonDBGUse" here because we don't
294 // want to stackify DBG_VALUE operands - WASM stack locations are less
295 // useful and less widely supported than WASM local locations.
296 if (!MRI.hasOneUse(Reg))
297 return false;
298 // The frame base always has an implicit DBG use as DW_AT_frame_base.
299 if (MFI.isFrameBaseVirtual() && MFI.getFrameBaseVreg() == Reg)
300 return false;
301 return true;
302 }
303
304 // Most registers are in SSA form here so we try a quick MRI query first.
305 if (MRI.hasOneNonDBGUse(Reg))
306 return true;
307
308 if (LIS == nullptr)
309 return false;
310
 // Fall back to LiveIntervals: count the non-debug uses that read the value
 // defined at Def, requiring that use to be a kill and to be unique.
311 bool HasOne = false;
312 const LiveInterval &LI = LIS->getInterval(Reg);
 // NOTE(review): line 314 (the initializer of DefVNI, presumably
 // `LI.getVNInfoAt(LIS->getInstructionIndex(*Def).getRegSlot());`) is
 // elided in this scrape.
313 const VNInfo *DefVNI =
315 assert(DefVNI);
316 for (auto &I : MRI.use_nodbg_operands(Reg)) {
317 const auto &Result = LI.Query(LIS->getInstructionIndex(*I.getParent()));
318 if (Result.valueIn() == DefVNI) {
319 if (!Result.isKill())
320 return false;
321 if (HasOne)
322 return false;
323 HasOne = true;
324 }
325 }
326 return HasOne;
327}
328
329// Test whether it's safe to move Def to just before Insert.
330// TODO: Compute memory dependencies in a way that doesn't require always
331// walking the block.
332// TODO: Compute memory dependencies in a way that uses AliasAnalysis to be
333// more precise.
// Checks, in order: multivalue-def stackifiability, register dependencies,
// and (via query()) memory/side-effect/stack-pointer conflicts with every
// instruction between Def and Insert.
334static bool isSafeToMove(const MachineOperand *Def, const MachineOperand *Use,
335 const MachineInstr *Insert,
336 const WebAssemblyFunctionInfo &MFI,
337 const MachineRegisterInfo &MRI, bool Optimize) {
338 const MachineInstr *DefI = Def->getParent();
339 const MachineInstr *UseI = Use->getParent();
340 assert(DefI->getParent() == Insert->getParent());
341 assert(UseI->getParent() == Insert->getParent());
342
343 // The first def of a multivalue instruction can be stackified by moving,
344 // since the later defs can always be placed into locals if necessary. Later
345 // defs can only be stackified if all previous defs are already stackified
346 // since ExplicitLocals will not know how to place a def in a local if a
347 // subsequent def is stackified. But only one def can be stackified by moving
348 // the instruction, so it must be the first one.
349 //
350 // TODO: This could be loosened to be the first *live* def, but care would
351 // have to be taken to ensure the drops of the initial dead defs can be
352 // placed. This would require checking that no previous defs are used in the
353 // same instruction as subsequent defs.
354 if (Def != DefI->defs().begin())
355 return false;
356
357 // If any subsequent def is used prior to the current value by the same
358 // instruction in which the current value is used, we cannot
359 // stackify. Stackifying in this case would require that def moving below the
360 // current def in the stack, which cannot be achieved, even with locals.
361 // Also ensure we don't sink the def past any other prior uses.
362 for (const auto &SubsequentDef : drop_begin(DefI->defs())) {
363 auto I = std::next(MachineBasicBlock::const_iterator(DefI));
364 auto E = std::next(MachineBasicBlock::const_iterator(UseI));
365 for (; I != E; ++I) {
366 for (const auto &PriorUse : I->uses()) {
367 if (&PriorUse == Use)
368 break;
369 if (PriorUse.isReg() && SubsequentDef.getReg() == PriorUse.getReg())
370 return false;
371 }
372 }
373 }
374
375 // If moving is a semantic nop, it is always allowed
 // i.e. Def is already immediately before Insert, ignoring debug
 // instructions in between.
376 const MachineBasicBlock *MBB = DefI->getParent();
377 auto NextI = std::next(MachineBasicBlock::const_iterator(DefI));
378 for (auto E = MBB->end(); NextI != E && NextI->isDebugInstr(); ++NextI)
379 ;
380 if (NextI == Insert)
381 return true;
382
383 // When not optimizing, we only handle the trivial case above
384 // to guarantee no impact to debugging and to avoid spending
385 // compile time.
386 if (!Optimize)
387 return false;
388
389 // 'catch' and 'catch_all' should be the first instruction of a BB and cannot
390 // move.
391 if (WebAssembly::isCatch(DefI->getOpcode()))
392 return false;
393
394 // Check for register dependencies.
395 SmallVector<unsigned, 4> MutableRegisters;
396 for (const MachineOperand &MO : DefI->operands()) {
397 if (!MO.isReg() || MO.isUndef())
398 continue;
399 Register Reg = MO.getReg();
400
401 // If the register is dead here and at Insert, ignore it.
402 if (MO.isDead() && Insert->definesRegister(Reg, /*TRI=*/nullptr) &&
403 !Insert->readsRegister(Reg, /*TRI=*/nullptr))
404 continue;
405
406 if (Reg.isPhysical()) {
407 // Ignore ARGUMENTS; it's just used to keep the ARGUMENT_* instructions
408 // from moving down, and we've already checked for that.
409 if (Reg == WebAssembly::ARGUMENTS)
410 continue;
411 // If the physical register is never modified, ignore it.
412 if (!MRI.isPhysRegModified(Reg))
413 continue;
414 // Otherwise, it's a physical register with unknown liveness.
415 return false;
416 }
417
418 // If one of the operands isn't in SSA form, it has different values at
419 // different times, and we need to make sure we don't move our use across
420 // a different def.
421 if (!MO.isDef() && !MRI.hasOneDef(Reg))
422 MutableRegisters.push_back(Reg);
423 }
424
425 bool Read = false, Write = false, Effects = false, StackPointer = false;
426 query(*DefI, Read, Write, Effects, StackPointer);
427
428 // If the instruction does not access memory and has no side effects, it has
429 // no additional dependencies.
430 bool HasMutableRegisters = !MutableRegisters.empty();
431 if (!Read && !Write && !Effects && !StackPointer && !HasMutableRegisters)
432 return true;
433
434 // Scan through the intervening instructions between DefI and Insert.
 // NOTE(review): line 435 (the declaration of iterators `I` and `D`,
 // presumably `MachineBasicBlock::const_iterator I(Insert), D(DefI);`) is
 // elided in this scrape — confirm against upstream.
436 for (--I; I != D; --I) {
437 bool InterveningRead = false;
438 bool InterveningWrite = false;
439 bool InterveningEffects = false;
440 bool InterveningStackPointer = false;
441 query(*I, InterveningRead, InterveningWrite, InterveningEffects,
442 InterveningStackPointer);
443 if (Effects && InterveningEffects)
444 return false;
445 if (Read && InterveningWrite)
446 return false;
447 if (Write && (InterveningRead || InterveningWrite))
448 return false;
449 if (StackPointer && InterveningStackPointer)
450 return false;
451
 // A def of any non-SSA register we depend on blocks the move.
452 for (unsigned Reg : MutableRegisters)
453 for (const MachineOperand &MO : I->operands())
454 if (MO.isReg() && MO.isDef() && MO.getReg() == Reg)
455 return false;
456 }
457
458 return true;
459}
460
461/// Test whether OneUse, a use of Reg, dominates all of Reg's other uses.
/// Only uses of the same SSA value (same VNInfo) as OneUse are considered.
/// Used to decide whether a tee can satisfy the remaining uses.
// NOTE(review): lines 464 and 467 (parameter lines, presumably
// `const MachineRegisterInfo &MRI,` and
// `const WebAssemblyFunctionInfo &MFI) {`) are elided in this scrape.
462static bool oneUseDominatesOtherUses(unsigned Reg, const MachineOperand &OneUse,
463 const MachineBasicBlock &MBB,
465 const MachineDominatorTree &MDT,
466 LiveIntervals &LIS,
468 const LiveInterval &LI = LIS.getInterval(Reg);
469
470 const MachineInstr *OneUseInst = OneUse.getParent();
471 VNInfo *OneUseVNI = LI.getVNInfoBefore(LIS.getInstructionIndex(*OneUseInst));
472
473 for (const MachineOperand &Use : MRI.use_nodbg_operands(Reg)) {
474 if (&Use == &OneUse)
475 continue;
476
477 const MachineInstr *UseInst = Use.getParent();
478 VNInfo *UseVNI = LI.getVNInfoBefore(LIS.getInstructionIndex(*UseInst));
479
 // Uses of a different SSA value are irrelevant to this query.
480 if (UseVNI != OneUseVNI)
481 continue;
482
483 if (UseInst == OneUseInst) {
484 // Another use in the same instruction. We need to ensure that the one
485 // selected use happens "before" it.
 // (Operand order within an instruction is used as evaluation order.)
486 if (&OneUse > &Use)
487 return false;
488 } else {
489 // Test that the use is dominated by the one selected use.
490 while (!MDT.dominates(OneUseInst, UseInst)) {
491 // Actually, dominating is over-conservative. Test that the use would
492 // happen after the one selected use in the stack evaluation order.
493 //
494 // This is needed as a consequence of using implicit local.gets for
495 // uses and implicit local.sets for defs.
496 if (UseInst->getDesc().getNumDefs() == 0)
497 return false;
498 const MachineOperand &MO = UseInst->getOperand(0);
499 if (!MO.isReg())
500 return false;
501 Register DefReg = MO.getReg();
502 if (!DefReg.isVirtual() || !MFI.isVRegStackified(DefReg))
503 return false;
 // Walk up through stackified single-use defs toward OneUseInst.
504 assert(MRI.hasOneNonDBGUse(DefReg));
505 const MachineOperand &NewUse = *MRI.use_nodbg_begin(DefReg);
506 const MachineInstr *NewUseInst = NewUse.getParent();
507 if (NewUseInst == OneUseInst) {
508 if (&OneUse > &NewUse)
509 return false;
510 break;
511 }
512 UseInst = NewUseInst;
513 }
514 }
515 }
516 return true;
517}
518
519/// Get the appropriate tee opcode for the given register class.
520static unsigned getTeeOpcode(const TargetRegisterClass *RC) {
521 if (RC == &WebAssembly::I32RegClass)
522 return WebAssembly::TEE_I32;
523 if (RC == &WebAssembly::I64RegClass)
524 return WebAssembly::TEE_I64;
525 if (RC == &WebAssembly::F32RegClass)
526 return WebAssembly::TEE_F32;
527 if (RC == &WebAssembly::F64RegClass)
528 return WebAssembly::TEE_F64;
529 if (RC == &WebAssembly::V128RegClass)
530 return WebAssembly::TEE_V128;
531 if (RC == &WebAssembly::EXTERNREFRegClass)
532 return WebAssembly::TEE_EXTERNREF;
533 if (RC == &WebAssembly::FUNCREFRegClass)
534 return WebAssembly::TEE_FUNCREF;
535 if (RC == &WebAssembly::EXNREFRegClass)
536 return WebAssembly::TEE_EXNREF;
537 llvm_unreachable("Unexpected register class");
538}
539
540// Shrink LI to its uses, cleaning up LI.
// If shrinking discovers the interval now has multiple connected components,
// split them into separate intervals.
// NOTE(review): line 541 (the signature, presumably `static void
// shrinkToUses(LiveInterval &LI, LiveIntervals &LIS) {`) and line 543 (the
// declaration of `SplitLIs`) are elided in this scrape.
542 if (LIS.shrinkToUses(&LI)) {
544 LIS.splitSeparateComponents(LI, SplitLIs);
545 }
546}
547
548/// A single-use def in the same block with no intervening memory or register
549/// dependencies; move the def down and nest it with the current instruction.
/// Returns Def (now the new insertion point for the tree walker).
// NOTE(review): lines 550-551 (the signature, presumably `static MachineInstr
// *moveForSingleUse(unsigned Reg, MachineOperand &Op, MachineInstr *Def,
// MachineBasicBlock &MBB,` — see the declaration echoed at the bottom of
// this page) are elided in this scrape.
552 MachineInstr *Insert, LiveIntervals *LIS,
555 LLVM_DEBUG(dbgs() << "Move for single use: "; Def->dump());
556
 // NOTE(review): line 557 (presumably the construction of `DefDIs`, a
 // WebAssemblyDebugValueManager for Def) is elided in this scrape.
558 DefDIs.sink(Insert);
559 if (LIS != nullptr)
560 LIS->handleMove(*Def);
561
562 if (MRI.hasOneDef(Reg) && MRI.hasOneNonDBGUse(Reg)) {
563 // No one else is using this register for anything so we can just stackify
564 // it in place.
565 MFI.stackifyVReg(MRI, Reg);
566 } else {
567 // The register may have unrelated uses or defs; create a new register for
568 // just our one def and use so that we can stackify it.
569 Register NewReg = MRI.createVirtualRegister(MRI.getRegClass(Reg));
570 Op.setReg(NewReg);
571 DefDIs.updateReg(NewReg);
572
573 if (LIS != nullptr) {
574 // Tell LiveIntervals about the new register.
 // NOTE(review): line 575 (presumably the interval creation for NewReg)
 // is elided in this scrape.
576
577 // Tell LiveIntervals about the changes to the old register.
 // NOTE(review): line 579 (presumably the start of the removeSegment
 // call on LI) is elided in this scrape.
578 LiveInterval &LI = LIS->getInterval(Reg);
580 LIS->getInstructionIndex(*Op.getParent()).getRegSlot(),
581 /*RemoveDeadValNo=*/true);
582 }
583
584 MFI.stackifyVReg(MRI, NewReg);
585 LLVM_DEBUG(dbgs() << " - Replaced register: "; Def->dump());
586 }
587
 // NOTE(review): line 588 (presumably `imposeStackOrdering(Def);`) is
 // elided in this scrape.
589 return Def;
590}
591
// Return the closest preceding non-debug instruction in the same list, or
// null if none exists.
// NOTE(review): line 592 (the signature, presumably `static MachineInstr
// *getPrevNonDebugInst(MachineInstr *MI) {`) is elided in this scrape.
593 for (auto *I = MI->getPrevNode(); I; I = I->getPrevNode())
594 if (!I->isDebugInstr())
595 return I;
596 return nullptr;
597}
598
599/// A trivially cloneable instruction; clone it and nest the new copy with the
600/// current instruction.
/// Returns the clone (the new insertion point). If cloning made the original
/// def dead, the original is deleted.
601static MachineInstr *
// NOTE(review): lines 602-605 (the parameter list — per the declaration
// echoed at the bottom of this page: `rematerializeCheapDef(unsigned Reg,
// MachineOperand &Op, MachineInstr &Def, MachineBasicBlock::instr_iterator
// Insert, LiveIntervals &LIS, WebAssemblyFunctionInfo &MFI,
// MachineRegisterInfo &MRI,`) are elided in this scrape.
606 const WebAssemblyInstrInfo *TII) {
607 LLVM_DEBUG(dbgs() << "Rematerializing cheap def: "; Def.dump());
608 LLVM_DEBUG(dbgs() << " - for use in "; Op.getParent()->dump());
609
610 WebAssemblyDebugValueManager DefDIs(&Def);
611
612 Register NewReg = MRI.createVirtualRegister(MRI.getRegClass(Reg));
613 DefDIs.cloneSink(&*Insert, NewReg);
614 Op.setReg(NewReg);
 // cloneSink inserted the clone immediately before Insert; recover it.
615 MachineInstr *Clone = getPrevNonDebugInst(&*Insert);
616 assert(Clone);
617 LIS.InsertMachineInstrInMaps(*Clone);
 // NOTE(review): line 618 (presumably the interval creation for NewReg) is
 // elided in this scrape.
619 MFI.stackifyVReg(MRI, NewReg);
620 imposeStackOrdering(Clone);
621
622 LLVM_DEBUG(dbgs() << " - Cloned to "; Clone->dump());
623
624 // Shrink the interval.
625 bool IsDead = MRI.use_empty(Reg);
626 if (!IsDead) {
627 LiveInterval &LI = LIS.getInterval(Reg);
628 shrinkToUses(LI, LIS);
 // NOTE(review): line 629 (presumably recomputing IsDead from the shrunk
 // interval) is elided in this scrape.
630 }
631
632 // If that was the last use of the original, delete the original.
633 if (IsDead) {
634 LLVM_DEBUG(dbgs() << " - Deleting original\n");
 // NOTE(review): line 635 (presumably computing `Idx`, the original def's
 // slot index) is elided in this scrape.
636 LIS.removePhysRegDefAt(MCRegister::from(WebAssembly::ARGUMENTS), Idx);
637 LIS.removeInterval(Reg);
 // NOTE(review): line 638 (presumably removing the original from the
 // slot-index maps) is elided in this scrape.
639 DefDIs.removeDef();
640 }
641
642 return Clone;
643}
644
645/// A multiple-use def in the same block with no intervening memory or register
646/// dependencies; move the def down, nest it with the current instruction, and
647/// insert a tee to satisfy the rest of the uses. As an illustration, rewrite
648/// this:
649///
650/// Reg = INST ... // Def
651/// INST ..., Reg, ... // Insert
652/// INST ..., Reg, ...
653/// INST ..., Reg, ...
654///
655/// to this:
656///
657/// DefReg = INST ... // Def (to become the new Insert)
658/// TeeReg, Reg = TEE_... DefReg
659/// INST ..., TeeReg, ... // Insert
660/// INST ..., Reg, ...
661/// INST ..., Reg, ...
662///
663/// with DefReg and TeeReg stackified. This eliminates a local.get from the
664/// resulting code.
// Returns Def (the new insertion point for the tree walker).
// NOTE(review): lines 665-668 (the signature, presumably `static MachineInstr
// *moveAndTeeForMultiUse(unsigned Reg, MachineOperand &Op, MachineInstr *Def,
// MachineBasicBlock &MBB, MachineInstr *Insert, LiveIntervals &LIS,
// WebAssemblyFunctionInfo &MFI, MachineRegisterInfo &MRI, const
// WebAssemblyInstrInfo *TII) {`) are elided in this scrape.
669 LLVM_DEBUG(dbgs() << "Move and tee for multi-use:"; Def->dump());
670
671 const auto *RegClass = MRI.getRegClass(Reg);
672 Register TeeReg = MRI.createVirtualRegister(RegClass);
673 Register DefReg = MRI.createVirtualRegister(RegClass);
674
675 // Move Def into place.
 // NOTE(review): line 676 (presumably the construction of `DefDIs`, a
 // WebAssemblyDebugValueManager for Def) is elided in this scrape.
677 DefDIs.sink(Insert);
678 LIS.handleMove(*Def);
679
680 // Create the Tee and attach the registers.
681 MachineOperand &DefMO = Def->getOperand(0);
682 MachineInstr *Tee = BuildMI(MBB, Insert, Insert->getDebugLoc(),
683 TII->get(getTeeOpcode(RegClass)), TeeReg)
 // NOTE(review): line 684 (presumably `.addReg(Reg, RegState::Define)` for
 // the tee's second def) is elided in this scrape.
685 .addReg(DefReg, getUndefRegState(DefMO.isDead()));
686 Op.setReg(TeeReg);
687 DefDIs.updateReg(DefReg);
688 SlotIndex TeeIdx = LIS.InsertMachineInstrInMaps(*Tee).getRegSlot();
689 SlotIndex DefIdx = LIS.getInstructionIndex(*Def).getRegSlot();
690
691 // Tell LiveIntervals we moved the original vreg def from Def to Tee.
692 LiveInterval &LI = LIS.getInterval(Reg);
 // NOTE(review): line 693 (presumably locating the segment `I` of LI at
 // DefIdx) is elided in this scrape.
694 VNInfo *ValNo = LI.getVNInfoAt(DefIdx);
695 I->start = TeeIdx;
696 ValNo->def = TeeIdx;
697 shrinkToUses(LI, LIS);
698
699 // Finish stackifying the new regs.
 // NOTE(review): lines 700-701 (presumably interval creation for TeeReg and
 // DefReg) are elided in this scrape.
702 MFI.stackifyVReg(MRI, DefReg);
703 MFI.stackifyVReg(MRI, TeeReg);
 // NOTE(review): lines 704-705 (presumably imposeStackOrdering on Def and
 // Tee) are elided in this scrape.
706
707 // Even though 'TeeReg, Reg = TEE ...', has two defs, we don't need to clone
708 // DBG_VALUEs for both of them, given that the latter will cancel the former
709 // anyway. Here we only clone DBG_VALUEs for TeeReg, which will be converted
710 // to a local index in ExplicitLocals pass.
711 DefDIs.cloneSink(Insert, TeeReg, /* CloneDef */ false);
712
713 LLVM_DEBUG(dbgs() << " - Replaced register: "; Def->dump());
714 LLVM_DEBUG(dbgs() << " - Tee instruction: "; Tee->dump());
715 return Def;
716}
717
718namespace {
719/// A stack for walking the tree of instructions being built, visiting the
720/// MachineOperands in DFS order.
/// Operand ranges are stored reversed because the tree is consumed in LIFO
/// (value-stack) order.
721class TreeWalkerState {
722 using mop_iterator = MachineInstr::mop_iterator;
723 using mop_reverse_iterator = std::reverse_iterator<mop_iterator>;
724 using RangeTy = iterator_range<mop_reverse_iterator>;
 // NOTE(review): line 725 (the declaration of `Worklist`, presumably a
 // SmallVector<RangeTy, N>) is elided in this scrape.
726
727public:
728 explicit TreeWalkerState(MachineInstr *Insert) {
729 const iterator_range<mop_iterator> &Range = Insert->explicit_uses();
730 if (!Range.empty())
731 Worklist.push_back(reverse(Range));
732 }
733
 // True when every operand range has been fully consumed.
734 bool done() const { return Worklist.empty(); }
735
 // Pop and return the next operand in DFS order.
736 MachineOperand &pop() {
737 RangeTy &Range = Worklist.back();
738 MachineOperand &Op = *Range.begin();
 // NOTE(review): line 739 (presumably advancing Range past the popped
 // operand) is elided in this scrape.
740 if (Range.empty())
741 Worklist.pop_back();
742 assert((Worklist.empty() || !Worklist.back().empty()) &&
743 "Empty ranges shouldn't remain in the worklist");
744 return Op;
745 }
746
747 /// Push Instr's operands onto the stack to be visited.
748 void pushOperands(MachineInstr *Instr) {
749 const iterator_range<mop_iterator> &Range(Instr->explicit_uses());
750 if (!Range.empty())
751 Worklist.push_back(reverse(Range));
752 }
753
754 /// Some of Instr's operands are on the top of the stack; remove them and
755 /// re-insert them starting from the beginning (because we've commuted them).
756 void resetTopOperands(MachineInstr *Instr) {
757 assert(hasRemainingOperands(Instr) &&
758 "Reseting operands should only be done when the instruction has "
759 "an operand still on the stack");
760 Worklist.back() = reverse(Instr->explicit_uses());
761 }
762
763 /// Test whether Instr has operands remaining to be visited at the top of
764 /// the stack.
765 bool hasRemainingOperands(const MachineInstr *Instr) const {
766 if (Worklist.empty())
767 return false;
768 const RangeTy &Range = Worklist.back();
769 return !Range.empty() && Range.begin()->getParent() == Instr;
770 }
771
772 /// Test whether the given register is present on the stack, indicating an
773 /// operand in the tree that we haven't visited yet. Moving a definition of
774 /// Reg to a point in the tree after that would change its value.
775 ///
776 /// This is needed as a consequence of using implicit local.gets for
777 /// uses and implicit local.sets for defs.
778 bool isOnStack(unsigned Reg) const {
779 for (const RangeTy &Range : Worklist)
780 for (const MachineOperand &MO : Range)
781 if (MO.isReg() && MO.getReg() == Reg)
782 return true;
783 return false;
784 }
785};
786
787/// State to keep track of whether commuting is in flight or whether it's been
788/// tried for the current instruction and didn't work.
789class CommutingState {
790 /// There are effectively three states: the initial state where we haven't
791 /// started commuting anything and we don't know anything yet, the tentative
792 /// state where we've commuted the operands of the current instruction and are
793 /// revisiting it, and the declined state where we've reverted the operands
794 /// back to their original order and will no longer commute it further.
795 bool TentativelyCommuting = false;
796 bool Declined = false;
797
798 /// During the tentative state, these hold the operand indices of the commuted
799 /// operands.
800 unsigned Operand0, Operand1;
801
802public:
803 /// Stackification for an operand was not successful due to ordering
804 /// constraints. If possible, and if we haven't already tried it and declined
805 /// it, commute Insert's operands and prepare to revisit it.
806 void maybeCommute(MachineInstr *Insert, TreeWalkerState &TreeWalker,
807 const WebAssemblyInstrInfo *TII) {
808 if (TentativelyCommuting) {
809 assert(!Declined &&
810 "Don't decline commuting until you've finished trying it");
811 // Commuting didn't help. Revert it.
812 TII->commuteInstruction(*Insert, /*NewMI=*/false, Operand0, Operand1);
813 TentativelyCommuting = false;
814 Declined = true;
815 } else if (!Declined && TreeWalker.hasRemainingOperands(Insert)) {
 // NOTE(review): lines 816-817 (presumably initializing Operand0 and
 // Operand1, e.g. to TargetInstrInfo::CommuteAnyOperandIndex before the
 // findCommutedOpIndices query) are elided in this scrape — confirm
 // against upstream.
818 if (TII->findCommutedOpIndices(*Insert, Operand0, Operand1)) {
819 // Tentatively commute the operands and try again.
820 TII->commuteInstruction(*Insert, /*NewMI=*/false, Operand0, Operand1);
821 TreeWalker.resetTopOperands(Insert);
822 TentativelyCommuting = true;
823 Declined = false;
824 }
825 }
826 }
827
828 /// Stackification for some operand was successful. Reset to the default
829 /// state.
830 void reset() {
831 TentativelyCommuting = false;
832 Declined = false;
833 }
834};
835} // end anonymous namespace
836
// Main driver: walks each block bottom-up, greedily building single-use
// expression trees by moving, rematerializing, or teeing defs, then marks
// the participating vregs as stackified.
837bool WebAssemblyRegStackify::runOnMachineFunction(MachineFunction &MF) {
838 LLVM_DEBUG(dbgs() << "********** Register Stackifying **********\n"
839 "********** Function: "
840 << MF.getName() << '\n');
841
842 bool Changed = false;
843 MachineRegisterInfo &MRI = MF.getRegInfo();
844 WebAssemblyFunctionInfo &MFI = *MF.getInfo<WebAssemblyFunctionInfo>();
845 const auto *TII = MF.getSubtarget<WebAssemblySubtarget>().getInstrInfo();
 // MDT and LIS are only available (and only used) at optimizing levels.
846 MachineDominatorTree *MDT = nullptr;
847 LiveIntervals *LIS = nullptr;
848 if (Optimize) {
849 MDT = &getAnalysis<MachineDominatorTreeWrapperPass>().getDomTree();
850 LIS = &getAnalysis<LiveIntervalsWrapperPass>().getLIS();
851 }
852
853 // Walk the instructions from the bottom up. Currently we don't look past
854 // block boundaries, and the blocks aren't ordered so the block visitation
855 // order isn't significant, but we may want to change this in the future.
856 for (MachineBasicBlock &MBB : MF) {
857 // Don't use a range-based for loop, because we modify the list as we're
858 // iterating over it and the end iterator may change.
859 for (auto MII = MBB.rbegin(); MII != MBB.rend(); ++MII) {
860 MachineInstr *Insert = &*MII;
861 // Don't nest anything inside an inline asm, because we don't have
862 // constraints for $push inputs.
863 if (Insert->isInlineAsm())
864 continue;
865
866 // Ignore debugging intrinsics.
867 if (Insert->isDebugValue())
868 continue;
869
870 // Iterate through the inputs in reverse order, since we'll be pulling
871 // operands off the stack in LIFO order.
872 CommutingState Commuting;
873 TreeWalkerState TreeWalker(Insert);
874 while (!TreeWalker.done()) {
875 MachineOperand &Use = TreeWalker.pop();
876
877 // We're only interested in explicit virtual register operands.
878 if (!Use.isReg())
879 continue;
880
881 Register Reg = Use.getReg();
882 assert(Use.isUse() && "explicit_uses() should only iterate over uses");
883 assert(!Use.isImplicit() &&
884 "explicit_uses() should only iterate over explicit operands");
885 if (Reg.isPhysical())
886 continue;
887
888 // Identify the definition for this register at this point.
889 MachineInstr *DefI = getVRegDef(Reg, Insert, MRI, LIS);
890 if (!DefI)
891 continue;
892
893 // Don't nest an INLINE_ASM def into anything, because we don't have
894 // constraints for $pop outputs.
895 if (DefI->isInlineAsm())
896 continue;
897
898 // Argument instructions represent live-in registers and not real
899 // instructions.
 // NOTE(review): line 900 (the condition, presumably
 // `if (WebAssembly::isArgument(DefI->getOpcode()))`) is elided in this
 // scrape — confirm against upstream.
901 continue;
902
903 MachineOperand *Def =
904 DefI->findRegisterDefOperand(Reg, /*TRI=*/nullptr);
905 assert(Def != nullptr);
906
907 // Decide which strategy to take. Prefer to move a single-use value
908 // over cloning it, and prefer cloning over introducing a tee.
909 // For moving, we require the def to be in the same block as the use;
910 // this makes things simpler (LiveIntervals' handleMove function only
911 // supports intra-block moves) and it's MachineSink's job to catch all
912 // the sinking opportunities anyway.
913 bool SameBlock = DefI->getParent() == &MBB;
914 bool CanMove = SameBlock &&
915 isSafeToMove(Def, &Use, Insert, MFI, MRI, Optimize) &&
916 !TreeWalker.isOnStack(Reg);
917 if (CanMove && hasSingleUse(Reg, MRI, MFI, Optimize, DefI, LIS)) {
918 Insert = moveForSingleUse(Reg, Use, DefI, MBB, Insert, LIS, MFI, MRI);
919
920 // If we are removing the frame base reg completely, remove the debug
921 // info as well.
922 // TODO: Encode this properly as a stackified value.
923 if (MFI.isFrameBaseVirtual() && MFI.getFrameBaseVreg() == Reg) {
924 assert(
925 Optimize &&
926 "Stackifying away frame base in unoptimized code not expected");
927 MFI.clearFrameBaseVreg();
928 }
929 } else if (Optimize && shouldRematerialize(*DefI, TII)) {
930 Insert = rematerializeCheapDef(Reg, Use, *DefI, Insert->getIterator(),
931 *LIS, MFI, MRI, TII);
932 } else if (Optimize && CanMove &&
933 oneUseDominatesOtherUses(Reg, Use, MBB, MRI, *MDT, *LIS,
934 MFI)) {
935 Insert = moveAndTeeForMultiUse(Reg, Use, DefI, MBB, Insert, *LIS, MFI,
936 MRI, TII);
937 } else {
938 // We failed to stackify the operand. If the problem was ordering
939 // constraints, Commuting may be able to help.
940 if (!CanMove && SameBlock)
941 Commuting.maybeCommute(Insert, TreeWalker, TII);
942 // Proceed to the next operand.
943 continue;
944 }
945
946 // Stackifying a multivalue def may unlock in-place stackification of
947 // subsequent defs. TODO: Handle the case where the consecutive uses are
948 // not all in the same instruction.
949 auto *SubsequentDef = Insert->defs().begin();
950 auto *SubsequentUse = &Use;
951 while (SubsequentDef != Insert->defs().end() &&
952 SubsequentUse != Use.getParent()->uses().end()) {
953 if (!SubsequentDef->isReg() || !SubsequentUse->isReg())
954 break;
955 Register DefReg = SubsequentDef->getReg();
956 Register UseReg = SubsequentUse->getReg();
957 // TODO: This single-use restriction could be relaxed by using tees
958 if (DefReg != UseReg ||
959 !hasSingleUse(DefReg, MRI, MFI, Optimize, nullptr, nullptr))
960 break;
961 MFI.stackifyVReg(MRI, DefReg);
962 ++SubsequentDef;
963 ++SubsequentUse;
964 }
965
966 // If the instruction we just stackified is an IMPLICIT_DEF, convert it
967 // to a constant 0 so that the def is explicit, and the push/pop
968 // correspondence is maintained.
969 if (Insert->getOpcode() == TargetOpcode::IMPLICIT_DEF)
 // NOTE(review): line 970 (presumably the call
 // `convertImplicitDefToConstZero(Insert, MRI, TII, MF);`) is elided in
 // this scrape.
971
972 // We stackified an operand. Add the defining instruction's operands to
973 // the worklist stack now to continue to build an ever deeper tree.
974 Commuting.reset();
975 TreeWalker.pushOperands(Insert);
976 }
977
978 // If we stackified any operands, skip over the tree to start looking for
979 // the next instruction we can build a tree on.
980 if (Insert != &*MII) {
981 imposeStackOrdering(&*MII);
 // NOTE(review): line 982 (presumably advancing MII past the stackified
 // tree) is elided in this scrape.
983 Changed = true;
984 }
985 }
986 }
987
988 // If we used VALUE_STACK anywhere, add it to the live-in sets everywhere so
989 // that it never looks like a use-before-def.
990 if (Changed) {
991 MF.getRegInfo().addLiveIn(WebAssembly::VALUE_STACK);
992 for (MachineBasicBlock &MBB : MF)
993 MBB.addLiveIn(WebAssembly::VALUE_STACK);
994 }
995
996#ifndef NDEBUG
997 // Verify that pushes and pops are performed in LIFO order.
998 SmallVector<unsigned, 0> Stack;
999 for (MachineBasicBlock &MBB : MF) {
1000 for (MachineInstr &MI : MBB) {
1001 if (MI.isDebugInstr())
1002 continue;
1003 for (MachineOperand &MO : reverse(MI.explicit_uses())) {
1004 if (!MO.isReg())
1005 continue;
1006 Register Reg = MO.getReg();
1007 if (MFI.isVRegStackified(Reg))
1008 assert(Stack.pop_back_val() == Reg &&
1009 "Register stack pop should be paired with a push");
1010 }
1011 for (MachineOperand &MO : MI.defs()) {
1012 if (!MO.isReg())
1013 continue;
1014 Register Reg = MO.getReg();
1015 if (MFI.isVRegStackified(Reg))
1016 Stack.push_back(MO.getReg());
1017 }
1018 }
1019 // TODO: Generalize this code to support keeping values on the stack across
1020 // basic block boundaries.
1021 assert(Stack.empty() &&
1022 "Register stack pushes and pops should be balanced");
1023 }
1024#endif
1025
1026 return Changed;
1027}
unsigned const MachineRegisterInfo * MRI
assert(UImm &&(UImm !=~static_cast< T >(0)) &&"Invalid immediate!")
MachineBasicBlock & MBB
static GCRegistry::Add< StatepointGC > D("statepoint-example", "an example strategy for statepoint")
static GCRegistry::Add< CoreCLRGC > E("coreclr", "CoreCLR-compatible GC")
#define DEBUG_TYPE
static Register UseReg(const MachineOperand &MO)
const HexagonInstrInfo * TII
IRTranslator LLVM IR MI
#define F(x, y, z)
Definition MD5.cpp:55
#define I(x, y, z)
Definition MD5.cpp:58
Register Reg
Promote Memory to Register
Definition Mem2Reg.cpp:110
ConstantRange Range(APInt(BitWidth, Low), APInt(BitWidth, High))
#define INITIALIZE_PASS(passName, arg, name, cfg, analysis)
Definition PassSupport.h:56
static bool isSafeToMove(const MachineInstr &From, const MachineInstr &To)
Check if it's safe to move From down to To, checking that no physical registers are clobbered.
bool IsDead
#define LLVM_DEBUG(...)
Definition Debug.h:114
This file contains the declaration of the WebAssembly-specific manager for DebugValues associated wit...
This file provides WebAssembly-specific target descriptions.
This file declares WebAssembly-specific per-machine-function information.
static bool isSafeToMove(const MachineOperand *Def, const MachineOperand *Use, const MachineInstr *Insert, const WebAssemblyFunctionInfo &MFI, const MachineRegisterInfo &MRI, bool Optimize)
static unsigned getTeeOpcode(const TargetRegisterClass *RC)
Get the appropriate tee opcode for the given register class.
static MachineInstr * rematerializeCheapDef(unsigned Reg, MachineOperand &Op, MachineInstr &Def, MachineBasicBlock::instr_iterator Insert, LiveIntervals &LIS, WebAssemblyFunctionInfo &MFI, MachineRegisterInfo &MRI, const WebAssemblyInstrInfo *TII)
A trivially cloneable instruction; clone it and nest the new copy with the current instruction.
static void imposeStackOrdering(MachineInstr *MI)
static MachineInstr * moveForSingleUse(unsigned Reg, MachineOperand &Op, MachineInstr *Def, MachineBasicBlock &MBB, MachineInstr *Insert, LiveIntervals *LIS, WebAssemblyFunctionInfo &MFI, MachineRegisterInfo &MRI)
A single-use def in the same block with no intervening memory or register dependencies; move the def ...
static void query(const MachineInstr &MI, bool &Read, bool &Write, bool &Effects, bool &StackPointer)
static void shrinkToUses(LiveInterval &LI, LiveIntervals &LIS)
static void convertImplicitDefToConstZero(MachineInstr *MI, MachineRegisterInfo &MRI, const TargetInstrInfo *TII, MachineFunction &MF)
static bool hasSingleUse(unsigned Reg, MachineRegisterInfo &MRI, WebAssemblyFunctionInfo &MFI, bool Optimize, MachineInstr *Def, LiveIntervals *LIS)
static MachineInstr * getPrevNonDebugInst(MachineInstr *MI)
static bool shouldRematerialize(const MachineInstr &Def, const WebAssemblyInstrInfo *TII)
static MachineInstr * moveAndTeeForMultiUse(unsigned Reg, MachineOperand &Op, MachineInstr *Def, MachineBasicBlock &MBB, MachineInstr *Insert, LiveIntervals &LIS, WebAssemblyFunctionInfo &MFI, MachineRegisterInfo &MRI, const WebAssemblyInstrInfo *TII)
A multiple-use def in the same block with no intervening memory or register dependencies; move the de...
static bool oneUseDominatesOtherUses(unsigned Reg, const MachineOperand &OneUse, const MachineBasicBlock &MBB, const MachineRegisterInfo &MRI, const MachineDominatorTree &MDT, LiveIntervals &LIS, WebAssemblyFunctionInfo &MFI)
Test whether OneUse, a use of Reg, dominates all of Reg's other uses.
static void queryCallee(const MachineInstr &MI, bool &Read, bool &Write, bool &Effects, bool &StackPointer)
This file declares the WebAssembly-specific subclass of TargetSubtarget.
This file contains the declaration of the WebAssembly-specific utility functions.
This file contains the entry points for global functions defined in the LLVM WebAssembly back-end.
Represent the analysis usage information of a pass.
AnalysisUsage & addPreservedID(const void *ID)
AnalysisUsage & addRequired()
AnalysisUsage & addPreserved()
Add the specified Pass class to the set of analyses preserved by this pass.
LLVM_ABI void setPreservesCFG()
This function should be called by the pass, iff they do not:
Definition Pass.cpp:270
This is an important base class in LLVM.
Definition Constant.h:43
static LLVM_ABI Constant * getNullValue(Type *Ty)
Constructor to create a '0' constant of arbitrary type.
FunctionPass class - This class is used to implement most global optimizations.
Definition Pass.h:314
LLVMContext & getContext() const
getContext - Return a reference to the LLVMContext associated with this function.
Definition Function.cpp:359
LiveInterval - This class represents the liveness of a register, or stack slot.
MachineInstr * getInstructionFromIndex(SlotIndex index) const
Returns the instruction associated with the given index.
SlotIndex InsertMachineInstrInMaps(MachineInstr &MI)
LLVM_ABI void handleMove(MachineInstr &MI, bool UpdateFlags=false)
Call this method to notify LiveIntervals that instruction MI has been moved within a basic block.
SlotIndex getInstructionIndex(const MachineInstr &Instr) const
Returns the base index of the given instruction.
void RemoveMachineInstrFromMaps(MachineInstr &MI)
LiveInterval & getInterval(Register Reg)
void removeInterval(Register Reg)
Interval removal.
LLVM_ABI bool shrinkToUses(LiveInterval *li, SmallVectorImpl< MachineInstr * > *dead=nullptr)
After removing some uses of a register, shrink its live range to just the remaining uses.
LLVM_ABI void removePhysRegDefAt(MCRegister Reg, SlotIndex Pos)
Remove value numbers and related live segments starting at position Pos that are part of any liverang...
LLVM_ABI void splitSeparateComponents(LiveInterval &LI, SmallVectorImpl< LiveInterval * > &SplitLIs)
Split separate components in LiveInterval LI into separate intervals.
LiveInterval & createAndComputeVirtRegInterval(Register Reg)
Segments::iterator iterator
bool liveAt(SlotIndex index) const
LiveQueryResult Query(SlotIndex Idx) const
Query Liveness at Idx.
VNInfo * getVNInfoBefore(SlotIndex Idx) const
getVNInfoBefore - Return the VNInfo that is live up to but not necessarily including Idx,...
iterator FindSegmentContaining(SlotIndex Idx)
Return an iterator to the segment that contains the specified index, or end() if there is none.
LLVM_ABI void removeSegment(SlotIndex Start, SlotIndex End, bool RemoveDeadValNo=false)
Remove the specified interval from this live range.
VNInfo * getVNInfoAt(SlotIndex Idx) const
getVNInfoAt - Return the VNInfo that is live at Idx, or NULL.
unsigned getNumDefs() const
Return the number of MachineOperands that are register definitions.
static MCRegister from(unsigned Val)
Check the provided unsigned value is a valid MCRegister.
Definition MCRegister.h:69
MachineInstrBundleIterator< const MachineInstr > const_iterator
Instructions::iterator instr_iterator
void addLiveIn(MCRegister PhysReg, LaneBitmask LaneMask=LaneBitmask::getAll())
Adds the specified register as a live in.
reverse_iterator rbegin()
MachineInstrBundleIterator< MachineInstr > iterator
Analysis pass which computes a MachineDominatorTree.
DominatorTree Class - Concrete subclass of DominatorTreeBase that is used to compute a normal dominat...
bool dominates(const MachineInstr *A, const MachineInstr *B) const
MachineFunctionPass - This class adapts the FunctionPass interface to allow convenient creation of pa...
void getAnalysisUsage(AnalysisUsage &AU) const override
getAnalysisUsage - Subclasses that override getAnalysisUsage must call this.
const TargetSubtargetInfo & getSubtarget() const
getSubtarget - Return the subtarget for which this machine code is being compiled.
StringRef getName() const
getName - Return the name of the corresponding LLVM function.
MachineRegisterInfo & getRegInfo()
getRegInfo - Return information about the registers currently in use.
Function & getFunction()
Return the LLVM function that this machine code represents.
Ty * getInfo()
getInfo - Keep track of various per-function pieces of information for backends that would like to do...
const MachineInstrBuilder & addReg(Register RegNo, unsigned flags=0, unsigned SubReg=0) const
Add a new virtual register operand.
reverse_iterator getReverse() const
Get a reverse iterator to the same node.
Representation of each machine instruction.
mop_range defs()
Returns all explicit operands that are register definitions.
unsigned getOpcode() const
Returns the opcode of this MachineInstr.
const MachineBasicBlock * getParent() const
bool isInlineAsm() const
const MCInstrDesc & getDesc() const
Returns the target instruction descriptor of this MachineInstr.
mop_range operands()
MachineOperand * mop_iterator
iterator/begin/end - Iterate over all operands of a machine instruction.
LLVM_ABI void dump() const
const MachineOperand & getOperand(unsigned i) const
MachineOperand * findRegisterDefOperand(Register Reg, const TargetRegisterInfo *TRI, bool isDead=false, bool Overlap=false)
Wrapper for findRegisterDefOperandIdx, it returns a pointer to the MachineOperand rather than an inde...
MachineOperand class - Representation of each machine instruction operand.
const GlobalValue * getGlobal() const
static MachineOperand CreateFPImm(const ConstantFP *CFP)
bool isReg() const
isReg - Tests if this is a MO_Register operand.
MachineInstr * getParent()
getParent - Return the instruction that this operand belongs to.
static MachineOperand CreateImm(int64_t Val)
bool isGlobal() const
isGlobal - Tests if this is a MO_GlobalAddress operand.
Register getReg() const
getReg - Returns the register number.
static MachineOperand CreateReg(Register Reg, bool isDef, bool isImp=false, bool isKill=false, bool isDead=false, bool isUndef=false, bool isEarlyClobber=false, unsigned SubReg=0, bool isDebug=false, bool isInternalRead=false, bool isRenamable=false)
MachineRegisterInfo - Keep track of information for virtual and physical registers,...
Wrapper class representing virtual and physical registers.
Definition Register.h:19
constexpr bool isVirtual() const
Return true if the specified register number is in the virtual register namespace.
Definition Register.h:74
constexpr bool isPhysical() const
Return true if the specified register number is in the physical register namespace.
Definition Register.h:78
SlotIndex - An opaque wrapper around machine indexes.
Definition SlotIndexes.h:66
SlotIndex getDeadSlot() const
Returns the dead def kill slot for the current instruction.
SlotIndex getRegSlot(bool EC=false) const
Returns the register use/def slot in the current instruction for a normal or early-clobber def.
void push_back(const T &Elt)
This is a 'vector' (really, a variable-sized array), optimized for the case when the array is small.
StringRef - Represent a constant reference to a string, i.e.
Definition StringRef.h:55
TargetInstrInfo - Interface to description of machine instruction set.
static const unsigned CommuteAnyOperandIndex
static LLVM_ABI Type * getDoubleTy(LLVMContext &C)
Definition Type.cpp:286
static LLVM_ABI Type * getFloatTy(LLVMContext &C)
Definition Type.cpp:285
A Use represents the edge between a Value definition and its users.
Definition Use.h:35
VNInfo - Value Number Information.
SlotIndex def
The index of the defining instruction.
void cloneSink(MachineInstr *Insert, Register NewReg=Register(), bool CloneDef=true) const
This class is derived from MachineFunctionInfo and contains private WebAssembly-specific information ...
void stackifyVReg(MachineRegisterInfo &MRI, Register VReg)
IteratorT begin() const
Changed
#define llvm_unreachable(msg)
Marks that the current location is not supposed to be reachable.
unsigned ID
LLVM IR allows to use arbitrary numbers as calling convention identifiers.
Definition CallingConv.h:24
@ Define
Register definition.
bool isArgument(unsigned Opc)
const MachineOperand & getCalleeOp(const MachineInstr &MI)
Returns the operand number of a callee, assuming the argument is a call instruction.
bool isCatch(unsigned Opc)
NodeAddr< DefNode * > Def
Definition RDFGraph.h:384
NodeAddr< InstrNode * > Instr
Definition RDFGraph.h:389
NodeAddr< UseNode * > Use
Definition RDFGraph.h:385
This is an optimization pass for GlobalISel generic memory operations.
auto drop_begin(T &&RangeOrContainer, size_t N=1)
Return a range covering RangeOrContainer with the first N elements excluded.
Definition STLExtras.h:310
void dump(const SparseBitVector< ElementSize > &LHS, raw_ostream &out)
MachineInstrBuilder BuildMI(MachineFunction &MF, const MIMetadata &MIMD, const MCInstrDesc &MCID)
Builder interface. Specify how to create the initial instruction itself.
decltype(auto) dyn_cast(const From &Val)
dyn_cast<X> - Return the argument parameter cast to the specified type.
Definition Casting.h:649
FunctionPass * createWebAssemblyRegStackify(CodeGenOptLevel OptLevel)
auto reverse(ContainerTy &&C)
Definition STLExtras.h:400
LLVM_ABI raw_ostream & dbgs()
dbgs() - This returns a reference to a raw_ostream for debugging messages.
Definition Debug.cpp:207
CodeGenOptLevel
Code generation optimization level.
Definition CodeGen.h:82
@ Default
-O2, -Os
Definition CodeGen.h:85
class LLVM_GSL_OWNER SmallVector
Forward declaration of SmallVector so that calculateSmallVectorDefaultInlinedElements can reference s...
unsigned getUndefRegState(bool B)
DWARFExpression::Operation Op
LLVM_ABI char & LiveVariablesID
LiveVariables pass - This pass computes the set of blocks in which each variable is live and sets machine operand kill flags.
decltype(auto) cast(const From &Val)
cast<X> - Return the argument parameter cast to the specified type.
Definition Casting.h:565
MachineInstr * getVRegDef(MachineRegisterInfo &MRI, Register Reg)