LLVM 22.0.0git
TargetInstrInfo.cpp
Go to the documentation of this file.
1//===-- TargetInstrInfo.cpp - Target Instruction Information --------------===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8//
9// This file implements the TargetInstrInfo class.
10//
11//===----------------------------------------------------------------------===//
12
14#include "llvm/ADT/SmallSet.h"
31#include "llvm/IR/DataLayout.h"
33#include "llvm/MC/MCAsmInfo.h"
39
40using namespace llvm;
41
43 "disable-sched-hazard", cl::Hidden, cl::init(false),
44 cl::desc("Disable hazard detection during preRA scheduling"));
45
47 "acc-reassoc", cl::Hidden, cl::init(true),
48 cl::desc("Enable reassociation of accumulation chains"));
49
52 cl::desc("Minimum length of accumulator chains "
53 "required for the optimization to kick in"));
54
56 "acc-max-width", cl::Hidden, cl::init(3),
57 cl::desc("Maximum number of branches in the accumulator tree"));
58
60
62 unsigned OpNum) const {
63 if (OpNum >= MCID.getNumOperands())
64 return nullptr;
65
66 const MCOperandInfo &OpInfo = MCID.operands()[OpNum];
67 int16_t RegClass = getOpRegClassID(OpInfo);
68
69 // TODO: Remove isLookupPtrRegClass in favor of isLookupRegClassByHwMode
70 if (OpInfo.isLookupPtrRegClass())
71 return TRI.getPointerRegClass(RegClass);
72
73 // Instructions like INSERT_SUBREG do not have fixed register classes.
74 if (RegClass < 0)
75 return nullptr;
76
77 // Otherwise just look it up normally.
78 return TRI.getRegClass(RegClass);
79}
80
81/// insertNoop - Insert a noop into the instruction stream at the specified
82/// point.
85 llvm_unreachable("Target didn't implement insertNoop!");
86}
87
88/// insertNoops - Insert noops into the instruction stream at the specified
89/// point.
92 unsigned Quantity) const {
93 for (unsigned i = 0; i < Quantity; ++i)
95}
96
97static bool isAsmComment(const char *Str, const MCAsmInfo &MAI) {
98 return strncmp(Str, MAI.getCommentString().data(),
99 MAI.getCommentString().size()) == 0;
100}
101
102/// Measure the specified inline asm to determine an approximation of its
103/// length.
104/// Comments (which run till the next SeparatorString or newline) do not
105/// count as an instruction.
106/// Any other non-whitespace text is considered an instruction, with
107/// multiple instructions separated by SeparatorString or newlines.
108/// Variable-length instructions are not handled here; this function
109/// may be overloaded in the target code to do that.
110/// We implement a special case of the .space directive which takes only a
111/// single integer argument in base 10 that is the size in bytes. This is a
112/// restricted form of the GAS directive in that we only interpret
113/// simple--i.e. not a logical or arithmetic expression--size values without
114/// the optional fill value. This is primarily used for creating arbitrary
115/// sized inline asm blocks for testing purposes.
117 const char *Str,
118 const MCAsmInfo &MAI, const TargetSubtargetInfo *STI) const {
119 // Count the number of instructions in the asm.
120 bool AtInsnStart = true;
121 unsigned Length = 0;
122 const unsigned MaxInstLength = MAI.getMaxInstLength(STI);
123 for (; *Str; ++Str) {
124 if (*Str == '\n' || strncmp(Str, MAI.getSeparatorString(),
125 strlen(MAI.getSeparatorString())) == 0) {
126 AtInsnStart = true;
127 } else if (isAsmComment(Str, MAI)) {
128 // Stop counting as an instruction after a comment until the next
129 // separator.
130 AtInsnStart = false;
131 }
132
133 if (AtInsnStart && !isSpace(static_cast<unsigned char>(*Str))) {
134 unsigned AddLength = MaxInstLength;
135 if (strncmp(Str, ".space", 6) == 0) {
136 char *EStr;
137 int SpaceSize;
138 SpaceSize = strtol(Str + 6, &EStr, 10);
139 SpaceSize = SpaceSize < 0 ? 0 : SpaceSize;
140 while (*EStr != '\n' && isSpace(static_cast<unsigned char>(*EStr)))
141 ++EStr;
142 if (*EStr == '\0' || *EStr == '\n' ||
143 isAsmComment(EStr, MAI)) // Successfully parsed .space argument
144 AddLength = SpaceSize;
145 }
146 Length += AddLength;
147 AtInsnStart = false;
148 }
149 }
150
151 return Length;
152}
153
154/// ReplaceTailWithBranchTo - Delete the instruction OldInst and everything
155/// after it, replacing it with an unconditional branch to NewDest.
156void
158 MachineBasicBlock *NewDest) const {
159 MachineBasicBlock *MBB = Tail->getParent();
160
161 // Remove all the old successors of MBB from the CFG.
162 while (!MBB->succ_empty())
163 MBB->removeSuccessor(MBB->succ_begin());
164
165 // Save off the debug loc before erasing the instruction.
166 DebugLoc DL = Tail->getDebugLoc();
167
168 // Update call info and remove all the dead instructions
169 // from the end of MBB.
170 while (Tail != MBB->end()) {
171 auto MI = Tail++;
172 if (MI->shouldUpdateAdditionalCallInfo())
173 MBB->getParent()->eraseAdditionalCallInfo(&*MI);
174 MBB->erase(MI);
175 }
176
177 // If MBB isn't immediately before MBB, insert a branch to it.
179 insertBranch(*MBB, NewDest, nullptr, SmallVector<MachineOperand, 0>(), DL);
180 MBB->addSuccessor(NewDest);
181}
182
184 bool NewMI, unsigned Idx1,
185 unsigned Idx2) const {
186 const MCInstrDesc &MCID = MI.getDesc();
187 bool HasDef = MCID.getNumDefs();
188 if (HasDef && !MI.getOperand(0).isReg())
189 // No idea how to commute this instruction. Target should implement its own.
190 return nullptr;
191
192 unsigned CommutableOpIdx1 = Idx1; (void)CommutableOpIdx1;
193 unsigned CommutableOpIdx2 = Idx2; (void)CommutableOpIdx2;
194 assert(findCommutedOpIndices(MI, CommutableOpIdx1, CommutableOpIdx2) &&
195 CommutableOpIdx1 == Idx1 && CommutableOpIdx2 == Idx2 &&
196 "TargetInstrInfo::CommuteInstructionImpl(): not commutable operands.");
197 assert(MI.getOperand(Idx1).isReg() && MI.getOperand(Idx2).isReg() &&
198 "This only knows how to commute register operands so far");
199
200 Register Reg0 = HasDef ? MI.getOperand(0).getReg() : Register();
201 Register Reg1 = MI.getOperand(Idx1).getReg();
202 Register Reg2 = MI.getOperand(Idx2).getReg();
203 unsigned SubReg0 = HasDef ? MI.getOperand(0).getSubReg() : 0;
204 unsigned SubReg1 = MI.getOperand(Idx1).getSubReg();
205 unsigned SubReg2 = MI.getOperand(Idx2).getSubReg();
206 bool Reg1IsKill = MI.getOperand(Idx1).isKill();
207 bool Reg2IsKill = MI.getOperand(Idx2).isKill();
208 bool Reg1IsUndef = MI.getOperand(Idx1).isUndef();
209 bool Reg2IsUndef = MI.getOperand(Idx2).isUndef();
210 bool Reg1IsInternal = MI.getOperand(Idx1).isInternalRead();
211 bool Reg2IsInternal = MI.getOperand(Idx2).isInternalRead();
212 // Avoid calling isRenamable for virtual registers since we assert that
213 // renamable property is only queried/set for physical registers.
214 bool Reg1IsRenamable =
215 Reg1.isPhysical() ? MI.getOperand(Idx1).isRenamable() : false;
216 bool Reg2IsRenamable =
217 Reg2.isPhysical() ? MI.getOperand(Idx2).isRenamable() : false;
218
219 // For a case like this:
220 // %0.sub = INST %0.sub(tied), %1.sub, implicit-def %0
221 // we need to update the implicit-def after commuting to result in:
222 // %1.sub = INST %1.sub(tied), %0.sub, implicit-def %1
223 SmallVector<unsigned> UpdateImplicitDefIdx;
224 if (HasDef && MI.hasImplicitDef()) {
225 for (auto [OpNo, MO] : llvm::enumerate(MI.implicit_operands())) {
226 Register ImplReg = MO.getReg();
227 if ((ImplReg.isVirtual() && ImplReg == Reg0) ||
228 (ImplReg.isPhysical() && Reg0.isPhysical() &&
229 TRI.isSubRegisterEq(ImplReg, Reg0)))
230 UpdateImplicitDefIdx.push_back(OpNo + MI.getNumExplicitOperands());
231 }
232 }
233
234 // If destination is tied to either of the commuted source register, then
235 // it must be updated.
236 if (HasDef && Reg0 == Reg1 &&
237 MI.getDesc().getOperandConstraint(Idx1, MCOI::TIED_TO) == 0) {
238 Reg2IsKill = false;
239 Reg0 = Reg2;
240 SubReg0 = SubReg2;
241 } else if (HasDef && Reg0 == Reg2 &&
242 MI.getDesc().getOperandConstraint(Idx2, MCOI::TIED_TO) == 0) {
243 Reg1IsKill = false;
244 Reg0 = Reg1;
245 SubReg0 = SubReg1;
246 }
247
248 MachineInstr *CommutedMI = nullptr;
249 if (NewMI) {
250 // Create a new instruction.
251 MachineFunction &MF = *MI.getMF();
252 CommutedMI = MF.CloneMachineInstr(&MI);
253 } else {
254 CommutedMI = &MI;
255 }
256
257 if (HasDef) {
258 CommutedMI->getOperand(0).setReg(Reg0);
259 CommutedMI->getOperand(0).setSubReg(SubReg0);
260 for (unsigned Idx : UpdateImplicitDefIdx)
261 CommutedMI->getOperand(Idx).setReg(Reg0);
262 }
263 CommutedMI->getOperand(Idx2).setReg(Reg1);
264 CommutedMI->getOperand(Idx1).setReg(Reg2);
265 CommutedMI->getOperand(Idx2).setSubReg(SubReg1);
266 CommutedMI->getOperand(Idx1).setSubReg(SubReg2);
267 CommutedMI->getOperand(Idx2).setIsKill(Reg1IsKill);
268 CommutedMI->getOperand(Idx1).setIsKill(Reg2IsKill);
269 CommutedMI->getOperand(Idx2).setIsUndef(Reg1IsUndef);
270 CommutedMI->getOperand(Idx1).setIsUndef(Reg2IsUndef);
271 CommutedMI->getOperand(Idx2).setIsInternalRead(Reg1IsInternal);
272 CommutedMI->getOperand(Idx1).setIsInternalRead(Reg2IsInternal);
273 // Avoid calling setIsRenamable for virtual registers since we assert that
274 // renamable property is only queried/set for physical registers.
275 if (Reg1.isPhysical())
276 CommutedMI->getOperand(Idx2).setIsRenamable(Reg1IsRenamable);
277 if (Reg2.isPhysical())
278 CommutedMI->getOperand(Idx1).setIsRenamable(Reg2IsRenamable);
279 return CommutedMI;
280}
281
283 unsigned OpIdx1,
284 unsigned OpIdx2) const {
285 // If OpIdx1 or OpIdx2 is not specified, then this method is free to choose
286 // any commutable operand, which is done in findCommutedOpIndices() method
287 // called below.
288 if ((OpIdx1 == CommuteAnyOperandIndex || OpIdx2 == CommuteAnyOperandIndex) &&
289 !findCommutedOpIndices(MI, OpIdx1, OpIdx2)) {
290 assert(MI.isCommutable() &&
291 "Precondition violation: MI must be commutable.");
292 return nullptr;
293 }
294 return commuteInstructionImpl(MI, NewMI, OpIdx1, OpIdx2);
295}
296
298 unsigned &ResultIdx2,
299 unsigned CommutableOpIdx1,
300 unsigned CommutableOpIdx2) {
301 if (ResultIdx1 == CommuteAnyOperandIndex &&
302 ResultIdx2 == CommuteAnyOperandIndex) {
303 ResultIdx1 = CommutableOpIdx1;
304 ResultIdx2 = CommutableOpIdx2;
305 } else if (ResultIdx1 == CommuteAnyOperandIndex) {
306 if (ResultIdx2 == CommutableOpIdx1)
307 ResultIdx1 = CommutableOpIdx2;
308 else if (ResultIdx2 == CommutableOpIdx2)
309 ResultIdx1 = CommutableOpIdx1;
310 else
311 return false;
312 } else if (ResultIdx2 == CommuteAnyOperandIndex) {
313 if (ResultIdx1 == CommutableOpIdx1)
314 ResultIdx2 = CommutableOpIdx2;
315 else if (ResultIdx1 == CommutableOpIdx2)
316 ResultIdx2 = CommutableOpIdx1;
317 else
318 return false;
319 } else
320 // Check that the result operand indices match the given commutable
321 // operand indices.
322 return (ResultIdx1 == CommutableOpIdx1 && ResultIdx2 == CommutableOpIdx2) ||
323 (ResultIdx1 == CommutableOpIdx2 && ResultIdx2 == CommutableOpIdx1);
324
325 return true;
326}
327
329 unsigned &SrcOpIdx1,
330 unsigned &SrcOpIdx2) const {
331 assert(!MI.isBundle() &&
332 "TargetInstrInfo::findCommutedOpIndices() can't handle bundles");
333
334 const MCInstrDesc &MCID = MI.getDesc();
335 if (!MCID.isCommutable())
336 return false;
337
338 // This assumes v0 = op v1, v2 and commuting would swap v1 and v2. If this
339 // is not true, then the target must implement this.
340 unsigned CommutableOpIdx1 = MCID.getNumDefs();
341 unsigned CommutableOpIdx2 = CommutableOpIdx1 + 1;
342 if (!fixCommutedOpIndices(SrcOpIdx1, SrcOpIdx2,
343 CommutableOpIdx1, CommutableOpIdx2))
344 return false;
345
346 if (!MI.getOperand(SrcOpIdx1).isReg() || !MI.getOperand(SrcOpIdx2).isReg())
347 // No idea.
348 return false;
349 return true;
350}
351
353 if (!MI.isTerminator()) return false;
354
355 // Conditional branch is a special case.
356 if (MI.isBranch() && !MI.isBarrier())
357 return true;
358 if (!MI.isPredicable())
359 return true;
360 return !isPredicated(MI);
361}
362
365 bool MadeChange = false;
366
367 assert(!MI.isBundle() &&
368 "TargetInstrInfo::PredicateInstruction() can't handle bundles");
369
370 const MCInstrDesc &MCID = MI.getDesc();
371 if (!MI.isPredicable())
372 return false;
373
374 for (unsigned j = 0, i = 0, e = MI.getNumOperands(); i != e; ++i) {
375 if (MCID.operands()[i].isPredicate()) {
376 MachineOperand &MO = MI.getOperand(i);
377 if (MO.isReg()) {
378 MO.setReg(Pred[j].getReg());
379 MadeChange = true;
380 } else if (MO.isImm()) {
381 MO.setImm(Pred[j].getImm());
382 MadeChange = true;
383 } else if (MO.isMBB()) {
384 MO.setMBB(Pred[j].getMBB());
385 MadeChange = true;
386 }
387 ++j;
388 }
389 }
390 return MadeChange;
391}
392
394 const MachineInstr &MI,
396 size_t StartSize = Accesses.size();
397 for (MachineInstr::mmo_iterator o = MI.memoperands_begin(),
398 oe = MI.memoperands_end();
399 o != oe; ++o) {
400 if ((*o)->isLoad() &&
401 isa_and_nonnull<FixedStackPseudoSourceValue>((*o)->getPseudoValue()))
402 Accesses.push_back(*o);
403 }
404 return Accesses.size() != StartSize;
405}
406
408 const MachineInstr &MI,
410 size_t StartSize = Accesses.size();
411 for (MachineInstr::mmo_iterator o = MI.memoperands_begin(),
412 oe = MI.memoperands_end();
413 o != oe; ++o) {
414 if ((*o)->isStore() &&
415 isa_and_nonnull<FixedStackPseudoSourceValue>((*o)->getPseudoValue()))
416 Accesses.push_back(*o);
417 }
418 return Accesses.size() != StartSize;
419}
420
422 unsigned SubIdx, unsigned &Size,
423 unsigned &Offset,
424 const MachineFunction &MF) const {
425 if (!SubIdx) {
426 Size = TRI.getSpillSize(*RC);
427 Offset = 0;
428 return true;
429 }
430 unsigned BitSize = TRI.getSubRegIdxSize(SubIdx);
431 // Convert bit size to byte size.
432 if (BitSize % 8)
433 return false;
434
435 int BitOffset = TRI.getSubRegIdxOffset(SubIdx);
436 if (BitOffset < 0 || BitOffset % 8)
437 return false;
438
439 Size = BitSize / 8;
440 Offset = (unsigned)BitOffset / 8;
441
442 assert(TRI.getSpillSize(*RC) >= (Offset + Size) && "bad subregister range");
443
444 if (!MF.getDataLayout().isLittleEndian()) {
445 Offset = TRI.getSpillSize(*RC) - (Offset + Size);
446 }
447 return true;
448}
449
452 Register DestReg, unsigned SubIdx,
453 const MachineInstr &Orig) const {
454 MachineInstr *MI = MBB.getParent()->CloneMachineInstr(&Orig);
455 MI->substituteRegister(MI->getOperand(0).getReg(), DestReg, SubIdx, TRI);
456 MBB.insert(I, MI);
457}
458
460 const MachineInstr &MI1,
461 const MachineRegisterInfo *MRI) const {
463}
464
467 MachineBasicBlock::iterator InsertBefore,
468 const MachineInstr &Orig) const {
469 MachineFunction &MF = *MBB.getParent();
470 // CFI instructions are marked as non-duplicable, because Darwin compact
471 // unwind info emission can't handle multiple prologue setups.
472 assert((!Orig.isNotDuplicable() ||
474 Orig.isCFIInstruction())) &&
475 "Instruction cannot be duplicated");
476
477 return MF.cloneMachineInstrBundle(MBB, InsertBefore, Orig);
478}
479
480// If the COPY instruction in MI can be folded to a stack operation, return
481// the register class to use.
483 const TargetInstrInfo &TII,
484 unsigned FoldIdx) {
485 assert(TII.isCopyInstr(MI) && "MI must be a COPY instruction");
486 if (MI.getNumOperands() != 2)
487 return nullptr;
488 assert(FoldIdx<2 && "FoldIdx refers no nonexistent operand");
489
490 const MachineOperand &FoldOp = MI.getOperand(FoldIdx);
491 const MachineOperand &LiveOp = MI.getOperand(1 - FoldIdx);
492
493 if (FoldOp.getSubReg() || LiveOp.getSubReg())
494 return nullptr;
495
496 Register FoldReg = FoldOp.getReg();
497 Register LiveReg = LiveOp.getReg();
498
499 assert(FoldReg.isVirtual() && "Cannot fold physregs");
500
501 const MachineRegisterInfo &MRI = MI.getMF()->getRegInfo();
502 const TargetRegisterClass *RC = MRI.getRegClass(FoldReg);
503
504 if (LiveOp.getReg().isPhysical())
505 return RC->contains(LiveOp.getReg()) ? RC : nullptr;
506
507 if (RC->hasSubClassEq(MRI.getRegClass(LiveReg)))
508 return RC;
509
510 // FIXME: Allow folding when register classes are memory compatible.
511 return nullptr;
512}
513
514MCInst TargetInstrInfo::getNop() const { llvm_unreachable("Not implemented"); }
515
516/// Try to remove the load by folding it to a register
517/// operand at the use. We fold the load instructions if load defines a virtual
518/// register, the virtual register is used once in the same BB, and the
519/// instructions in-between do not load or store, and have no side effects.
522 Register &FoldAsLoadDefReg,
523 MachineInstr *&DefMI) const {
524 // Check whether we can move DefMI here.
525 DefMI = MRI->getVRegDef(FoldAsLoadDefReg);
526 assert(DefMI);
527 bool SawStore = false;
528 if (!DefMI->isSafeToMove(SawStore))
529 return nullptr;
530
531 // Collect information about virtual register operands of MI.
532 SmallVector<unsigned, 1> SrcOperandIds;
533 for (unsigned i = 0, e = MI.getNumOperands(); i != e; ++i) {
534 MachineOperand &MO = MI.getOperand(i);
535 if (!MO.isReg())
536 continue;
537 Register Reg = MO.getReg();
538 if (Reg != FoldAsLoadDefReg)
539 continue;
540 // Do not fold if we have a subreg use or a def.
541 if (MO.getSubReg() || MO.isDef())
542 return nullptr;
543 SrcOperandIds.push_back(i);
544 }
545 if (SrcOperandIds.empty())
546 return nullptr;
547
548 // Check whether we can fold the def into SrcOperandId.
549 if (MachineInstr *FoldMI = foldMemoryOperand(MI, SrcOperandIds, *DefMI)) {
550 FoldAsLoadDefReg = 0;
551 return FoldMI;
552 }
553
554 return nullptr;
555}
556
557std::pair<unsigned, unsigned>
559 switch (MI.getOpcode()) {
560 case TargetOpcode::STACKMAP:
561 // StackMapLiveValues are foldable
562 return std::make_pair(0, StackMapOpers(&MI).getVarIdx());
563 case TargetOpcode::PATCHPOINT:
564 // For PatchPoint, the call args are not foldable (even if reported in the
565 // stackmap e.g. via anyregcc).
566 return std::make_pair(0, PatchPointOpers(&MI).getVarIdx());
567 case TargetOpcode::STATEPOINT:
568 // For statepoints, fold deopt and gc arguments, but not call arguments.
569 return std::make_pair(MI.getNumDefs(), StatepointOpers(&MI).getVarIdx());
570 default:
571 llvm_unreachable("unexpected stackmap opcode");
572 }
573}
574
576 ArrayRef<unsigned> Ops, int FrameIndex,
577 const TargetInstrInfo &TII) {
578 unsigned StartIdx = 0;
579 unsigned NumDefs = 0;
580 // getPatchpointUnfoldableRange throws guarantee if MI is not a patchpoint.
581 std::tie(NumDefs, StartIdx) = TII.getPatchpointUnfoldableRange(MI);
582
583 unsigned DefToFoldIdx = MI.getNumOperands();
584
585 // Return false if any operands requested for folding are not foldable (not
586 // part of the stackmap's live values).
587 for (unsigned Op : Ops) {
588 if (Op < NumDefs) {
589 assert(DefToFoldIdx == MI.getNumOperands() && "Folding multiple defs");
590 DefToFoldIdx = Op;
591 } else if (Op < StartIdx) {
592 return nullptr;
593 }
594 if (MI.getOperand(Op).isTied())
595 return nullptr;
596 }
597
598 MachineInstr *NewMI =
599 MF.CreateMachineInstr(TII.get(MI.getOpcode()), MI.getDebugLoc(), true);
600 MachineInstrBuilder MIB(MF, NewMI);
601
602 // No need to fold return, the meta data, and function arguments
603 for (unsigned i = 0; i < StartIdx; ++i)
604 if (i != DefToFoldIdx)
605 MIB.add(MI.getOperand(i));
606
607 for (unsigned i = StartIdx, e = MI.getNumOperands(); i < e; ++i) {
608 MachineOperand &MO = MI.getOperand(i);
609 unsigned TiedTo = e;
610 (void)MI.isRegTiedToDefOperand(i, &TiedTo);
611
612 if (is_contained(Ops, i)) {
613 assert(TiedTo == e && "Cannot fold tied operands");
614 unsigned SpillSize;
615 unsigned SpillOffset;
616 // Compute the spill slot size and offset.
617 const TargetRegisterClass *RC =
618 MF.getRegInfo().getRegClass(MO.getReg());
619 bool Valid =
620 TII.getStackSlotRange(RC, MO.getSubReg(), SpillSize, SpillOffset, MF);
621 if (!Valid)
622 report_fatal_error("cannot spill patchpoint subregister operand");
623 MIB.addImm(StackMaps::IndirectMemRefOp);
624 MIB.addImm(SpillSize);
625 MIB.addFrameIndex(FrameIndex);
626 MIB.addImm(SpillOffset);
627 } else {
628 MIB.add(MO);
629 if (TiedTo < e) {
630 assert(TiedTo < NumDefs && "Bad tied operand");
631 if (TiedTo > DefToFoldIdx)
632 --TiedTo;
633 NewMI->tieOperands(TiedTo, NewMI->getNumOperands() - 1);
634 }
635 }
636 }
637 return NewMI;
638}
639
640static void foldInlineAsmMemOperand(MachineInstr *MI, unsigned OpNo, int FI,
641 const TargetInstrInfo &TII) {
642 // If the machine operand is tied, untie it first.
643 if (MI->getOperand(OpNo).isTied()) {
644 unsigned TiedTo = MI->findTiedOperandIdx(OpNo);
645 MI->untieRegOperand(OpNo);
646 // Intentional recursion!
647 foldInlineAsmMemOperand(MI, TiedTo, FI, TII);
648 }
649
651 TII.getFrameIndexOperands(NewOps, FI);
652 assert(!NewOps.empty() && "getFrameIndexOperands didn't create any operands");
653 MI->removeOperand(OpNo);
654 MI->insert(MI->operands_begin() + OpNo, NewOps);
655
656 // Change the previous operand to a MemKind InlineAsm::Flag. The second param
657 // is the per-target number of operands that represent the memory operand
658 // excluding this one (MD). This includes MO.
660 F.setMemConstraint(InlineAsm::ConstraintCode::m);
661 MachineOperand &MD = MI->getOperand(OpNo - 1);
662 MD.setImm(F);
663}
664
665// Returns nullptr if not possible to fold.
667 ArrayRef<unsigned> Ops, int FI,
668 const TargetInstrInfo &TII) {
669 assert(MI.isInlineAsm() && "wrong opcode");
670 if (Ops.size() > 1)
671 return nullptr;
672 unsigned Op = Ops[0];
673 assert(Op && "should never be first operand");
674 assert(MI.getOperand(Op).isReg() && "shouldn't be folding non-reg operands");
675
676 if (!MI.mayFoldInlineAsmRegOp(Op))
677 return nullptr;
678
679 MachineInstr &NewMI = TII.duplicate(*MI.getParent(), MI.getIterator(), MI);
680
681 foldInlineAsmMemOperand(&NewMI, Op, FI, TII);
682
683 // Update mayload/maystore metadata, and memoperands.
684 const VirtRegInfo &RI =
685 AnalyzeVirtRegInBundle(MI, MI.getOperand(Op).getReg());
688 if (RI.Reads) {
689 ExtraMO.setImm(ExtraMO.getImm() | InlineAsm::Extra_MayLoad);
691 }
692 if (RI.Writes) {
693 ExtraMO.setImm(ExtraMO.getImm() | InlineAsm::Extra_MayStore);
695 }
696 MachineFunction *MF = NewMI.getMF();
697 const MachineFrameInfo &MFI = MF->getFrameInfo();
699 MachinePointerInfo::getFixedStack(*MF, FI), Flags, MFI.getObjectSize(FI),
700 MFI.getObjectAlign(FI));
701 NewMI.addMemOperand(*MF, MMO);
702
703 return &NewMI;
704}
705
707 ArrayRef<unsigned> Ops, int FI,
708 LiveIntervals *LIS,
709 VirtRegMap *VRM) const {
710 auto Flags = MachineMemOperand::MONone;
711 for (unsigned OpIdx : Ops)
712 Flags |= MI.getOperand(OpIdx).isDef() ? MachineMemOperand::MOStore
714
715 MachineBasicBlock *MBB = MI.getParent();
716 assert(MBB && "foldMemoryOperand needs an inserted instruction");
717 MachineFunction &MF = *MBB->getParent();
718
719 // If we're not folding a load into a subreg, the size of the load is the
720 // size of the spill slot. But if we are, we need to figure out what the
721 // actual load size is.
722 int64_t MemSize = 0;
723 const MachineFrameInfo &MFI = MF.getFrameInfo();
724
725 if (Flags & MachineMemOperand::MOStore) {
726 MemSize = MFI.getObjectSize(FI);
727 } else {
728 for (unsigned OpIdx : Ops) {
729 int64_t OpSize = MFI.getObjectSize(FI);
730
731 if (auto SubReg = MI.getOperand(OpIdx).getSubReg()) {
732 unsigned SubRegSize = TRI.getSubRegIdxSize(SubReg);
733 if (SubRegSize > 0 && !(SubRegSize % 8))
734 OpSize = SubRegSize / 8;
735 }
736
737 MemSize = std::max(MemSize, OpSize);
738 }
739 }
740
741 assert(MemSize && "Did not expect a zero-sized stack slot");
742
743 MachineInstr *NewMI = nullptr;
744
745 if (MI.getOpcode() == TargetOpcode::STACKMAP ||
746 MI.getOpcode() == TargetOpcode::PATCHPOINT ||
747 MI.getOpcode() == TargetOpcode::STATEPOINT) {
748 // Fold stackmap/patchpoint.
749 NewMI = foldPatchpoint(MF, MI, Ops, FI, *this);
750 if (NewMI)
751 MBB->insert(MI, NewMI);
752 } else if (MI.isInlineAsm()) {
753 return foldInlineAsmMemOperand(MI, Ops, FI, *this);
754 } else {
755 // Ask the target to do the actual folding.
756 NewMI = foldMemoryOperandImpl(MF, MI, Ops, MI, FI, LIS, VRM);
757 }
758
759 if (NewMI) {
760 NewMI->setMemRefs(MF, MI.memoperands());
761 // Add a memory operand, foldMemoryOperandImpl doesn't do that.
763 NewMI->mayStore()) &&
764 "Folded a def to a non-store!");
765 assert((!(Flags & MachineMemOperand::MOLoad) ||
766 NewMI->mayLoad()) &&
767 "Folded a use to a non-load!");
768 assert(MFI.getObjectOffset(FI) != -1);
769 MachineMemOperand *MMO =
771 Flags, MemSize, MFI.getObjectAlign(FI));
772 NewMI->addMemOperand(MF, MMO);
773
774 // The pass "x86 speculative load hardening" always attaches symbols to
775 // call instructions. We need copy it form old instruction.
776 NewMI->cloneInstrSymbols(MF, MI);
777
778 return NewMI;
779 }
780
781 // Straight COPY may fold as load/store.
782 if (!isCopyInstr(MI) || Ops.size() != 1)
783 return nullptr;
784
785 const TargetRegisterClass *RC = canFoldCopy(MI, *this, Ops[0]);
786 if (!RC)
787 return nullptr;
788
789 const MachineOperand &MO = MI.getOperand(1 - Ops[0]);
791 if (Flags == MachineMemOperand::MOStore) {
792 if (MO.isUndef()) {
793 // If this is an undef copy, we do not need to bother we inserting spill
794 // code.
795 BuildMI(*MBB, Pos, MI.getDebugLoc(), get(TargetOpcode::KILL)).add(MO);
796 } else {
797 storeRegToStackSlot(*MBB, Pos, MO.getReg(), MO.isKill(), FI, RC,
798 Register());
799 }
800 } else
801 loadRegFromStackSlot(*MBB, Pos, MO.getReg(), FI, RC, Register());
802
803 return &*--Pos;
804}
805
808 MachineInstr &LoadMI,
809 LiveIntervals *LIS) const {
810 assert(LoadMI.canFoldAsLoad() && "LoadMI isn't foldable!");
811#ifndef NDEBUG
812 for (unsigned OpIdx : Ops)
813 assert(MI.getOperand(OpIdx).isUse() && "Folding load into def!");
814#endif
815
816 MachineBasicBlock &MBB = *MI.getParent();
817 MachineFunction &MF = *MBB.getParent();
818
819 // Ask the target to do the actual folding.
820 MachineInstr *NewMI = nullptr;
821 int FrameIndex = 0;
822
823 if ((MI.getOpcode() == TargetOpcode::STACKMAP ||
824 MI.getOpcode() == TargetOpcode::PATCHPOINT ||
825 MI.getOpcode() == TargetOpcode::STATEPOINT) &&
826 isLoadFromStackSlot(LoadMI, FrameIndex)) {
827 // Fold stackmap/patchpoint.
828 NewMI = foldPatchpoint(MF, MI, Ops, FrameIndex, *this);
829 if (NewMI)
830 NewMI = &*MBB.insert(MI, NewMI);
831 } else if (MI.isInlineAsm() && isLoadFromStackSlot(LoadMI, FrameIndex)) {
832 return foldInlineAsmMemOperand(MI, Ops, FrameIndex, *this);
833 } else {
834 // Ask the target to do the actual folding.
835 NewMI = foldMemoryOperandImpl(MF, MI, Ops, MI, LoadMI, LIS);
836 }
837
838 if (!NewMI)
839 return nullptr;
840
841 // Copy the memoperands from the load to the folded instruction.
842 if (MI.memoperands_empty()) {
843 NewMI->setMemRefs(MF, LoadMI.memoperands());
844 } else {
845 // Handle the rare case of folding multiple loads.
846 NewMI->setMemRefs(MF, MI.memoperands());
848 E = LoadMI.memoperands_end();
849 I != E; ++I) {
850 NewMI->addMemOperand(MF, *I);
851 }
852 }
853 return NewMI;
854}
855
856/// transferImplicitOperands - MI is a pseudo-instruction, and the lowered
857/// replacement instructions immediately precede it. Copy any implicit
858/// operands from MI to the replacement instruction.
860 const TargetRegisterInfo *TRI) {
862 --CopyMI;
863
864 Register DstReg = MI->getOperand(0).getReg();
865 for (const MachineOperand &MO : MI->implicit_operands()) {
866 CopyMI->addOperand(MO);
867
868 // Be conservative about preserving kills when subregister defs are
869 // involved. If there was implicit kill of a super-register overlapping the
870 // copy result, we would kill the subregisters previous copies defined.
871
872 if (MO.isKill() && TRI->regsOverlap(DstReg, MO.getReg()))
873 CopyMI->getOperand(CopyMI->getNumOperands() - 1).setIsKill(false);
874 }
875}
876
878 MachineInstr *MI, const TargetRegisterInfo * /*Remove me*/) const {
879 if (MI->allDefsAreDead()) {
880 MI->setDesc(get(TargetOpcode::KILL));
881 return;
882 }
883
884 MachineOperand &DstMO = MI->getOperand(0);
885 MachineOperand &SrcMO = MI->getOperand(1);
886
887 bool IdentityCopy = (SrcMO.getReg() == DstMO.getReg());
888 if (IdentityCopy || SrcMO.isUndef()) {
889 // No need to insert an identity copy instruction, but replace with a KILL
890 // if liveness is changed.
891 if (SrcMO.isUndef() || MI->getNumOperands() > 2) {
892 // We must make sure the super-register gets killed. Replace the
893 // instruction with KILL.
894 MI->setDesc(get(TargetOpcode::KILL));
895 return;
896 }
897 // Vanilla identity copy.
898 MI->eraseFromParent();
899 return;
900 }
901
902 copyPhysReg(*MI->getParent(), MI, MI->getDebugLoc(), DstMO.getReg(),
903 SrcMO.getReg(), SrcMO.isKill(),
904 DstMO.getReg().isPhysical() ? DstMO.isRenamable() : false,
905 SrcMO.getReg().isPhysical() ? SrcMO.isRenamable() : false);
906
907 if (MI->getNumOperands() > 2)
909 MI->eraseFromParent();
910}
911
913 const MachineInstr &Inst, const MachineBasicBlock *MBB) const {
914 const MachineOperand &Op1 = Inst.getOperand(1);
915 const MachineOperand &Op2 = Inst.getOperand(2);
916 const MachineRegisterInfo &MRI = MBB->getParent()->getRegInfo();
917
918 // We need virtual register definitions for the operands that we will
919 // reassociate.
920 MachineInstr *MI1 = nullptr;
921 MachineInstr *MI2 = nullptr;
922 if (Op1.isReg() && Op1.getReg().isVirtual())
923 MI1 = MRI.getUniqueVRegDef(Op1.getReg());
924 if (Op2.isReg() && Op2.getReg().isVirtual())
925 MI2 = MRI.getUniqueVRegDef(Op2.getReg());
926
927 // And at least one operand must be defined in MBB.
928 return MI1 && MI2 && (MI1->getParent() == MBB || MI2->getParent() == MBB);
929}
930
932 unsigned Opcode2) const {
933 return Opcode1 == Opcode2 || getInverseOpcode(Opcode1) == Opcode2;
934}
935
937 bool &Commuted) const {
938 const MachineBasicBlock *MBB = Inst.getParent();
939 const MachineRegisterInfo &MRI = MBB->getParent()->getRegInfo();
940 MachineInstr *MI1 = MRI.getUniqueVRegDef(Inst.getOperand(1).getReg());
941 MachineInstr *MI2 = MRI.getUniqueVRegDef(Inst.getOperand(2).getReg());
942 unsigned Opcode = Inst.getOpcode();
943
944 // If only one operand has the same or inverse opcode and it's the second
945 // source operand, the operands must be commuted.
946 Commuted = !areOpcodesEqualOrInverse(Opcode, MI1->getOpcode()) &&
947 areOpcodesEqualOrInverse(Opcode, MI2->getOpcode());
948 if (Commuted)
949 std::swap(MI1, MI2);
950
951 // 1. The previous instruction must be the same type as Inst.
952 // 2. The previous instruction must also be associative/commutative or be the
953 // inverse of such an operation (this can be different even for
954 // instructions with the same opcode if traits like fast-math-flags are
955 // included).
956 // 3. The previous instruction must have virtual register definitions for its
957 // operands in the same basic block as Inst.
958 // 4. The previous instruction's result must only be used by Inst.
959 return areOpcodesEqualOrInverse(Opcode, MI1->getOpcode()) &&
961 isAssociativeAndCommutative(*MI1, /* Invert */ true)) &&
963 MRI.hasOneNonDBGUse(MI1->getOperand(0).getReg());
964}
965
966// 1. The operation must be associative and commutative or be the inverse of
967// such an operation.
968// 2. The instruction must have virtual register definitions for its
969// operands in the same basic block.
970// 3. The instruction must have a reassociable sibling.
972 bool &Commuted) const {
973 return (isAssociativeAndCommutative(Inst) ||
974 isAssociativeAndCommutative(Inst, /* Invert */ true)) &&
975 hasReassociableOperands(Inst, Inst.getParent()) &&
976 hasReassociableSibling(Inst, Commuted);
977}
978
979// Utility routine that checks if \param MO is defined by an
980// \param CombineOpc instruction in the basic block \param MBB.
981// If \param CombineOpc is not provided, the OpCode check will
982// be skipped.
984 unsigned CombineOpc = 0) {
985 MachineRegisterInfo &MRI = MBB.getParent()->getRegInfo();
986 MachineInstr *MI = nullptr;
987
988 if (MO.isReg() && MO.getReg().isVirtual())
989 MI = MRI.getUniqueVRegDef(MO.getReg());
990 // And it needs to be in the trace (otherwise, it won't have a depth).
991 if (!MI || MI->getParent() != &MBB ||
992 (MI->getOpcode() != CombineOpc && CombineOpc != 0))
993 return false;
994 // Must only used by the user we combine with.
995 if (!MRI.hasOneNonDBGUse(MO.getReg()))
996 return false;
997
998 return true;
999}
1000
1001// A chain of accumulation instructions will be selected IFF:
1002// 1. All the accumulation instructions in the chain have the same opcode,
1003// besides the first that has a slightly different opcode because it does
1004// not accumulate into a register.
1005// 2. All the instructions in the chain are combinable (have a single use
1006// which itself is part of the chain).
1007// 3. Meets the required minimum length.
1009 MachineInstr *CurrentInstr, SmallVectorImpl<Register> &Chain) const {
1010 // Walk up the chain of accumulation instructions and collect them in the
1011 // vector.
1012 MachineBasicBlock &MBB = *CurrentInstr->getParent();
1013 const MachineRegisterInfo &MRI = MBB.getParent()->getRegInfo();
1014 unsigned AccumulatorOpcode = CurrentInstr->getOpcode();
1015 std::optional<unsigned> ChainStartOpCode =
1016 getAccumulationStartOpcode(AccumulatorOpcode);
1017
1018 if (!ChainStartOpCode.has_value())
1019 return;
1020
1021 // Push the first accumulator result to the start of the chain.
1022 Chain.push_back(CurrentInstr->getOperand(0).getReg());
1023
1024 // Collect the accumulator input register from all instructions in the chain.
1025 while (CurrentInstr &&
1026 canCombine(MBB, CurrentInstr->getOperand(1), AccumulatorOpcode)) {
1027 Chain.push_back(CurrentInstr->getOperand(1).getReg());
1028 CurrentInstr = MRI.getUniqueVRegDef(CurrentInstr->getOperand(1).getReg());
1029 }
1030
1031 // Add the instruction at the top of the chain.
1032 if (CurrentInstr->getOpcode() == AccumulatorOpcode &&
1033 canCombine(MBB, CurrentInstr->getOperand(1)))
1034 Chain.push_back(CurrentInstr->getOperand(1).getReg());
1035}
1036
1037/// Find chains of accumulations that can be rewritten as a tree for increased
1038/// ILP.
1040 MachineInstr &Root, SmallVectorImpl<unsigned> &Patterns) const {
1042 return false;
1043
1044 unsigned Opc = Root.getOpcode();
1046 return false;
1047
1048 // Verify that this is the end of the chain.
1049 MachineBasicBlock &MBB = *Root.getParent();
1050 MachineRegisterInfo &MRI = MBB.getParent()->getRegInfo();
1051 if (!MRI.hasOneNonDBGUser(Root.getOperand(0).getReg()))
1052 return false;
1053
1054 auto User = MRI.use_instr_begin(Root.getOperand(0).getReg());
1055 if (User->getOpcode() == Opc)
1056 return false;
1057
1058 // Walk up the use chain and collect the reduction chain.
1060 getAccumulatorChain(&Root, Chain);
1061
1062 // Reject chains which are too short to be worth modifying.
1063 if (Chain.size() < MinAccumulatorDepth)
1064 return false;
1065
1066 // Check if the MBB this instruction is a part of contains any other chains.
1067 // If so, don't apply it.
1068 SmallSet<Register, 32> ReductionChain(llvm::from_range, Chain);
1069 for (const auto &I : MBB) {
1070 if (I.getOpcode() == Opc &&
1071 !ReductionChain.contains(I.getOperand(0).getReg()))
1072 return false;
1073 }
1074
1076 return true;
1077}
1078
1079// Reduce branches of the accumulator tree by adding them together.
1081 SmallVectorImpl<Register> &RegistersToReduce,
1084 DenseMap<Register, unsigned> &InstrIdxForVirtReg,
1085 Register ResultReg) const {
1088
1089 // Get the opcode for the reduction instruction we will need to build.
1090 // If for some reason it is not defined, early exit and don't apply this.
1091 unsigned ReduceOpCode = getReduceOpcodeForAccumulator(Root.getOpcode());
1092
1093 for (unsigned int i = 1; i <= (RegistersToReduce.size() / 2); i += 2) {
1094 auto RHS = RegistersToReduce[i - 1];
1095 auto LHS = RegistersToReduce[i];
1096 Register Dest;
1097 // If we are reducing 2 registers, reuse the original result register.
1098 if (RegistersToReduce.size() == 2)
1099 Dest = ResultReg;
1100 // Otherwise, create a new virtual register to hold the partial sum.
1101 else {
1102 auto NewVR = MRI.createVirtualRegister(
1103 MRI.getRegClass(Root.getOperand(0).getReg()));
1104 Dest = NewVR;
1105 NewRegs.push_back(Dest);
1106 InstrIdxForVirtReg.insert(std::make_pair(Dest, InsInstrs.size()));
1107 }
1108
1109 // Create the new reduction instruction.
1111 BuildMI(MF, MIMetadata(Root), TII->get(ReduceOpCode), Dest)
1112 .addReg(RHS, getKillRegState(true))
1113 .addReg(LHS, getKillRegState(true));
1114 // Copy any flags needed from the original instruction.
1115 MIB->setFlags(Root.getFlags());
1116 InsInstrs.push_back(MIB);
1117 }
1118
1119 // If the number of registers to reduce is odd, add the remaining register to
1120 // the vector of registers to reduce.
1121 if (RegistersToReduce.size() % 2 != 0)
1122 NewRegs.push_back(RegistersToReduce[RegistersToReduce.size() - 1]);
1123
1124 RegistersToReduce = NewRegs;
1125}
1126
1127// The concept of the reassociation pass is that these operations can benefit
1128// from this kind of transformation:
1129//
1130// A = ? op ?
1131// B = A op X (Prev)
1132// C = B op Y (Root)
1133// -->
1134// A = ? op ?
1135// B = X op Y
1136// C = A op B
1137//
1138// breaking the dependency between A and B, allowing them to be executed in
1139// parallel (or back-to-back in a pipeline) instead of depending on each other.
1140
1141// FIXME: This has the potential to be expensive (compile time) while not
1142// improving the code at all. Some ways to limit the overhead:
1143// 1. Track successful transforms; bail out if hit rate gets too low.
1144// 2. Only enable at -O3 or some other non-default optimization level.
1145// 3. Pre-screen pattern candidates here: if an operand of the previous
1146// instruction is known to not increase the critical path, then don't match
1147// that pattern.
1149 MachineInstr &Root, SmallVectorImpl<unsigned> &Patterns,
1150 bool DoRegPressureReduce) const {
1151 bool Commute;
1152 if (isReassociationCandidate(Root, Commute)) {
1153 // We found a sequence of instructions that may be suitable for a
1154 // reassociation of operands to increase ILP. Specify each commutation
1155 // possibility for the Prev instruction in the sequence and let the
1156 // machine combiner decide if changing the operands is worthwhile.
1157 if (Commute) {
1160 } else {
1163 }
1164 return true;
1165 }
1166 if (getAccumulatorReassociationPatterns(Root, Patterns))
1167 return true;
1168
1169 return false;
1170}
1171
1172/// Return true when a code sequence can improve loop throughput.
1174 return false;
1175}
1176
1179 switch (Pattern) {
1182 default:
1184 }
1185}
1186
1187std::pair<unsigned, unsigned>
1189 const MachineInstr &Root,
1190 const MachineInstr &Prev) const {
1191 bool AssocCommutRoot = isAssociativeAndCommutative(Root);
1192 bool AssocCommutPrev = isAssociativeAndCommutative(Prev);
1193
1194 // Early exit if both opcodes are associative and commutative. It's a trivial
1195 // reassociation when we only change operands order. In this case opcodes are
1196 // not required to have inverse versions.
1197 if (AssocCommutRoot && AssocCommutPrev) {
1198 assert(Root.getOpcode() == Prev.getOpcode() && "Expected to be equal");
1199 return std::make_pair(Root.getOpcode(), Root.getOpcode());
1200 }
1201
1202 // At least one instruction is not associative or commutative.
1203 // Since we have matched one of the reassociation patterns, we expect that the
1204 // instructions' opcodes are equal or one of them is the inversion of the
1205 // other.
1207 "Incorrectly matched pattern");
1208 unsigned AssocCommutOpcode = Root.getOpcode();
1209 unsigned InverseOpcode = *getInverseOpcode(Root.getOpcode());
1210 if (!AssocCommutRoot)
1211 std::swap(AssocCommutOpcode, InverseOpcode);
1212
1213 // The transformation rule (`+` is any associative and commutative binary
1214 // operation, `-` is the inverse):
1215 // REASSOC_AX_BY:
1216 // (A + X) + Y => A + (X + Y)
1217 // (A + X) - Y => A + (X - Y)
1218 // (A - X) + Y => A - (X - Y)
1219 // (A - X) - Y => A - (X + Y)
1220 // REASSOC_XA_BY:
1221 // (X + A) + Y => (X + Y) + A
1222 // (X + A) - Y => (X - Y) + A
1223 // (X - A) + Y => (X + Y) - A
1224 // (X - A) - Y => (X - Y) - A
1225 // REASSOC_AX_YB:
1226 // Y + (A + X) => (Y + X) + A
1227 // Y - (A + X) => (Y - X) - A
1228 // Y + (A - X) => (Y - X) + A
1229 // Y - (A - X) => (Y + X) - A
1230 // REASSOC_XA_YB:
1231 // Y + (X + A) => (Y + X) + A
1232 // Y - (X + A) => (Y - X) - A
1233 // Y + (X - A) => (Y + X) - A
1234 // Y - (X - A) => (Y - X) + A
1235 switch (Pattern) {
1236 default:
1237 llvm_unreachable("Unexpected pattern");
1239 if (!AssocCommutRoot && AssocCommutPrev)
1240 return {AssocCommutOpcode, InverseOpcode};
1241 if (AssocCommutRoot && !AssocCommutPrev)
1242 return {InverseOpcode, InverseOpcode};
1243 if (!AssocCommutRoot && !AssocCommutPrev)
1244 return {InverseOpcode, AssocCommutOpcode};
1245 break;
1247 if (!AssocCommutRoot && AssocCommutPrev)
1248 return {AssocCommutOpcode, InverseOpcode};
1249 if (AssocCommutRoot && !AssocCommutPrev)
1250 return {InverseOpcode, AssocCommutOpcode};
1251 if (!AssocCommutRoot && !AssocCommutPrev)
1252 return {InverseOpcode, InverseOpcode};
1253 break;
1255 if (!AssocCommutRoot && AssocCommutPrev)
1256 return {InverseOpcode, InverseOpcode};
1257 if (AssocCommutRoot && !AssocCommutPrev)
1258 return {AssocCommutOpcode, InverseOpcode};
1259 if (!AssocCommutRoot && !AssocCommutPrev)
1260 return {InverseOpcode, AssocCommutOpcode};
1261 break;
1263 if (!AssocCommutRoot && AssocCommutPrev)
1264 return {InverseOpcode, InverseOpcode};
1265 if (AssocCommutRoot && !AssocCommutPrev)
1266 return {InverseOpcode, AssocCommutOpcode};
1267 if (!AssocCommutRoot && !AssocCommutPrev)
1268 return {AssocCommutOpcode, InverseOpcode};
1269 break;
1270 }
1271 llvm_unreachable("Unhandled combination");
1272}
1273
1274// Return a pair of boolean flags showing if the new root and new prev operands
1275// must be swapped. See visual example of the rule in
1276// TargetInstrInfo::getReassociationOpcodes.
1277static std::pair<bool, bool> mustSwapOperands(unsigned Pattern) {
1278 switch (Pattern) {
1279 default:
1280 llvm_unreachable("Unexpected pattern");
1282 return {false, false};
1284 return {true, false};
1286 return {true, true};
1288 return {true, true};
1289 }
1290}
1291
1293 const MachineInstr &Root, unsigned Pattern,
1294 std::array<unsigned, 5> &OperandIndices) const {
1295 switch (Pattern) {
1297 OperandIndices = {1, 1, 1, 2, 2};
1298 break;
1300 OperandIndices = {2, 1, 2, 2, 1};
1301 break;
1303 OperandIndices = {1, 2, 1, 1, 2};
1304 break;
1306 OperandIndices = {2, 2, 2, 1, 1};
1307 break;
1308 default:
1309 llvm_unreachable("unexpected MachineCombinerPattern");
1310 }
1311}
1312
1313/// Attempt the reassociation transformation to reduce critical path length.
1314/// See the above comments before getMachineCombinerPatterns().
1316 MachineInstr &Root, MachineInstr &Prev, unsigned Pattern,
1320 DenseMap<Register, unsigned> &InstrIdxForVirtReg) const {
1321 MachineFunction *MF = Root.getMF();
1324 const TargetRegisterClass *RC = Root.getRegClassConstraint(0, TII, &TRI);
1325
1330 MachineOperand &OpC = Root.getOperand(0);
1331
1332 Register RegA = OpA.getReg();
1333 unsigned SubRegA = OpA.getSubReg();
1334 Register RegB = OpB.getReg();
1335 Register RegX = OpX.getReg();
1336 unsigned SubRegX = OpX.getSubReg();
1337 Register RegY = OpY.getReg();
1338 unsigned SubRegY = OpY.getSubReg();
1339 Register RegC = OpC.getReg();
1340
1341 if (RegA.isVirtual())
1342 MRI.constrainRegClass(RegA, RC);
1343 if (RegB.isVirtual())
1344 MRI.constrainRegClass(RegB, RC);
1345 if (RegX.isVirtual())
1346 MRI.constrainRegClass(RegX, RC);
1347 if (RegY.isVirtual())
1348 MRI.constrainRegClass(RegY, RC);
1349 if (RegC.isVirtual())
1350 MRI.constrainRegClass(RegC, RC);
1351
1352 // Create a new virtual register for the result of (X op Y) instead of
1353 // recycling RegB because the MachineCombiner's computation of the critical
1354 // path requires a new register definition rather than an existing one.
1355 Register NewVR = MRI.createVirtualRegister(RC);
1356 unsigned SubRegNewVR = 0;
1357 InstrIdxForVirtReg.insert(std::make_pair(NewVR, 0));
1358
1359 auto [NewRootOpc, NewPrevOpc] = getReassociationOpcodes(Pattern, Root, Prev);
1360 bool KillA = OpA.isKill();
1361 bool KillX = OpX.isKill();
1362 bool KillY = OpY.isKill();
1363 bool KillNewVR = true;
1364
1365 auto [SwapRootOperands, SwapPrevOperands] = mustSwapOperands(Pattern);
1366
1367 if (SwapPrevOperands) {
1368 std::swap(RegX, RegY);
1369 std::swap(SubRegX, SubRegY);
1370 std::swap(KillX, KillY);
1371 }
1372
1373 unsigned PrevFirstOpIdx, PrevSecondOpIdx;
1374 unsigned RootFirstOpIdx, RootSecondOpIdx;
1375 switch (Pattern) {
1377 PrevFirstOpIdx = OperandIndices[1];
1378 PrevSecondOpIdx = OperandIndices[3];
1379 RootFirstOpIdx = OperandIndices[2];
1380 RootSecondOpIdx = OperandIndices[4];
1381 break;
1383 PrevFirstOpIdx = OperandIndices[1];
1384 PrevSecondOpIdx = OperandIndices[3];
1385 RootFirstOpIdx = OperandIndices[4];
1386 RootSecondOpIdx = OperandIndices[2];
1387 break;
1389 PrevFirstOpIdx = OperandIndices[3];
1390 PrevSecondOpIdx = OperandIndices[1];
1391 RootFirstOpIdx = OperandIndices[2];
1392 RootSecondOpIdx = OperandIndices[4];
1393 break;
1395 PrevFirstOpIdx = OperandIndices[3];
1396 PrevSecondOpIdx = OperandIndices[1];
1397 RootFirstOpIdx = OperandIndices[4];
1398 RootSecondOpIdx = OperandIndices[2];
1399 break;
1400 default:
1401 llvm_unreachable("unexpected MachineCombinerPattern");
1402 }
1403
1404 // Basically BuildMI but doesn't add implicit operands by default.
1405 auto buildMINoImplicit = [](MachineFunction &MF, const MIMetadata &MIMD,
1406 const MCInstrDesc &MCID, Register DestReg) {
1407 return MachineInstrBuilder(
1408 MF, MF.CreateMachineInstr(MCID, MIMD.getDL(), /*NoImpl=*/true))
1409 .copyMIMetadata(MIMD)
1410 .addReg(DestReg, RegState::Define);
1411 };
1412
1413 // Create new instructions for insertion.
1414 MachineInstrBuilder MIB1 =
1415 buildMINoImplicit(*MF, MIMetadata(Prev), TII->get(NewPrevOpc), NewVR);
1416 for (const auto &MO : Prev.explicit_operands()) {
1417 unsigned Idx = MO.getOperandNo();
1418 // Skip the result operand we'd already added.
1419 if (Idx == 0)
1420 continue;
1421 if (Idx == PrevFirstOpIdx)
1422 MIB1.addReg(RegX, getKillRegState(KillX), SubRegX);
1423 else if (Idx == PrevSecondOpIdx)
1424 MIB1.addReg(RegY, getKillRegState(KillY), SubRegY);
1425 else
1426 MIB1.add(MO);
1427 }
1428 MIB1.copyImplicitOps(Prev);
1429
1430 if (SwapRootOperands) {
1431 std::swap(RegA, NewVR);
1432 std::swap(SubRegA, SubRegNewVR);
1433 std::swap(KillA, KillNewVR);
1434 }
1435
1436 MachineInstrBuilder MIB2 =
1437 buildMINoImplicit(*MF, MIMetadata(Root), TII->get(NewRootOpc), RegC);
1438 for (const auto &MO : Root.explicit_operands()) {
1439 unsigned Idx = MO.getOperandNo();
1440 // Skip the result operand.
1441 if (Idx == 0)
1442 continue;
1443 if (Idx == RootFirstOpIdx)
1444 MIB2 = MIB2.addReg(RegA, getKillRegState(KillA), SubRegA);
1445 else if (Idx == RootSecondOpIdx)
1446 MIB2 = MIB2.addReg(NewVR, getKillRegState(KillNewVR), SubRegNewVR);
1447 else
1448 MIB2 = MIB2.add(MO);
1449 }
1450 MIB2.copyImplicitOps(Root);
1451
1452 // Propagate FP flags from the original instructions.
1453 // But clear poison-generating flags because those may not be valid now.
1454 // TODO: There should be a helper function for copying only fast-math-flags.
1455 uint32_t IntersectedFlags = Root.getFlags() & Prev.getFlags();
1456 MIB1->setFlags(IntersectedFlags);
1461
1462 MIB2->setFlags(IntersectedFlags);
1467
1468 setSpecialOperandAttr(Root, Prev, *MIB1, *MIB2);
1469
1470 // Record new instructions for insertion and old instructions for deletion.
1471 InsInstrs.push_back(MIB1);
1472 InsInstrs.push_back(MIB2);
1473 DelInstrs.push_back(&Prev);
1474 DelInstrs.push_back(&Root);
1475
1476 // We transformed:
1477 // B = A op X (Prev)
1478 // C = B op Y (Root)
1479 // Into:
1480 // B = X op Y (MIB1)
1481 // C = A op B (MIB2)
1482 // C has the same value as before, B doesn't; as such, keep the debug number
1483 // of C but not of B.
1484 if (unsigned OldRootNum = Root.peekDebugInstrNum())
1485 MIB2.getInstr()->setDebugInstrNum(OldRootNum);
1486}
1487
1489 MachineInstr &Root, unsigned Pattern,
1492 DenseMap<Register, unsigned> &InstIdxForVirtReg) const {
1494 MachineBasicBlock &MBB = *Root.getParent();
1495 MachineFunction &MF = *MBB.getParent();
1497
1498 switch (Pattern) {
1503 // Select the previous instruction in the sequence based on the input
1504 // pattern.
1505 std::array<unsigned, 5> OperandIndices;
1507 MachineInstr *Prev =
1508 MRI.getUniqueVRegDef(Root.getOperand(OperandIndices[0]).getReg());
1509
1510 // Don't reassociate if Prev and Root are in different blocks.
1511 if (Prev->getParent() != Root.getParent())
1512 return;
1513
1514 reassociateOps(Root, *Prev, Pattern, InsInstrs, DelInstrs, OperandIndices,
1515 InstIdxForVirtReg);
1516 break;
1517 }
1519 SmallVector<Register, 32> ChainRegs;
1520 getAccumulatorChain(&Root, ChainRegs);
1521 unsigned int Depth = ChainRegs.size();
1523 "Max accumulator width set to illegal value");
1524 unsigned int MaxWidth = Log2_32(Depth) < MaxAccumulatorWidth
1525 ? Log2_32(Depth)
1527
1528 // Walk down the chain and rewrite it as a tree.
1529 for (auto IndexedReg : llvm::enumerate(llvm::reverse(ChainRegs))) {
1530 // No need to rewrite the first node, it is already perfect as it is.
1531 if (IndexedReg.index() == 0)
1532 continue;
1533
1534 // FIXME: Losing subregisters
1535 MachineInstr *Instr = MRI.getUniqueVRegDef(IndexedReg.value());
1537 Register AccReg;
1538 if (IndexedReg.index() < MaxWidth) {
1539 // Now we need to create new instructions for the first row.
1540 AccReg = Instr->getOperand(0).getReg();
1541 unsigned OpCode = getAccumulationStartOpcode(Root.getOpcode());
1542
1543 MIB = BuildMI(MF, MIMetadata(*Instr), TII->get(OpCode), AccReg)
1544 .addReg(Instr->getOperand(2).getReg(),
1545 getKillRegState(Instr->getOperand(2).isKill()))
1546 .addReg(Instr->getOperand(3).getReg(),
1547 getKillRegState(Instr->getOperand(3).isKill()));
1548 } else {
1549 // For the remaining cases, we need to use an output register of one of
1550 // the newly inserted instuctions as operand 1
1551 AccReg = Instr->getOperand(0).getReg() == Root.getOperand(0).getReg()
1552 ? MRI.createVirtualRegister(
1553 MRI.getRegClass(Root.getOperand(0).getReg()))
1554 : Instr->getOperand(0).getReg();
1555 assert(IndexedReg.index() >= MaxWidth);
1556 auto AccumulatorInput =
1557 ChainRegs[Depth - (IndexedReg.index() - MaxWidth) - 1];
1558 MIB = BuildMI(MF, MIMetadata(*Instr), TII->get(Instr->getOpcode()),
1559 AccReg)
1560 .addReg(AccumulatorInput, getKillRegState(true))
1561 .addReg(Instr->getOperand(2).getReg(),
1562 getKillRegState(Instr->getOperand(2).isKill()))
1563 .addReg(Instr->getOperand(3).getReg(),
1564 getKillRegState(Instr->getOperand(3).isKill()));
1565 }
1566
1567 MIB->setFlags(Instr->getFlags());
1568 InstIdxForVirtReg.insert(std::make_pair(AccReg, InsInstrs.size()));
1569 InsInstrs.push_back(MIB);
1570 DelInstrs.push_back(Instr);
1571 }
1572
1573 SmallVector<Register, 8> RegistersToReduce;
1574 for (unsigned i = (InsInstrs.size() - MaxWidth); i < InsInstrs.size();
1575 ++i) {
1576 auto Reg = InsInstrs[i]->getOperand(0).getReg();
1577 RegistersToReduce.push_back(Reg);
1578 }
1579
1580 while (RegistersToReduce.size() > 1)
1581 reduceAccumulatorTree(RegistersToReduce, InsInstrs, MF, Root, MRI,
1582 InstIdxForVirtReg, Root.getOperand(0).getReg());
1583
1584 break;
1585 }
1586 }
1587}
1588
1592
1594 const MachineInstr &MI) const {
1595 const MachineFunction &MF = *MI.getMF();
1596 const MachineRegisterInfo &MRI = MF.getRegInfo();
1597
1598 // Remat clients assume operand 0 is the defined register.
1599 if (!MI.getNumOperands() || !MI.getOperand(0).isReg())
1600 return false;
1601 Register DefReg = MI.getOperand(0).getReg();
1602
1603 // A sub-register definition can only be rematerialized if the instruction
1604 // doesn't read the other parts of the register. Otherwise it is really a
1605 // read-modify-write operation on the full virtual register which cannot be
1606 // moved safely.
1607 if (DefReg.isVirtual() && MI.getOperand(0).getSubReg() &&
1608 MI.readsVirtualRegister(DefReg))
1609 return false;
1610
1611 // A load from a fixed stack slot can be rematerialized. This may be
1612 // redundant with subsequent checks, but it's target-independent,
1613 // simple, and a common case.
1614 int FrameIdx = 0;
1615 if (isLoadFromStackSlot(MI, FrameIdx) &&
1616 MF.getFrameInfo().isImmutableObjectIndex(FrameIdx))
1617 return true;
1618
1619 // Avoid instructions obviously unsafe for remat.
1620 if (MI.isNotDuplicable() || MI.mayStore() || MI.mayRaiseFPException() ||
1621 MI.hasUnmodeledSideEffects())
1622 return false;
1623
1624 // Don't remat inline asm. We have no idea how expensive it is
1625 // even if it's side effect free.
1626 if (MI.isInlineAsm())
1627 return false;
1628
1629 // Avoid instructions which load from potentially varying memory.
1630 if (MI.mayLoad() && !MI.isDereferenceableInvariantLoad())
1631 return false;
1632
1633 // If any of the registers accessed are non-constant, conservatively assume
1634 // the instruction is not rematerializable.
1635 for (const MachineOperand &MO : MI.operands()) {
1636 if (!MO.isReg()) continue;
1637 Register Reg = MO.getReg();
1638 if (Reg == 0)
1639 continue;
1640
1641 // Check for a well-behaved physical register.
1642 if (Reg.isPhysical()) {
1643 if (MO.isUse()) {
1644 // If the physreg has no defs anywhere, it's just an ambient register
1645 // and we can freely move its uses. Alternatively, if it's allocatable,
1646 // it could get allocated to something with a def during allocation.
1647 if (!MRI.isConstantPhysReg(Reg))
1648 return false;
1649 } else {
1650 // A physreg def. We can't remat it.
1651 return false;
1652 }
1653 continue;
1654 }
1655
1656 // Only allow one virtual-register def. There may be multiple defs of the
1657 // same virtual register, though.
1658 if (MO.isDef() && Reg != DefReg)
1659 return false;
1660 }
1661
1662 // Everything checked out.
1663 return true;
1664}
1665
1667 const MachineFunction *MF = MI.getMF();
1669 bool StackGrowsDown =
1671
1672 unsigned FrameSetupOpcode = getCallFrameSetupOpcode();
1673 unsigned FrameDestroyOpcode = getCallFrameDestroyOpcode();
1674
1675 if (!isFrameInstr(MI))
1676 return 0;
1677
1678 int SPAdj = TFI->alignSPAdjust(getFrameSize(MI));
1679
1680 if ((!StackGrowsDown && MI.getOpcode() == FrameSetupOpcode) ||
1681 (StackGrowsDown && MI.getOpcode() == FrameDestroyOpcode))
1682 SPAdj = -SPAdj;
1683
1684 return SPAdj;
1685}
1686
1687/// isSchedulingBoundary - Test if the given instruction should be
1688/// considered a scheduling boundary. This primarily includes labels
1689/// and terminators.
1691 const MachineBasicBlock *MBB,
1692 const MachineFunction &MF) const {
1693 // Terminators and labels can't be scheduled around.
1694 if (MI.isTerminator() || MI.isPosition())
1695 return true;
1696
1697 // INLINEASM_BR can jump to another block
1698 if (MI.getOpcode() == TargetOpcode::INLINEASM_BR)
1699 return true;
1700
1701 // Don't attempt to schedule around any instruction that defines
1702 // a stack-oriented pointer, as it's unlikely to be profitable. This
1703 // saves compile time, because it doesn't require every single
1704 // stack slot reference to depend on the instruction that does the
1705 // modification.
1706 const TargetLowering &TLI = *MF.getSubtarget().getTargetLowering();
1707 return MI.modifiesRegister(TLI.getStackPointerRegisterToSaveRestore(), &TRI);
1708}
1709
1710// Provide a global flag for disabling the PreRA hazard recognizer that targets
1711// may choose to honor.
1715
1716// Default implementation of CreateTargetRAHazardRecognizer.
1719 const ScheduleDAG *DAG) const {
1720 // Dummy hazard recognizer allows all instructions to issue.
1721 return new ScheduleHazardRecognizer();
1722}
1723
1724// Default implementation of CreateTargetMIHazardRecognizer.
1726 const InstrItineraryData *II, const ScheduleDAGMI *DAG) const {
1727 return new ScoreboardHazardRecognizer(II, DAG, "machine-scheduler");
1728}
1729
1730// Default implementation of CreateTargetPostRAHazardRecognizer.
1736
1737// Default implementation of getMemOperandWithOffset.
1739 const MachineInstr &MI, const MachineOperand *&BaseOp, int64_t &Offset,
1740 bool &OffsetIsScalable, const TargetRegisterInfo * /*RemoveMe*/) const {
1743 if (!getMemOperandsWithOffsetWidth(MI, BaseOps, Offset, OffsetIsScalable,
1744 Width, &TRI) ||
1745 BaseOps.size() != 1)
1746 return false;
1747 BaseOp = BaseOps.front();
1748 return true;
1749}
1750
1751//===----------------------------------------------------------------------===//
1752// SelectionDAG latency interface.
1753//===----------------------------------------------------------------------===//
1754
1755std::optional<unsigned>
1757 SDNode *DefNode, unsigned DefIdx,
1758 SDNode *UseNode, unsigned UseIdx) const {
1759 if (!ItinData || ItinData->isEmpty())
1760 return std::nullopt;
1761
1762 if (!DefNode->isMachineOpcode())
1763 return std::nullopt;
1764
1765 unsigned DefClass = get(DefNode->getMachineOpcode()).getSchedClass();
1766 if (!UseNode->isMachineOpcode())
1767 return ItinData->getOperandCycle(DefClass, DefIdx);
1768 unsigned UseClass = get(UseNode->getMachineOpcode()).getSchedClass();
1769 return ItinData->getOperandLatency(DefClass, DefIdx, UseClass, UseIdx);
1770}
1771
1773 SDNode *N) const {
1774 if (!ItinData || ItinData->isEmpty())
1775 return 1;
1776
1777 if (!N->isMachineOpcode())
1778 return 1;
1779
1780 return ItinData->getStageLatency(get(N->getMachineOpcode()).getSchedClass());
1781}
1782
1783//===----------------------------------------------------------------------===//
1784// MachineInstr latency interface.
1785//===----------------------------------------------------------------------===//
1786
1788 const MachineInstr &MI) const {
1789 if (!ItinData || ItinData->isEmpty())
1790 return 1;
1791
1792 unsigned Class = MI.getDesc().getSchedClass();
1793 int UOps = ItinData->Itineraries[Class].NumMicroOps;
1794 if (UOps >= 0)
1795 return UOps;
1796
1797 // The # of u-ops is dynamically determined. The specific target should
1798 // override this function to return the right number.
1799 return 1;
1800}
1801
1802/// Return the default expected latency for a def based on it's opcode.
1804 const MachineInstr &DefMI) const {
1805 if (DefMI.isTransient())
1806 return 0;
1807 if (DefMI.mayLoad())
1808 return SchedModel.LoadLatency;
1809 if (isHighLatencyDef(DefMI.getOpcode()))
1810 return SchedModel.HighLatency;
1811 return 1;
1812}
1813
1815 return 0;
1816}
1817
1819 const MachineInstr &MI,
1820 unsigned *PredCost) const {
1821 // Default to one cycle for no itinerary. However, an "empty" itinerary may
1822 // still have a MinLatency property, which getStageLatency checks.
1823 if (!ItinData)
1824 return MI.mayLoad() ? 2 : 1;
1825
1826 return ItinData->getStageLatency(MI.getDesc().getSchedClass());
1827}
1828
1830 const MachineInstr &DefMI,
1831 unsigned DefIdx) const {
1832 const InstrItineraryData *ItinData = SchedModel.getInstrItineraries();
1833 if (!ItinData || ItinData->isEmpty())
1834 return false;
1835
1836 unsigned DefClass = DefMI.getDesc().getSchedClass();
1837 std::optional<unsigned> DefCycle =
1838 ItinData->getOperandCycle(DefClass, DefIdx);
1839 return DefCycle && DefCycle <= 1U;
1840}
1841
1843 // TODO: We don't split functions where a section attribute has been set
1844 // since the split part may not be placed in a contiguous region. It may also
1845 // be more beneficial to augment the linker to ensure contiguous layout of
1846 // split functions within the same section as specified by the attribute.
1847 if (MF.getFunction().hasSection())
1848 return false;
1849
1850 // We don't want to proceed further for cold functions
1851 // or functions of unknown hotness. Lukewarm functions have no prefix.
1852 std::optional<StringRef> SectionPrefix = MF.getFunction().getSectionPrefix();
1853 if (SectionPrefix &&
1854 (*SectionPrefix == "unlikely" || *SectionPrefix == "unknown")) {
1855 return false;
1856 }
1857
1858 return true;
1859}
1860
1861std::optional<ParamLoadedValue>
1863 Register Reg) const {
1864 const MachineFunction *MF = MI.getMF();
1866 int64_t Offset;
1867 bool OffsetIsScalable;
1868
1869 // To simplify the sub-register handling, verify that we only need to
1870 // consider physical registers.
1871 assert(MF->getProperties().hasNoVRegs());
1872
1873 if (auto DestSrc = isCopyInstr(MI)) {
1874 Register DestReg = DestSrc->Destination->getReg();
1875
1876 // If the copy destination is the forwarding reg, describe the forwarding
1877 // reg using the copy source as the backup location. Example:
1878 //
1879 // x0 = MOV x7
1880 // call callee(x0) ; x0 described as x7
1881 if (Reg == DestReg)
1882 return ParamLoadedValue(*DestSrc->Source, Expr);
1883
1884 // If the target's hook couldn't describe this copy, give up.
1885 return std::nullopt;
1886 } else if (auto RegImm = isAddImmediate(MI, Reg)) {
1887 Register SrcReg = RegImm->Reg;
1888 Offset = RegImm->Imm;
1890 return ParamLoadedValue(MachineOperand::CreateReg(SrcReg, false), Expr);
1891 } else if (MI.hasOneMemOperand()) {
1892 // Only describe memory which provably does not escape the function. As
1893 // described in llvm.org/PR43343, escaped memory may be clobbered by the
1894 // callee (or by another thread).
1895 const MachineFrameInfo &MFI = MF->getFrameInfo();
1896 const MachineMemOperand *MMO = MI.memoperands()[0];
1897 const PseudoSourceValue *PSV = MMO->getPseudoValue();
1898
1899 // If the address points to "special" memory (e.g. a spill slot), it's
1900 // sufficient to check that it isn't aliased by any high-level IR value.
1901 if (!PSV || PSV->mayAlias(&MFI))
1902 return std::nullopt;
1903
1904 const MachineOperand *BaseOp;
1905 if (!getMemOperandWithOffset(MI, BaseOp, Offset, OffsetIsScalable, &TRI))
1906 return std::nullopt;
1907
1908 // FIXME: Scalable offsets are not yet handled in the offset code below.
1909 if (OffsetIsScalable)
1910 return std::nullopt;
1911
1912 // TODO: Can currently only handle mem instructions with a single define.
1913 // An example from the x86 target:
1914 // ...
1915 // DIV64m $rsp, 1, $noreg, 24, $noreg, implicit-def dead $rax, implicit-def $rdx
1916 // ...
1917 //
1918 if (MI.getNumExplicitDefs() != 1)
1919 return std::nullopt;
1920
1921 // TODO: In what way do we need to take Reg into consideration here?
1922
1925 Ops.push_back(dwarf::DW_OP_deref_size);
1926 Ops.push_back(MMO->getSize().hasValue() ? MMO->getSize().getValue()
1927 : ~UINT64_C(0));
1928 Expr = DIExpression::prependOpcodes(Expr, Ops);
1929 return ParamLoadedValue(*BaseOp, Expr);
1930 }
1931
1932 return std::nullopt;
1933}
1934
1935// Get the call frame size just before MI.
1937 // Search backwards from MI for the most recent call frame instruction.
1938 MachineBasicBlock *MBB = MI.getParent();
1939 for (auto &AdjI : reverse(make_range(MBB->instr_begin(), MI.getIterator()))) {
1940 if (AdjI.getOpcode() == getCallFrameSetupOpcode())
1941 return getFrameTotalSize(AdjI);
1942 if (AdjI.getOpcode() == getCallFrameDestroyOpcode())
1943 return 0;
1944 }
1945
1946 // If none was found, use the call frame size from the start of the basic
1947 // block.
1948 return MBB->getCallFrameSize();
1949}
1950
1951/// Both DefMI and UseMI must be valid. By default, call directly to the
1952/// itinerary. This may be overriden by the target.
1954 const InstrItineraryData *ItinData, const MachineInstr &DefMI,
1955 unsigned DefIdx, const MachineInstr &UseMI, unsigned UseIdx) const {
1956 unsigned DefClass = DefMI.getDesc().getSchedClass();
1957 unsigned UseClass = UseMI.getDesc().getSchedClass();
1958 return ItinData->getOperandLatency(DefClass, DefIdx, UseClass, UseIdx);
1959}
1960
1962 const MachineInstr &MI, unsigned DefIdx,
1963 SmallVectorImpl<RegSubRegPairAndIdx> &InputRegs) const {
1964 assert((MI.isRegSequence() ||
1965 MI.isRegSequenceLike()) && "Instruction do not have the proper type");
1966
1967 if (!MI.isRegSequence())
1968 return getRegSequenceLikeInputs(MI, DefIdx, InputRegs);
1969
1970 // We are looking at:
1971 // Def = REG_SEQUENCE v0, sub0, v1, sub1, ...
1972 assert(DefIdx == 0 && "REG_SEQUENCE only has one def");
1973 for (unsigned OpIdx = 1, EndOpIdx = MI.getNumOperands(); OpIdx != EndOpIdx;
1974 OpIdx += 2) {
1975 const MachineOperand &MOReg = MI.getOperand(OpIdx);
1976 if (MOReg.isUndef())
1977 continue;
1978 const MachineOperand &MOSubIdx = MI.getOperand(OpIdx + 1);
1979 assert(MOSubIdx.isImm() &&
1980 "One of the subindex of the reg_sequence is not an immediate");
1981 // Record Reg:SubReg, SubIdx.
1982 InputRegs.push_back(RegSubRegPairAndIdx(MOReg.getReg(), MOReg.getSubReg(),
1983 (unsigned)MOSubIdx.getImm()));
1984 }
1985 return true;
1986}
1987
1989 const MachineInstr &MI, unsigned DefIdx,
1990 RegSubRegPairAndIdx &InputReg) const {
1991 assert((MI.isExtractSubreg() ||
1992 MI.isExtractSubregLike()) && "Instruction do not have the proper type");
1993
1994 if (!MI.isExtractSubreg())
1995 return getExtractSubregLikeInputs(MI, DefIdx, InputReg);
1996
1997 // We are looking at:
1998 // Def = EXTRACT_SUBREG v0.sub1, sub0.
1999 assert(DefIdx == 0 && "EXTRACT_SUBREG only has one def");
2000 const MachineOperand &MOReg = MI.getOperand(1);
2001 if (MOReg.isUndef())
2002 return false;
2003 const MachineOperand &MOSubIdx = MI.getOperand(2);
2004 assert(MOSubIdx.isImm() &&
2005 "The subindex of the extract_subreg is not an immediate");
2006
2007 InputReg.Reg = MOReg.getReg();
2008 InputReg.SubReg = MOReg.getSubReg();
2009 InputReg.SubIdx = (unsigned)MOSubIdx.getImm();
2010 return true;
2011}
2012
2014 const MachineInstr &MI, unsigned DefIdx,
2015 RegSubRegPair &BaseReg, RegSubRegPairAndIdx &InsertedReg) const {
2016 assert((MI.isInsertSubreg() ||
2017 MI.isInsertSubregLike()) && "Instruction do not have the proper type");
2018
2019 if (!MI.isInsertSubreg())
2020 return getInsertSubregLikeInputs(MI, DefIdx, BaseReg, InsertedReg);
2021
2022 // We are looking at:
2023 // Def = INSERT_SEQUENCE v0, v1, sub0.
2024 assert(DefIdx == 0 && "INSERT_SUBREG only has one def");
2025 const MachineOperand &MOBaseReg = MI.getOperand(1);
2026 const MachineOperand &MOInsertedReg = MI.getOperand(2);
2027 if (MOInsertedReg.isUndef())
2028 return false;
2029 const MachineOperand &MOSubIdx = MI.getOperand(3);
2030 assert(MOSubIdx.isImm() &&
2031 "One of the subindex of the reg_sequence is not an immediate");
2032 BaseReg.Reg = MOBaseReg.getReg();
2033 BaseReg.SubReg = MOBaseReg.getSubReg();
2034
2035 InsertedReg.Reg = MOInsertedReg.getReg();
2036 InsertedReg.SubReg = MOInsertedReg.getSubReg();
2037 InsertedReg.SubIdx = (unsigned)MOSubIdx.getImm();
2038 return true;
2039}
2040
2041// Returns a MIRPrinter comment for this machine operand.
2043 const MachineInstr &MI, const MachineOperand &Op, unsigned OpIdx,
2044 const TargetRegisterInfo * /*RemoveMe*/) const {
2045
2046 if (!MI.isInlineAsm())
2047 return "";
2048
2049 std::string Flags;
2050 raw_string_ostream OS(Flags);
2051
2053 // Print HasSideEffects, MayLoad, MayStore, IsAlignStack
2054 unsigned ExtraInfo = Op.getImm();
2055 bool First = true;
2056 for (StringRef Info : InlineAsm::getExtraInfoNames(ExtraInfo)) {
2057 if (!First)
2058 OS << " ";
2059 First = false;
2060 OS << Info;
2061 }
2062
2063 return Flags;
2064 }
2065
2066 int FlagIdx = MI.findInlineAsmFlagIdx(OpIdx);
2067 if (FlagIdx < 0 || (unsigned)FlagIdx != OpIdx)
2068 return "";
2069
2070 assert(Op.isImm() && "Expected flag operand to be an immediate");
2071 // Pretty print the inline asm operand descriptor.
2072 unsigned Flag = Op.getImm();
2073 const InlineAsm::Flag F(Flag);
2074 OS << F.getKindName();
2075
2076 unsigned RCID;
2077 if (!F.isImmKind() && !F.isMemKind() && F.hasRegClassConstraint(RCID))
2078 OS << ':' << TRI.getRegClassName(TRI.getRegClass(RCID));
2079
2080 if (F.isMemKind()) {
2081 InlineAsm::ConstraintCode MCID = F.getMemoryConstraintID();
2083 }
2084
2085 unsigned TiedTo;
2086 if (F.isUseOperandTiedToDef(TiedTo))
2087 OS << " tiedto:$" << TiedTo;
2088
2089 if ((F.isRegDefKind() || F.isRegDefEarlyClobberKind() || F.isRegUseKind()) &&
2090 F.getRegMayBeFolded())
2091 OS << " foldable";
2092
2093 return Flags;
2094}
2095
2097
2099 Function &F, std::vector<outliner::Candidate> &Candidates) const {
2100 // Include target features from an arbitrary candidate for the outlined
2101 // function. This makes sure the outlined function knows what kinds of
2102 // instructions are going into it. This is fine, since all parent functions
2103 // must necessarily support the instructions that are in the outlined region.
2104 outliner::Candidate &FirstCand = Candidates.front();
2105 const Function &ParentFn = FirstCand.getMF()->getFunction();
2106 if (ParentFn.hasFnAttribute("target-features"))
2107 F.addFnAttr(ParentFn.getFnAttribute("target-features"));
2108 if (ParentFn.hasFnAttribute("target-cpu"))
2109 F.addFnAttr(ParentFn.getFnAttribute("target-cpu"));
2110
2111 // Set nounwind, so we don't generate eh_frame.
2112 if (llvm::all_of(Candidates, [](const outliner::Candidate &C) {
2113 return C.getMF()->getFunction().hasFnAttribute(Attribute::NoUnwind);
2114 }))
2115 F.addFnAttr(Attribute::NoUnwind);
2116}
2117
2121 unsigned Flags) const {
2122 MachineInstr &MI = *MIT;
2123
2124 // NOTE: MI.isMetaInstruction() will match CFI_INSTRUCTION, but some targets
2125 // have support for outlining those. Special-case that here.
2126 if (MI.isCFIInstruction())
2127 // Just go right to the target implementation.
2128 return getOutliningTypeImpl(MMI, MIT, Flags);
2129
2130 // Be conservative about inline assembly.
2131 if (MI.isInlineAsm())
2133
2134 // Labels generally can't safely be outlined.
2135 if (MI.isLabel())
2137
2138 // Don't let debug instructions impact analysis.
2139 if (MI.isDebugInstr())
2141
2142 // Some other special cases.
2143 switch (MI.getOpcode()) {
2144 case TargetOpcode::IMPLICIT_DEF:
2145 case TargetOpcode::KILL:
2146 case TargetOpcode::LIFETIME_START:
2147 case TargetOpcode::LIFETIME_END:
2149 default:
2150 break;
2151 }
2152
2153 // Is this a terminator for a basic block?
2154 if (MI.isTerminator()) {
2155 // If this is a branch to another block, we can't outline it.
2156 if (!MI.getParent()->succ_empty())
2158
2159 // Don't outline if the branch is not unconditional.
2160 if (isPredicated(MI))
2162 }
2163
2164 // Make sure none of the operands of this instruction do anything that
2165 // might break if they're moved outside their current function.
2166 // This includes MachineBasicBlock references, BlockAddressses,
2167 // Constant pool indices and jump table indices.
2168 //
2169 // A quick note on MO_TargetIndex:
2170 // This doesn't seem to be used in any of the architectures that the
2171 // MachineOutliner supports, but it was still filtered out in all of them.
2172 // There was one exception (RISC-V), but MO_TargetIndex also isn't used there.
2173 // As such, this check is removed both here and in the target-specific
2174 // implementations. Instead, we assert to make sure this doesn't
2175 // catch anyone off-guard somewhere down the line.
2176 for (const MachineOperand &MOP : MI.operands()) {
2177 // If you hit this assertion, please remove it and adjust
2178 // `getOutliningTypeImpl` for your target appropriately if necessary.
2179 // Adding the assertion back to other supported architectures
2180 // would be nice too :)
2181 assert(!MOP.isTargetIndex() && "This isn't used quite yet!");
2182
2183 // CFI instructions should already have been filtered out at this point.
2184 assert(!MOP.isCFIIndex() && "CFI instructions handled elsewhere!");
2185
2186 // PrologEpilogInserter should've already run at this point.
2187 assert(!MOP.isFI() && "FrameIndex instructions should be gone by now!");
2188
2189 if (MOP.isMBB() || MOP.isBlockAddress() || MOP.isCPI() || MOP.isJTI())
2191 }
2192
2193 // If we don't know, delegate to the target-specific hook.
2194 return getOutliningTypeImpl(MMI, MIT, Flags);
2195}
2196
2198 unsigned &Flags) const {
2199 // Some instrumentations create special TargetOpcode at the start which
2200 // expands to special code sequences which must be present.
2201 auto First = MBB.getFirstNonDebugInstr();
2202 if (First == MBB.end())
2203 return true;
2204
2205 if (First->getOpcode() == TargetOpcode::FENTRY_CALL ||
2206 First->getOpcode() == TargetOpcode::PATCHABLE_FUNCTION_ENTER)
2207 return false;
2208
2209 // Some instrumentations create special pseudo-instructions at or just before
2210 // the end that must be present.
2211 auto Last = MBB.getLastNonDebugInstr();
2212 if (Last->getOpcode() == TargetOpcode::PATCHABLE_RET ||
2213 Last->getOpcode() == TargetOpcode::PATCHABLE_TAIL_CALL)
2214 return false;
2215
2216 if (Last != First && Last->isReturn()) {
2217 --Last;
2218 if (Last->getOpcode() == TargetOpcode::PATCHABLE_FUNCTION_EXIT ||
2219 Last->getOpcode() == TargetOpcode::PATCHABLE_TAIL_CALL)
2220 return false;
2221 }
2222 return true;
2223}
2224
2226 return MI->isCall() || MI->hasUnmodeledSideEffects() ||
2227 (MI->hasOrderedMemoryRef() && !MI->isDereferenceableInvariantLoad());
2228}
unsigned SubReg
unsigned const MachineRegisterInfo * MRI
MachineInstrBuilder & UseMI
MachineInstrBuilder MachineInstrBuilder & DefMI
assert(UImm &&(UImm !=~static_cast< T >(0)) &&"Invalid immediate!")
SmallVector< int16_t, MAX_SRC_OPERANDS_NUM > OperandIndices
MachineBasicBlock & MBB
MachineBasicBlock MachineBasicBlock::iterator DebugLoc DL
DXIL Forward Handle Accesses
This file contains constants used for implementing Dwarf debug support.
const HexagonInstrInfo * TII
IRTranslator LLVM IR MI
const AbstractManglingParser< Derived, Alloc >::OperatorInfo AbstractManglingParser< Derived, Alloc >::Ops[]
#define F(x, y, z)
Definition MD5.cpp:54
#define I(x, y, z)
Definition MD5.cpp:57
Register const TargetRegisterInfo * TRI
Promote Memory to Register
Definition Mem2Reg.cpp:110
static unsigned getReg(const MCDisassembler *D, unsigned RC, unsigned RegNo)
MachineInstr unsigned OpIdx
uint64_t IntrinsicInst * II
TargetInstrInfo::RegSubRegPairAndIdx RegSubRegPairAndIdx
This file defines the SmallSet class.
This file contains some functions that are useful when dealing with strings.
static bool isAsmComment(const char *Str, const MCAsmInfo &MAI)
static void transferImplicitOperands(MachineInstr *MI, const TargetRegisterInfo *TRI)
transferImplicitOperands - MI is a pseudo-instruction, and the lowered replacement instructions immed...
static cl::opt< bool > EnableAccReassociation("acc-reassoc", cl::Hidden, cl::init(true), cl::desc("Enable reassociation of accumulation chains"))
static std::pair< bool, bool > mustSwapOperands(unsigned Pattern)
static const TargetRegisterClass * canFoldCopy(const MachineInstr &MI, const TargetInstrInfo &TII, unsigned FoldIdx)
static cl::opt< unsigned int > MinAccumulatorDepth("acc-min-depth", cl::Hidden, cl::init(8), cl::desc("Minimum length of accumulator chains " "required for the optimization to kick in"))
static void foldInlineAsmMemOperand(MachineInstr *MI, unsigned OpNo, int FI, const TargetInstrInfo &TII)
static cl::opt< unsigned int > MaxAccumulatorWidth("acc-max-width", cl::Hidden, cl::init(3), cl::desc("Maximum number of branches in the accumulator tree"))
static bool canCombine(MachineBasicBlock &MBB, MachineOperand &MO, unsigned CombineOpc=0)
static cl::opt< bool > DisableHazardRecognizer("disable-sched-hazard", cl::Hidden, cl::init(false), cl::desc("Disable hazard detection during preRA scheduling"))
static MachineInstr * foldPatchpoint(MachineFunction &MF, MachineInstr &MI, ArrayRef< unsigned > Ops, int FrameIndex, const TargetInstrInfo &TII)
This file describes how to lower LLVM code to machine code.
ArrayRef - Represent a constant reference to an array (0 or more elements consecutively in memory),...
Definition ArrayRef.h:41
DWARF expression.
static LLVM_ABI void appendOffset(SmallVectorImpl< uint64_t > &Ops, int64_t Offset)
Append Ops with operations to apply the Offset.
static LLVM_ABI DIExpression * prepend(const DIExpression *Expr, uint8_t Flags, int64_t Offset=0)
Prepend DIExpr with a deref and offset operation and optionally turn it into a stack value or/and an ...
static LLVM_ABI DIExpression * prependOpcodes(const DIExpression *Expr, SmallVectorImpl< uint64_t > &Ops, bool StackValue=false, bool EntryValue=false)
Prepend DIExpr with the given opcodes and optionally turn it into a stack value.
bool isLittleEndian() const
Layout endianness...
Definition DataLayout.h:207
A debug info location.
Definition DebugLoc.h:124
std::pair< iterator, bool > insert(const std::pair< KeyT, ValueT > &KV)
Definition DenseMap.h:233
Attribute getFnAttribute(Attribute::AttrKind Kind) const
Return the attribute for the given attribute kind.
Definition Function.cpp:762
LLVMContext & getContext() const
getContext - Return a reference to the LLVMContext associated with this function.
Definition Function.cpp:359
bool hasFnAttribute(Attribute::AttrKind Kind) const
Return true if the function has the attribute.
Definition Function.cpp:727
LLVM_ABI std::optional< StringRef > getSectionPrefix() const
Get the section prefix for this global object.
Definition Globals.cpp:309
bool hasSection() const
Check if this global has a custom object file section.
static std::vector< StringRef > getExtraInfoNames(unsigned ExtraInfo)
Definition InlineAsm.h:446
static StringRef getMemConstraintName(ConstraintCode C)
Definition InlineAsm.h:470
Itinerary data supplied by a subtarget to be used by a target.
std::optional< unsigned > getOperandCycle(unsigned ItinClassIndx, unsigned OperandIdx) const
Return the cycle for the given class and operand.
unsigned getStageLatency(unsigned ItinClassIndx) const
Return the total stage latency of the given class.
std::optional< unsigned > getOperandLatency(unsigned DefClass, unsigned DefIdx, unsigned UseClass, unsigned UseIdx) const
Compute and return the use operand latency of a given itinerary class and operand index if the value ...
const InstrItinerary * Itineraries
Array of itineraries selected.
bool isEmpty() const
Returns true if there are no itineraries.
bool hasValue() const
static LocationSize precise(uint64_t Value)
TypeSize getValue() const
This class is intended to be used as a base class for asm properties and features specific to the tar...
Definition MCAsmInfo.h:64
virtual unsigned getMaxInstLength(const MCSubtargetInfo *STI=nullptr) const
Returns the maximum possible encoded instruction size in bytes.
Definition MCAsmInfo.h:527
StringRef getCommentString() const
Definition MCAsmInfo.h:538
const char * getSeparatorString() const
Definition MCAsmInfo.h:533
Instances of this class represent a single low-level machine instruction.
Definition MCInst.h:188
Describe properties that are true of each instruction in the target description file.
unsigned getSchedClass() const
Return the scheduling class for this instruction.
const MCInstrDesc & get(unsigned Opcode) const
Return the machine instruction descriptor that corresponds to the specified instruction opcode.
Definition MCInstrInfo.h:90
This holds information about one operand of a machine instruction, indicating the register class for ...
Definition MCInstrDesc.h:87
static MDTuple * get(LLVMContext &Context, ArrayRef< Metadata * > MDs)
Definition Metadata.h:1569
Set of metadata that should be preserved when using BuildMI().
MachineInstrBundleIterator< MachineInstr > iterator
The MachineFrameInfo class represents an abstract stack frame until prolog/epilog code is inserted.
bool isImmutableObjectIndex(int ObjectIdx) const
Returns true if the specified index corresponds to an immutable object.
Align getObjectAlign(int ObjectIdx) const
Return the alignment of the specified stack object.
int64_t getObjectSize(int ObjectIdx) const
Return the size of the specified object.
int64_t getObjectOffset(int ObjectIdx) const
Return the assigned stack offset of the specified object from the incoming stack pointer.
const TargetSubtargetInfo & getSubtarget() const
getSubtarget - Return the subtarget for which this machine code is being compiled.
MachineMemOperand * getMachineMemOperand(MachinePointerInfo PtrInfo, MachineMemOperand::Flags f, LLT MemTy, Align base_alignment, const AAMDNodes &AAInfo=AAMDNodes(), const MDNode *Ranges=nullptr, SyncScope::ID SSID=SyncScope::System, AtomicOrdering Ordering=AtomicOrdering::NotAtomic, AtomicOrdering FailureOrdering=AtomicOrdering::NotAtomic)
getMachineMemOperand - Allocate a new MachineMemOperand.
MachineFrameInfo & getFrameInfo()
getFrameInfo - Return the frame info object for the current function.
MachineRegisterInfo & getRegInfo()
getRegInfo - Return information about the registers currently in use.
const DataLayout & getDataLayout() const
Return the DataLayout attached to the Module associated to this MF.
Function & getFunction()
Return the LLVM function that this machine code represents.
BasicBlockListType::iterator iterator
const MachineFunctionProperties & getProperties() const
Get the function properties.
const TargetMachine & getTarget() const
getTarget - Return the target machine this machine code is compiled with
const MachineInstrBuilder & addImm(int64_t Val) const
Add a new immediate operand.
const MachineInstrBuilder & add(const MachineOperand &MO) const
const MachineInstrBuilder & addFrameIndex(int Idx) const
const MachineInstrBuilder & addReg(Register RegNo, unsigned flags=0, unsigned SubReg=0) const
Add a new virtual register operand.
const MachineInstrBuilder & copyImplicitOps(const MachineInstr &OtherMI) const
Copy all the implicit operands from OtherMI onto this one.
MachineInstr * getInstr() const
If conversion operators fail, use this method to get the MachineInstr explicitly.
const MachineInstrBuilder & copyMIMetadata(const MIMetadata &MIMD) const
Representation of each machine instruction.
ArrayRef< MachineMemOperand * >::iterator mmo_iterator
unsigned getOpcode() const
Returns the opcode of this MachineInstr.
const MachineBasicBlock * getParent() const
void setFlags(unsigned flags)
unsigned getNumOperands() const
Retuns the total number of operands.
void setDebugInstrNum(unsigned Num)
Set instruction number of this MachineInstr.
mmo_iterator memoperands_end() const
Access to memory operands of the instruction.
unsigned peekDebugInstrNum() const
Examine the instruction number of this MachineInstr.
LLVM_ABI void setMemRefs(MachineFunction &MF, ArrayRef< MachineMemOperand * > MemRefs)
Assign this MachineInstr's memory reference descriptor list.
bool mayLoad(QueryType Type=AnyInBundle) const
Return true if this instruction could possibly read memory.
bool isCFIInstruction() const
bool isNotDuplicable(QueryType Type=AnyInBundle) const
Return true if this instruction cannot be safely duplicated.
void clearFlag(MIFlag Flag)
clearFlag - Clear a MI flag.
mop_range explicit_operands()
LLVM_ABI void tieOperands(unsigned DefIdx, unsigned UseIdx)
Add a tie between the register operands at DefIdx and UseIdx.
mmo_iterator memoperands_begin() const
Access to memory operands of the instruction.
LLVM_ABI void cloneInstrSymbols(MachineFunction &MF, const MachineInstr &MI)
Clone another MachineInstr's pre- and post- instruction symbols and replace ours with it.
LLVM_ABI bool isIdenticalTo(const MachineInstr &Other, MICheckType Check=CheckDefs) const
Return true if this instruction is identical to Other.
LLVM_ABI const MachineFunction * getMF() const
Return the function that contains the basic block that this instruction belongs to.
ArrayRef< MachineMemOperand * > memoperands() const
Access to memory operands of the instruction.
bool mayStore(QueryType Type=AnyInBundle) const
Return true if this instruction could possibly modify memory.
const MachineOperand & getOperand(unsigned i) const
uint32_t getFlags() const
Return the MI flags bitvector.
bool canFoldAsLoad(QueryType Type=IgnoreBundle) const
Return true for instructions that can be folded as memory operands in other instructions.
LLVM_ABI const TargetRegisterClass * getRegClassConstraint(unsigned OpIdx, const TargetInstrInfo *TII, const TargetRegisterInfo *TRI) const
Compute the static register class constraint for operand OpIdx.
LLVM_ABI void addMemOperand(MachineFunction &MF, MachineMemOperand *MO)
Add a MachineMemOperand to the machine instruction.
A description of a memory reference used in the backend.
LocationSize getSize() const
Return the size in bytes of the memory reference.
const PseudoSourceValue * getPseudoValue() const
Flags
Flags values. These may be or'd together.
@ MOLoad
The memory access reads data.
@ MOStore
The memory access writes data.
This class contains meta information specific to a module.
MachineOperand class - Representation of each machine instruction operand.
void setSubReg(unsigned subReg)
unsigned getSubReg() const
void setIsInternalRead(bool Val=true)
void setImm(int64_t immVal)
int64_t getImm() const
LLVM_ABI void setIsRenamable(bool Val=true)
bool isReg() const
isReg - Tests if this is a MO_Register operand.
LLVM_ABI void setReg(Register Reg)
Change the register this operand corresponds to.
bool isImm() const
isImm - Tests if this is a MO_Immediate operand.
void setIsKill(bool Val=true)
LLVM_ABI bool isRenamable() const
isRenamable - Returns true if this register may be renamed, i.e.
void setMBB(MachineBasicBlock *MBB)
void setIsUndef(bool Val=true)
Register getReg() const
getReg - Returns the register number.
static MachineOperand CreateReg(Register Reg, bool isDef, bool isImp=false, bool isKill=false, bool isDead=false, bool isUndef=false, bool isEarlyClobber=false, unsigned SubReg=0, bool isDebug=false, bool isInternalRead=false, bool isRenamable=false)
bool isMBB() const
isMBB - Tests if this is a MO_MachineBasicBlock operand.
MachineRegisterInfo - Keep track of information for virtual and physical registers,...
const TargetRegisterClass * getRegClass(Register Reg) const
Return the register class of the specified virtual register.
MI-level patchpoint operands.
Definition StackMaps.h:77
Special value supplied for machine level alias analysis.
virtual bool mayAlias(const MachineFrameInfo *) const
Return true if the memory pointed to by this PseudoSourceValue can ever alias an LLVM IR Value.
Wrapper class representing virtual and physical registers.
Definition Register.h:20
constexpr bool isVirtual() const
Return true if the specified register number is in the virtual register namespace.
Definition Register.h:79
constexpr bool isPhysical() const
Return true if the specified register number is in the physical register namespace.
Definition Register.h:83
Represents one node in the SelectionDAG.
bool isMachineOpcode() const
Test if this node has a post-isel opcode, directly corresponding to a MachineInstr opcode.
unsigned getMachineOpcode() const
This may only be called if isMachineOpcode returns true.
ScheduleDAGMI is an implementation of ScheduleDAGInstrs that simply schedules machine instructions ac...
HazardRecognizer - This determines whether or not an instruction can be issued this cycle,...
SmallSet - This maintains a set of unique values, optimizing for the case when the set is small (less...
Definition SmallSet.h:133
bool contains(const T &V) const
Check if the SmallSet contains the given element.
Definition SmallSet.h:228
This class consists of common code factored out of the SmallVector class to reduce code duplication b...
void push_back(const T &Elt)
This is a 'vector' (really, a variable-sized array), optimized for the case when the array is small.
MI-level stackmap operands.
Definition StackMaps.h:36
MI-level Statepoint operands.
Definition StackMaps.h:159
StringRef - Represent a constant reference to a string, i.e.
Definition StringRef.h:55
constexpr size_t size() const
size - Get the string size.
Definition StringRef.h:146
constexpr const char * data() const
data - Get a pointer to the start of the string (which may not be null terminated).
Definition StringRef.h:140
Information about stack frame layout on the target.
StackDirection getStackGrowthDirection() const
getStackGrowthDirection - Return the direction the stack grows
int alignSPAdjust(int SPAdj) const
alignSPAdjust - This method aligns the stack adjustment to the correct alignment.
TargetInstrInfo - Interface to description of machine instruction set.
virtual ScheduleHazardRecognizer * CreateTargetPostRAHazardRecognizer(const InstrItineraryData *, const ScheduleDAG *DAG) const
Allocate and return a hazard recognizer to use for this target when scheduling the machine instructio...
virtual MachineInstr * foldMemoryOperandImpl(MachineFunction &MF, MachineInstr &MI, ArrayRef< unsigned > Ops, MachineBasicBlock::iterator InsertPt, int FrameIndex, LiveIntervals *LIS=nullptr, VirtRegMap *VRM=nullptr) const
Target-dependent implementation for foldMemoryOperand.
virtual bool hasLowDefLatency(const TargetSchedModel &SchedModel, const MachineInstr &DefMI, unsigned DefIdx) const
Compute operand latency of a def of 'Reg'.
virtual void setSpecialOperandAttr(MachineInstr &OldMI1, MachineInstr &OldMI2, MachineInstr &NewMI1, MachineInstr &NewMI2) const
This is an architecture-specific helper function of reassociateOps.
const TargetRegisterInfo & TRI
virtual unsigned getNumMicroOps(const InstrItineraryData *ItinData, const MachineInstr &MI) const
Return the number of u-operations the given machine instruction will be decoded to on the target cpu.
virtual const TargetRegisterClass * getRegClass(const MCInstrDesc &MCID, unsigned OpNum) const
Given a machine instruction descriptor, returns the register class constraint for OpNum,...
virtual int getSPAdjust(const MachineInstr &MI) const
Returns the actual stack pointer adjustment made by an instruction as part of a call sequence.
virtual void ReplaceTailWithBranchTo(MachineBasicBlock::iterator Tail, MachineBasicBlock *NewDest) const
Delete the instruction OldInst and everything after it, replacing it with an unconditional branch to ...
virtual bool PredicateInstruction(MachineInstr &MI, ArrayRef< MachineOperand > Pred) const
Convert the instruction into a predicated instruction.
int16_t getOpRegClassID(const MCOperandInfo &OpInfo) const
bool areOpcodesEqualOrInverse(unsigned Opcode1, unsigned Opcode2) const
Return true when \P Opcode1 or its inversion is equal to \P Opcode2.
virtual outliner::InstrType getOutliningTypeImpl(const MachineModuleInfo &MMI, MachineBasicBlock::iterator &MIT, unsigned Flags) const
Target-dependent implementation for getOutliningTypeImpl.
virtual bool getInsertSubregLikeInputs(const MachineInstr &MI, unsigned DefIdx, RegSubRegPair &BaseReg, RegSubRegPairAndIdx &InsertedReg) const
Target-dependent implementation of getInsertSubregInputs.
outliner::InstrType getOutliningType(const MachineModuleInfo &MMI, MachineBasicBlock::iterator &MIT, unsigned Flags) const
Returns how or if MIT should be outlined.
virtual bool isThroughputPattern(unsigned Pattern) const
Return true when a code sequence can improve throughput.
bool getAccumulatorReassociationPatterns(MachineInstr &Root, SmallVectorImpl< unsigned > &Patterns) const
Find chains of accumulations that can be rewritten as a tree for increased ILP.
virtual std::pair< unsigned, unsigned > getPatchpointUnfoldableRange(const MachineInstr &MI) const
For a patchpoint, stackmap, or statepoint intrinsic, return the range of operands which can't be fold...
virtual bool findCommutedOpIndices(const MachineInstr &MI, unsigned &SrcOpIdx1, unsigned &SrcOpIdx2) const
Returns true iff the routine could find two commutable operands in the given machine instruction.
virtual void mergeOutliningCandidateAttributes(Function &F, std::vector< outliner::Candidate > &Candidates) const
Optional target hook to create the LLVM IR attributes for the outlined function.
bool isUnpredicatedTerminator(const MachineInstr &MI) const
Returns true if the instruction is a terminator instruction that has not been predicated.
virtual void insertNoop(MachineBasicBlock &MBB, MachineBasicBlock::iterator MI) const
Insert a noop into the instruction stream at the specified point.
void getAccumulatorChain(MachineInstr *CurrentInstr, SmallVectorImpl< Register > &Chain) const
Find the chain of accumulator instructions in \P MBB and return them in \P Chain.
bool isFrameInstr(const MachineInstr &I) const
Returns true if the argument is a frame pseudo instruction.
virtual bool getRegSequenceLikeInputs(const MachineInstr &MI, unsigned DefIdx, SmallVectorImpl< RegSubRegPairAndIdx > &InputRegs) const
Target-dependent implementation of getRegSequenceInputs.
virtual void loadRegFromStackSlot(MachineBasicBlock &MBB, MachineBasicBlock::iterator MI, Register DestReg, int FrameIndex, const TargetRegisterClass *RC, Register VReg, MachineInstr::MIFlag Flags=MachineInstr::NoFlags) const
Load the specified register of the given register class from the specified stack frame index.
virtual bool getStackSlotRange(const TargetRegisterClass *RC, unsigned SubIdx, unsigned &Size, unsigned &Offset, const MachineFunction &MF) const
Compute the size in bytes and offset within a stack slot of a spilled register or subregister.
virtual ScheduleHazardRecognizer * CreateTargetMIHazardRecognizer(const InstrItineraryData *, const ScheduleDAGMI *DAG) const
Allocate and return a hazard recognizer to use for this target when scheduling the machine instructio...
virtual bool hasStoreToStackSlot(const MachineInstr &MI, SmallVectorImpl< const MachineMemOperand * > &Accesses) const
If the specified machine instruction has a store to a stack slot, return true along with the FrameInd...
virtual bool hasReassociableOperands(const MachineInstr &Inst, const MachineBasicBlock *MBB) const
Return true when \P Inst has reassociable operands in the same \P MBB.
virtual void storeRegToStackSlot(MachineBasicBlock &MBB, MachineBasicBlock::iterator MI, Register SrcReg, bool isKill, int FrameIndex, const TargetRegisterClass *RC, Register VReg, MachineInstr::MIFlag Flags=MachineInstr::NoFlags) const
Store the specified register of the given register class to the specified stack frame index.
virtual unsigned getInlineAsmLength(const char *Str, const MCAsmInfo &MAI, const TargetSubtargetInfo *STI=nullptr) const
Measure the specified inline asm to determine an approximation of its length.
virtual void genAlternativeCodeSequence(MachineInstr &Root, unsigned Pattern, SmallVectorImpl< MachineInstr * > &InsInstrs, SmallVectorImpl< MachineInstr * > &DelInstrs, DenseMap< Register, unsigned > &InstIdxForVirtReg) const
When getMachineCombinerPatterns() finds patterns, this function generates the instructions that could...
virtual std::optional< ParamLoadedValue > describeLoadedValue(const MachineInstr &MI, Register Reg) const
Produce the expression describing the MI loading a value into the physical register Reg.
void lowerCopy(MachineInstr *MI, const TargetRegisterInfo *TRI) const
This function defines the logic to lower COPY instruction to target specific instruction(s).
virtual unsigned getReduceOpcodeForAccumulator(unsigned int AccumulatorOpCode) const
Returns the opcode that should be use to reduce accumulation registers.
virtual Register isLoadFromStackSlot(const MachineInstr &MI, int &FrameIndex) const
If the specified machine instruction is a direct load from a stack slot, return the virtual or physic...
MachineInstr * foldMemoryOperand(MachineInstr &MI, ArrayRef< unsigned > Ops, int FI, LiveIntervals *LIS=nullptr, VirtRegMap *VRM=nullptr) const
Attempt to fold a load or store of the specified stack slot into the specified machine instruction fo...
virtual MachineInstr * optimizeLoadInstr(MachineInstr &MI, const MachineRegisterInfo *MRI, Register &FoldAsLoadDefReg, MachineInstr *&DefMI) const
Try to remove the load by folding it to a register operand at the use.
virtual ScheduleHazardRecognizer * CreateTargetHazardRecognizer(const TargetSubtargetInfo *STI, const ScheduleDAG *DAG) const
Allocate and return a hazard recognizer to use for this target when scheduling the machine instructio...
TargetInstrInfo(const TargetRegisterInfo &TRI, unsigned CFSetupOpcode=~0u, unsigned CFDestroyOpcode=~0u, unsigned CatchRetOpcode=~0u, unsigned ReturnOpcode=~0u, const int16_t *const RegClassByHwModeTable=nullptr)
virtual unsigned insertBranch(MachineBasicBlock &MBB, MachineBasicBlock *TBB, MachineBasicBlock *FBB, ArrayRef< MachineOperand > Cond, const DebugLoc &DL, int *BytesAdded=nullptr) const
Insert branch code into the end of the specified MachineBasicBlock.
virtual void copyPhysReg(MachineBasicBlock &MBB, MachineBasicBlock::iterator MI, const DebugLoc &DL, Register DestReg, Register SrcReg, bool KillSrc, bool RenamableDest=false, bool RenamableSrc=false) const
Emit instructions to copy a pair of physical registers.
virtual unsigned getAccumulationStartOpcode(unsigned Opcode) const
Returns an opcode which defines the accumulator used by \P Opcode.
unsigned getCallFrameSetupOpcode() const
These methods return the opcode of the frame setup/destroy instructions if they exist (-1 otherwise).
virtual bool getMachineCombinerPatterns(MachineInstr &Root, SmallVectorImpl< unsigned > &Patterns, bool DoRegPressureReduce) const
Return true when there is potentially a faster code sequence for an instruction chain ending in Root.
virtual bool isReMaterializableImpl(const MachineInstr &MI) const
For instructions with opcodes for which the M_REMATERIALIZABLE flag is set, this hook lets the target...
virtual MCInst getNop() const
Return the noop instruction to use for a noop.
unsigned getCallFrameSizeAt(MachineInstr &MI) const
virtual MachineInstr & duplicate(MachineBasicBlock &MBB, MachineBasicBlock::iterator InsertBefore, const MachineInstr &Orig) const
Clones instruction or the whole instruction bundle Orig and insert into MBB before InsertBefore.
std::pair< unsigned, unsigned > getReassociationOpcodes(unsigned Pattern, const MachineInstr &Root, const MachineInstr &Prev) const
Reassociation of some instructions requires inverse operations (e.g.
virtual bool isMBBSafeToOutlineFrom(MachineBasicBlock &MBB, unsigned &Flags) const
Optional target hook that returns true if MBB is safe to outline from, and returns any target-specifi...
virtual void getReassociateOperandIndices(const MachineInstr &Root, unsigned Pattern, std::array< unsigned, 5 > &OperandIndices) const
The returned array encodes the operand index for each parameter because the operands may be commuted;...
int64_t getFrameTotalSize(const MachineInstr &I) const
Returns the total frame size, which is made up of the space set up inside the pair of frame start-sto...
MachineInstr * commuteInstruction(MachineInstr &MI, bool NewMI=false, unsigned OpIdx1=CommuteAnyOperandIndex, unsigned OpIdx2=CommuteAnyOperandIndex) const
This method commutes the operands of the given machine instruction MI.
virtual std::optional< unsigned > getOperandLatency(const InstrItineraryData *ItinData, SDNode *DefNode, unsigned DefIdx, SDNode *UseNode, unsigned UseIdx) const
virtual bool isAssociativeAndCommutative(const MachineInstr &Inst, bool Invert=false) const
Return true when \P Inst is both associative and commutative.
virtual void reMaterialize(MachineBasicBlock &MBB, MachineBasicBlock::iterator MI, Register DestReg, unsigned SubIdx, const MachineInstr &Orig) const
Re-issue the specified 'original' instruction at the specific location targeting a new destination re...
void reassociateOps(MachineInstr &Root, MachineInstr &Prev, unsigned Pattern, SmallVectorImpl< MachineInstr * > &InsInstrs, SmallVectorImpl< MachineInstr * > &DelInstrs, ArrayRef< unsigned > OperandIndices, DenseMap< Register, unsigned > &InstrIdxForVirtReg) const
Attempt to reassociate \P Root and \P Prev according to \P Pattern to reduce critical path length.
virtual std::optional< unsigned > getInverseOpcode(unsigned Opcode) const
Return the inverse operation opcode if it exists for \P Opcode (e.g.
virtual void insertNoops(MachineBasicBlock &MBB, MachineBasicBlock::iterator MI, unsigned Quantity) const
Insert noops into the instruction stream at the specified point.
unsigned getCallFrameDestroyOpcode() const
int64_t getFrameSize(const MachineInstr &I) const
Returns size of the frame associated with the given frame instruction.
virtual bool isPredicated(const MachineInstr &MI) const
Returns true if the instruction is already predicated.
bool getInsertSubregInputs(const MachineInstr &MI, unsigned DefIdx, RegSubRegPair &BaseReg, RegSubRegPairAndIdx &InsertedReg) const
Build the equivalent inputs of a INSERT_SUBREG for the given MI and DefIdx.
virtual ~TargetInstrInfo()
virtual unsigned getInstrLatency(const InstrItineraryData *ItinData, const MachineInstr &MI, unsigned *PredCost=nullptr) const
Compute the instruction latency of a given instruction.
virtual bool produceSameValue(const MachineInstr &MI0, const MachineInstr &MI1, const MachineRegisterInfo *MRI=nullptr) const
Return true if two machine instructions would produce identical values.
virtual bool isAccumulationOpcode(unsigned Opcode) const
Return true when \P OpCode is an instruction which performs accumulation into one of its operand regi...
std::optional< DestSourcePair > isCopyInstr(const MachineInstr &MI) const
If the specific machine instruction is a instruction that moves/copies value from one register to ano...
bool isReassociationCandidate(const MachineInstr &Inst, bool &Commuted) const
Return true if the input \P Inst is part of a chain of dependent ops that are suitable for reassociat...
void reduceAccumulatorTree(SmallVectorImpl< Register > &RegistersToReduce, SmallVectorImpl< MachineInstr * > &InsInstrs, MachineFunction &MF, MachineInstr &Root, MachineRegisterInfo &MRI, DenseMap< Register, unsigned > &InstrIdxForVirtReg, Register ResultReg) const
Reduces branches of the accumulator tree into a single register.
virtual bool isSchedulingBoundary(const MachineInstr &MI, const MachineBasicBlock *MBB, const MachineFunction &MF) const
Test if the given instruction should be considered a scheduling boundary.
virtual bool getMemOperandsWithOffsetWidth(const MachineInstr &MI, SmallVectorImpl< const MachineOperand * > &BaseOps, int64_t &Offset, bool &OffsetIsScalable, LocationSize &Width, const TargetRegisterInfo *TRI) const
Get zero or more base operands and the byte offset of an instruction that reads/writes memory.
virtual unsigned getPredicationCost(const MachineInstr &MI) const
virtual CombinerObjective getCombinerObjective(unsigned Pattern) const
Return the objective of a combiner pattern.
virtual MachineInstr * commuteInstructionImpl(MachineInstr &MI, bool NewMI, unsigned OpIdx1, unsigned OpIdx2) const
This method commutes the operands of the given machine instruction MI.
virtual bool isFunctionSafeToSplit(const MachineFunction &MF) const
Return true if the function is a viable candidate for machine function splitting.
virtual MachineTraceStrategy getMachineCombinerTraceStrategy() const
Return a strategy that MachineCombiner must use when creating traces.
bool getRegSequenceInputs(const MachineInstr &MI, unsigned DefIdx, SmallVectorImpl< RegSubRegPairAndIdx > &InputRegs) const
Build the equivalent inputs of a REG_SEQUENCE for the given MI and DefIdx.
virtual bool hasLoadFromStackSlot(const MachineInstr &MI, SmallVectorImpl< const MachineMemOperand * > &Accesses) const
If the specified machine instruction has a load from a stack slot, return true along with the FrameIn...
virtual bool isGlobalMemoryObject(const MachineInstr *MI) const
Returns true if MI is an instruction we are unable to reason about (like a call or something with unm...
virtual std::optional< RegImmPair > isAddImmediate(const MachineInstr &MI, Register Reg) const
If the specific machine instruction is an instruction that adds an immediate value and a register,...
unsigned defaultDefLatency(const MCSchedModel &SchedModel, const MachineInstr &DefMI) const
Return the default expected latency for a def based on its opcode.
static const unsigned CommuteAnyOperandIndex
virtual bool hasReassociableSibling(const MachineInstr &Inst, bool &Commuted) const
Return true when \P Inst has reassociable sibling.
virtual std::string createMIROperandComment(const MachineInstr &MI, const MachineOperand &Op, unsigned OpIdx, const TargetRegisterInfo *TRI) const
virtual bool isHighLatencyDef(int opc) const
Return true if this opcode has high latency to its result.
static bool fixCommutedOpIndices(unsigned &ResultIdx1, unsigned &ResultIdx2, unsigned CommutableOpIdx1, unsigned CommutableOpIdx2)
Assigns the (CommutableOpIdx1, CommutableOpIdx2) pair of commutable operand indices to (ResultIdx1,...
bool getExtractSubregInputs(const MachineInstr &MI, unsigned DefIdx, RegSubRegPairAndIdx &InputReg) const
Build the equivalent inputs of a EXTRACT_SUBREG for the given MI and DefIdx.
virtual bool getExtractSubregLikeInputs(const MachineInstr &MI, unsigned DefIdx, RegSubRegPairAndIdx &InputReg) const
Target-dependent implementation of getExtractSubregInputs.
bool usePreRAHazardRecognizer() const
Provide a global flag for disabling the PreRA hazard recognizer that targets may choose to honor.
bool getMemOperandWithOffset(const MachineInstr &MI, const MachineOperand *&BaseOp, int64_t &Offset, bool &OffsetIsScalable, const TargetRegisterInfo *TRI) const
Get the base operand and byte offset of an instruction that reads/writes memory.
Register getStackPointerRegisterToSaveRestore() const
If a physical register, this specifies the register that llvm.savestack/llvm.restorestack should save...
This class defines information used to lower LLVM code to legal SelectionDAG operators that the targe...
const Triple & getTargetTriple() const
bool contains(Register Reg) const
Return true if the specified register is included in this register class.
bool hasSubClassEq(const TargetRegisterClass *RC) const
Returns true if RC is a sub-class of or equal to this class.
TargetRegisterInfo base class - We assume that the target defines a static array of TargetRegisterDes...
Provide an instruction scheduling machine model to CodeGen passes.
const InstrItineraryData * getInstrItineraries() const
TargetSubtargetInfo - Generic base class for all target subtargets.
virtual const TargetFrameLowering * getFrameLowering() const
virtual const TargetInstrInfo * getInstrInfo() const
virtual const TargetLowering * getTargetLowering() const
bool isOSDarwin() const
Is this a "Darwin" OS (macOS, iOS, tvOS, watchOS, DriverKit, XROS, or bridgeOS).
Definition Triple.h:613
A raw_ostream that writes to an std::string.
#define llvm_unreachable(msg)
Marks that the current location is not supposed to be reachable.
@ Tail
Attemps to make calls as fast as possible while guaranteeing that tail call optimization can always b...
Definition CallingConv.h:76
@ C
The default llvm calling convention, compatible with C.
Definition CallingConv.h:34
@ Define
Register definition.
initializer< Ty > init(const Ty &Val)
InstrType
Represents how an instruction should be mapped by the outliner.
This is an optimization pass for GlobalISel generic memory operations.
@ Offset
Definition DWP.cpp:477
@ Length
Definition DWP.cpp:477
MachineTraceStrategy
Strategies for selecting traces.
@ TS_MinInstrCount
Select the trace through a block that has the fewest instructions.
bool all_of(R &&range, UnaryPredicate P)
Provide wrappers to std::all_of which take ranges instead of having to pass begin/end explicitly.
Definition STLExtras.h:1725
MachineInstrBuilder BuildMI(MachineFunction &MF, const MIMetadata &MIMD, const MCInstrDesc &MCID)
Builder interface. Specify how to create the initial instruction itself.
auto enumerate(FirstRange &&First, RestRanges &&...Rest)
Given two or more input ranges, returns a new range whose values are tuples (A, B,...
Definition STLExtras.h:2472
constexpr from_range_t from_range
iterator_range< T > make_range(T x, T y)
Convenience function for iterating over sub-ranges.
bool isa_and_nonnull(const Y &Val)
Definition Casting.h:676
unsigned Log2_32(uint32_t Value)
Return the floor log base 2 of the specified value, -1 if the value is zero.
Definition MathExtras.h:331
auto reverse(ContainerTy &&C)
Definition STLExtras.h:406
MachineInstr * getImm(const MachineOperand &MO, const MachineRegisterInfo *MRI)
LLVM_ABI void report_fatal_error(Error Err, bool gen_crash_diag=true)
Definition Error.cpp:167
CombinerObjective
The combiner's goal may differ based on which pattern it is attempting to optimize.
LLVM_ABI VirtRegInfo AnalyzeVirtRegInBundle(MachineInstr &MI, Register Reg, SmallVectorImpl< std::pair< MachineInstr *, unsigned > > *Ops=nullptr)
AnalyzeVirtRegInBundle - Analyze how the current instruction or bundle uses a virtual register.
@ First
Helpers to iterate all locations in the MemoryEffectsBase class.
Definition ModRef.h:71
unsigned getKillRegState(bool B)
DWARFExpression::Operation Op
bool is_contained(R &&Range, const E &Element)
Returns true if Element is found in Range.
Definition STLExtras.h:1897
bool isSpace(char C)
Checks whether character C is whitespace in the "C" locale.
std::pair< MachineOperand, DIExpression * > ParamLoadedValue
void swap(llvm::BitVector &LHS, llvm::BitVector &RHS)
Implement std::swap in terms of BitVector swap.
Definition BitVector.h:869
#define N
Machine model for scheduling, bundling, and heuristics.
Definition MCSchedule.h:258
static LLVM_ABI MachinePointerInfo getFixedStack(MachineFunction &MF, int FI, int64_t Offset=0)
Return a MachinePointerInfo record that refers to the specified FrameIndex.
A pair composed of a pair of a register and a sub-register index, and another sub-register index.
A pair composed of a register and a sub-register index.
VirtRegInfo - Information about a virtual register used by a set of operands.
bool Reads
Reads - One of the operands read the virtual register.
bool Writes
Writes - One of the operands writes the virtual register.
An individual sequence of instructions to be replaced with a call to an outlined function.
MachineFunction * getMF() const