LLVM 20.0.0git
TargetInstrInfo.cpp
Go to the documentation of this file.
1//===-- TargetInstrInfo.cpp - Target Instruction Information --------------===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8//
9// This file implements the TargetInstrInfo class.
10//
11//===----------------------------------------------------------------------===//
12
30#include "llvm/IR/DataLayout.h"
32#include "llvm/MC/MCAsmInfo.h"
38
39using namespace llvm;
40
42 "disable-sched-hazard", cl::Hidden, cl::init(false),
43 cl::desc("Disable hazard detection during preRA scheduling"));
44
46
48TargetInstrInfo::getRegClass(const MCInstrDesc &MCID, unsigned OpNum,
50 const MachineFunction &MF) const {
51 if (OpNum >= MCID.getNumOperands())
52 return nullptr;
53
54 short RegClass = MCID.operands()[OpNum].RegClass;
55 if (MCID.operands()[OpNum].isLookupPtrRegClass())
56 return TRI->getPointerRegClass(MF, RegClass);
57
58 // Instructions like INSERT_SUBREG do not have fixed register classes.
59 if (RegClass < 0)
60 return nullptr;
61
62 // Otherwise just look it up normally.
63 return TRI->getRegClass(RegClass);
64}
65
66/// insertNoop - Insert a noop into the instruction stream at the specified
67/// point.
70 llvm_unreachable("Target didn't implement insertNoop!");
71}
72
73/// insertNoops - Insert noops into the instruction stream at the specified
74/// point.
77 unsigned Quantity) const {
78 for (unsigned i = 0; i < Quantity; ++i)
80}
81
82static bool isAsmComment(const char *Str, const MCAsmInfo &MAI) {
83 return strncmp(Str, MAI.getCommentString().data(),
84 MAI.getCommentString().size()) == 0;
85}
86
87/// Measure the specified inline asm to determine an approximation of its
88/// length.
89/// Comments (which run till the next SeparatorString or newline) do not
90/// count as an instruction.
91/// Any other non-whitespace text is considered an instruction, with
92/// multiple instructions separated by SeparatorString or newlines.
93/// Variable-length instructions are not handled here; this function
94/// may be overloaded in the target code to do that.
95/// We implement a special case of the .space directive which takes only a
96/// single integer argument in base 10 that is the size in bytes. This is a
97/// restricted form of the GAS directive in that we only interpret
98/// simple--i.e. not a logical or arithmetic expression--size values without
99/// the optional fill value. This is primarily used for creating arbitrary
100/// sized inline asm blocks for testing purposes.
102 const char *Str,
103 const MCAsmInfo &MAI, const TargetSubtargetInfo *STI) const {
104 // Count the number of instructions in the asm.
105 bool AtInsnStart = true;
106 unsigned Length = 0;
107 const unsigned MaxInstLength = MAI.getMaxInstLength(STI);
108 for (; *Str; ++Str) {
109 if (*Str == '\n' || strncmp(Str, MAI.getSeparatorString(),
110 strlen(MAI.getSeparatorString())) == 0) {
111 AtInsnStart = true;
112 } else if (isAsmComment(Str, MAI)) {
113 // Stop counting as an instruction after a comment until the next
114 // separator.
115 AtInsnStart = false;
116 }
117
118 if (AtInsnStart && !isSpace(static_cast<unsigned char>(*Str))) {
119 unsigned AddLength = MaxInstLength;
120 if (strncmp(Str, ".space", 6) == 0) {
121 char *EStr;
122 int SpaceSize;
123 SpaceSize = strtol(Str + 6, &EStr, 10);
124 SpaceSize = SpaceSize < 0 ? 0 : SpaceSize;
125 while (*EStr != '\n' && isSpace(static_cast<unsigned char>(*EStr)))
126 ++EStr;
127 if (*EStr == '\0' || *EStr == '\n' ||
128 isAsmComment(EStr, MAI)) // Successfully parsed .space argument
129 AddLength = SpaceSize;
130 }
131 Length += AddLength;
132 AtInsnStart = false;
133 }
134 }
135
136 return Length;
137}
138
139/// ReplaceTailWithBranchTo - Delete the instruction OldInst and everything
140/// after it, replacing it with an unconditional branch to NewDest.
141void
143 MachineBasicBlock *NewDest) const {
144 MachineBasicBlock *MBB = Tail->getParent();
145
146 // Remove all the old successors of MBB from the CFG.
147 while (!MBB->succ_empty())
149
150 // Save off the debug loc before erasing the instruction.
151 DebugLoc DL = Tail->getDebugLoc();
152
153 // Update call site info and remove all the dead instructions
154 // from the end of MBB.
155 while (Tail != MBB->end()) {
156 auto MI = Tail++;
157 if (MI->shouldUpdateCallSiteInfo())
159 MBB->erase(MI);
160 }
161
162 // If MBB isn't immediately before MBB, insert a branch to it.
164 insertBranch(*MBB, NewDest, nullptr, SmallVector<MachineOperand, 0>(), DL);
165 MBB->addSuccessor(NewDest);
166}
167
169 bool NewMI, unsigned Idx1,
170 unsigned Idx2) const {
171 const MCInstrDesc &MCID = MI.getDesc();
172 bool HasDef = MCID.getNumDefs();
173 if (HasDef && !MI.getOperand(0).isReg())
174 // No idea how to commute this instruction. Target should implement its own.
175 return nullptr;
176
177 unsigned CommutableOpIdx1 = Idx1; (void)CommutableOpIdx1;
178 unsigned CommutableOpIdx2 = Idx2; (void)CommutableOpIdx2;
179 assert(findCommutedOpIndices(MI, CommutableOpIdx1, CommutableOpIdx2) &&
180 CommutableOpIdx1 == Idx1 && CommutableOpIdx2 == Idx2 &&
181 "TargetInstrInfo::CommuteInstructionImpl(): not commutable operands.");
182 assert(MI.getOperand(Idx1).isReg() && MI.getOperand(Idx2).isReg() &&
183 "This only knows how to commute register operands so far");
184
185 Register Reg0 = HasDef ? MI.getOperand(0).getReg() : Register();
186 Register Reg1 = MI.getOperand(Idx1).getReg();
187 Register Reg2 = MI.getOperand(Idx2).getReg();
188 unsigned SubReg0 = HasDef ? MI.getOperand(0).getSubReg() : 0;
189 unsigned SubReg1 = MI.getOperand(Idx1).getSubReg();
190 unsigned SubReg2 = MI.getOperand(Idx2).getSubReg();
191 bool Reg1IsKill = MI.getOperand(Idx1).isKill();
192 bool Reg2IsKill = MI.getOperand(Idx2).isKill();
193 bool Reg1IsUndef = MI.getOperand(Idx1).isUndef();
194 bool Reg2IsUndef = MI.getOperand(Idx2).isUndef();
195 bool Reg1IsInternal = MI.getOperand(Idx1).isInternalRead();
196 bool Reg2IsInternal = MI.getOperand(Idx2).isInternalRead();
197 // Avoid calling isRenamable for virtual registers since we assert that
198 // renamable property is only queried/set for physical registers.
199 bool Reg1IsRenamable =
200 Reg1.isPhysical() ? MI.getOperand(Idx1).isRenamable() : false;
201 bool Reg2IsRenamable =
202 Reg2.isPhysical() ? MI.getOperand(Idx2).isRenamable() : false;
203 // If destination is tied to either of the commuted source register, then
204 // it must be updated.
205 if (HasDef && Reg0 == Reg1 &&
206 MI.getDesc().getOperandConstraint(Idx1, MCOI::TIED_TO) == 0) {
207 Reg2IsKill = false;
208 Reg0 = Reg2;
209 SubReg0 = SubReg2;
210 } else if (HasDef && Reg0 == Reg2 &&
211 MI.getDesc().getOperandConstraint(Idx2, MCOI::TIED_TO) == 0) {
212 Reg1IsKill = false;
213 Reg0 = Reg1;
214 SubReg0 = SubReg1;
215 }
216
217 MachineInstr *CommutedMI = nullptr;
218 if (NewMI) {
219 // Create a new instruction.
220 MachineFunction &MF = *MI.getMF();
221 CommutedMI = MF.CloneMachineInstr(&MI);
222 } else {
223 CommutedMI = &MI;
224 }
225
226 if (HasDef) {
227 CommutedMI->getOperand(0).setReg(Reg0);
228 CommutedMI->getOperand(0).setSubReg(SubReg0);
229 }
230 CommutedMI->getOperand(Idx2).setReg(Reg1);
231 CommutedMI->getOperand(Idx1).setReg(Reg2);
232 CommutedMI->getOperand(Idx2).setSubReg(SubReg1);
233 CommutedMI->getOperand(Idx1).setSubReg(SubReg2);
234 CommutedMI->getOperand(Idx2).setIsKill(Reg1IsKill);
235 CommutedMI->getOperand(Idx1).setIsKill(Reg2IsKill);
236 CommutedMI->getOperand(Idx2).setIsUndef(Reg1IsUndef);
237 CommutedMI->getOperand(Idx1).setIsUndef(Reg2IsUndef);
238 CommutedMI->getOperand(Idx2).setIsInternalRead(Reg1IsInternal);
239 CommutedMI->getOperand(Idx1).setIsInternalRead(Reg2IsInternal);
240 // Avoid calling setIsRenamable for virtual registers since we assert that
241 // renamable property is only queried/set for physical registers.
242 if (Reg1.isPhysical())
243 CommutedMI->getOperand(Idx2).setIsRenamable(Reg1IsRenamable);
244 if (Reg2.isPhysical())
245 CommutedMI->getOperand(Idx1).setIsRenamable(Reg2IsRenamable);
246 return CommutedMI;
247}
248
250 unsigned OpIdx1,
251 unsigned OpIdx2) const {
252 // If OpIdx1 or OpIdx2 is not specified, then this method is free to choose
253 // any commutable operand, which is done in findCommutedOpIndices() method
254 // called below.
255 if ((OpIdx1 == CommuteAnyOperandIndex || OpIdx2 == CommuteAnyOperandIndex) &&
256 !findCommutedOpIndices(MI, OpIdx1, OpIdx2)) {
257 assert(MI.isCommutable() &&
258 "Precondition violation: MI must be commutable.");
259 return nullptr;
260 }
261 return commuteInstructionImpl(MI, NewMI, OpIdx1, OpIdx2);
262}
263
265 unsigned &ResultIdx2,
266 unsigned CommutableOpIdx1,
267 unsigned CommutableOpIdx2) {
268 if (ResultIdx1 == CommuteAnyOperandIndex &&
269 ResultIdx2 == CommuteAnyOperandIndex) {
270 ResultIdx1 = CommutableOpIdx1;
271 ResultIdx2 = CommutableOpIdx2;
272 } else if (ResultIdx1 == CommuteAnyOperandIndex) {
273 if (ResultIdx2 == CommutableOpIdx1)
274 ResultIdx1 = CommutableOpIdx2;
275 else if (ResultIdx2 == CommutableOpIdx2)
276 ResultIdx1 = CommutableOpIdx1;
277 else
278 return false;
279 } else if (ResultIdx2 == CommuteAnyOperandIndex) {
280 if (ResultIdx1 == CommutableOpIdx1)
281 ResultIdx2 = CommutableOpIdx2;
282 else if (ResultIdx1 == CommutableOpIdx2)
283 ResultIdx2 = CommutableOpIdx1;
284 else
285 return false;
286 } else
287 // Check that the result operand indices match the given commutable
288 // operand indices.
289 return (ResultIdx1 == CommutableOpIdx1 && ResultIdx2 == CommutableOpIdx2) ||
290 (ResultIdx1 == CommutableOpIdx2 && ResultIdx2 == CommutableOpIdx1);
291
292 return true;
293}
294
296 unsigned &SrcOpIdx1,
297 unsigned &SrcOpIdx2) const {
298 assert(!MI.isBundle() &&
299 "TargetInstrInfo::findCommutedOpIndices() can't handle bundles");
300
301 const MCInstrDesc &MCID = MI.getDesc();
302 if (!MCID.isCommutable())
303 return false;
304
305 // This assumes v0 = op v1, v2 and commuting would swap v1 and v2. If this
306 // is not true, then the target must implement this.
307 unsigned CommutableOpIdx1 = MCID.getNumDefs();
308 unsigned CommutableOpIdx2 = CommutableOpIdx1 + 1;
309 if (!fixCommutedOpIndices(SrcOpIdx1, SrcOpIdx2,
310 CommutableOpIdx1, CommutableOpIdx2))
311 return false;
312
313 if (!MI.getOperand(SrcOpIdx1).isReg() || !MI.getOperand(SrcOpIdx2).isReg())
314 // No idea.
315 return false;
316 return true;
317}
318
320 if (!MI.isTerminator()) return false;
321
322 // Conditional branch is a special case.
323 if (MI.isBranch() && !MI.isBarrier())
324 return true;
325 if (!MI.isPredicable())
326 return true;
327 return !isPredicated(MI);
328}
329
332 bool MadeChange = false;
333
334 assert(!MI.isBundle() &&
335 "TargetInstrInfo::PredicateInstruction() can't handle bundles");
336
337 const MCInstrDesc &MCID = MI.getDesc();
338 if (!MI.isPredicable())
339 return false;
340
341 for (unsigned j = 0, i = 0, e = MI.getNumOperands(); i != e; ++i) {
342 if (MCID.operands()[i].isPredicate()) {
343 MachineOperand &MO = MI.getOperand(i);
344 if (MO.isReg()) {
345 MO.setReg(Pred[j].getReg());
346 MadeChange = true;
347 } else if (MO.isImm()) {
348 MO.setImm(Pred[j].getImm());
349 MadeChange = true;
350 } else if (MO.isMBB()) {
351 MO.setMBB(Pred[j].getMBB());
352 MadeChange = true;
353 }
354 ++j;
355 }
356 }
357 return MadeChange;
358}
359
361 const MachineInstr &MI,
363 size_t StartSize = Accesses.size();
364 for (MachineInstr::mmo_iterator o = MI.memoperands_begin(),
365 oe = MI.memoperands_end();
366 o != oe; ++o) {
367 if ((*o)->isLoad() &&
368 isa_and_nonnull<FixedStackPseudoSourceValue>((*o)->getPseudoValue()))
369 Accesses.push_back(*o);
370 }
371 return Accesses.size() != StartSize;
372}
373
375 const MachineInstr &MI,
377 size_t StartSize = Accesses.size();
378 for (MachineInstr::mmo_iterator o = MI.memoperands_begin(),
379 oe = MI.memoperands_end();
380 o != oe; ++o) {
381 if ((*o)->isStore() &&
382 isa_and_nonnull<FixedStackPseudoSourceValue>((*o)->getPseudoValue()))
383 Accesses.push_back(*o);
384 }
385 return Accesses.size() != StartSize;
386}
387
389 unsigned SubIdx, unsigned &Size,
390 unsigned &Offset,
391 const MachineFunction &MF) const {
393 if (!SubIdx) {
394 Size = TRI->getSpillSize(*RC);
395 Offset = 0;
396 return true;
397 }
398 unsigned BitSize = TRI->getSubRegIdxSize(SubIdx);
399 // Convert bit size to byte size.
400 if (BitSize % 8)
401 return false;
402
403 int BitOffset = TRI->getSubRegIdxOffset(SubIdx);
404 if (BitOffset < 0 || BitOffset % 8)
405 return false;
406
407 Size = BitSize / 8;
408 Offset = (unsigned)BitOffset / 8;
409
410 assert(TRI->getSpillSize(*RC) >= (Offset + Size) && "bad subregister range");
411
412 if (!MF.getDataLayout().isLittleEndian()) {
413 Offset = TRI->getSpillSize(*RC) - (Offset + Size);
414 }
415 return true;
416}
417
420 Register DestReg, unsigned SubIdx,
421 const MachineInstr &Orig,
422 const TargetRegisterInfo &TRI) const {
424 MI->substituteRegister(MI->getOperand(0).getReg(), DestReg, SubIdx, TRI);
425 MBB.insert(I, MI);
426}
427
429 const MachineInstr &MI1,
430 const MachineRegisterInfo *MRI) const {
432}
433
436 MachineBasicBlock::iterator InsertBefore,
437 const MachineInstr &Orig) const {
439 // CFI instructions are marked as non-duplicable, because Darwin compact
440 // unwind info emission can't handle multiple prologue setups.
441 assert((!Orig.isNotDuplicable() ||
443 Orig.isCFIInstruction())) &&
444 "Instruction cannot be duplicated");
445
446 return MF.cloneMachineInstrBundle(MBB, InsertBefore, Orig);
447}
448
449// If the COPY instruction in MI can be folded to a stack operation, return
450// the register class to use.
452 const TargetInstrInfo &TII,
453 unsigned FoldIdx) {
454 assert(TII.isCopyInstr(MI) && "MI must be a COPY instruction");
455 if (MI.getNumOperands() != 2)
456 return nullptr;
457 assert(FoldIdx<2 && "FoldIdx refers no nonexistent operand");
458
459 const MachineOperand &FoldOp = MI.getOperand(FoldIdx);
460 const MachineOperand &LiveOp = MI.getOperand(1 - FoldIdx);
461
462 if (FoldOp.getSubReg() || LiveOp.getSubReg())
463 return nullptr;
464
465 Register FoldReg = FoldOp.getReg();
466 Register LiveReg = LiveOp.getReg();
467
468 assert(FoldReg.isVirtual() && "Cannot fold physregs");
469
470 const MachineRegisterInfo &MRI = MI.getMF()->getRegInfo();
471 const TargetRegisterClass *RC = MRI.getRegClass(FoldReg);
472
473 if (LiveOp.getReg().isPhysical())
474 return RC->contains(LiveOp.getReg()) ? RC : nullptr;
475
476 if (RC->hasSubClassEq(MRI.getRegClass(LiveReg)))
477 return RC;
478
479 // FIXME: Allow folding when register classes are memory compatible.
480 return nullptr;
481}
482
483MCInst TargetInstrInfo::getNop() const { llvm_unreachable("Not implemented"); }
484
485std::pair<unsigned, unsigned>
487 switch (MI.getOpcode()) {
488 case TargetOpcode::STACKMAP:
489 // StackMapLiveValues are foldable
490 return std::make_pair(0, StackMapOpers(&MI).getVarIdx());
491 case TargetOpcode::PATCHPOINT:
492 // For PatchPoint, the call args are not foldable (even if reported in the
493 // stackmap e.g. via anyregcc).
494 return std::make_pair(0, PatchPointOpers(&MI).getVarIdx());
495 case TargetOpcode::STATEPOINT:
496 // For statepoints, fold deopt and gc arguments, but not call arguments.
497 return std::make_pair(MI.getNumDefs(), StatepointOpers(&MI).getVarIdx());
498 default:
499 llvm_unreachable("unexpected stackmap opcode");
500 }
501}
502
504 ArrayRef<unsigned> Ops, int FrameIndex,
505 const TargetInstrInfo &TII) {
506 unsigned StartIdx = 0;
507 unsigned NumDefs = 0;
508 // getPatchpointUnfoldableRange throws guarantee if MI is not a patchpoint.
509 std::tie(NumDefs, StartIdx) = TII.getPatchpointUnfoldableRange(MI);
510
511 unsigned DefToFoldIdx = MI.getNumOperands();
512
513 // Return false if any operands requested for folding are not foldable (not
514 // part of the stackmap's live values).
515 for (unsigned Op : Ops) {
516 if (Op < NumDefs) {
517 assert(DefToFoldIdx == MI.getNumOperands() && "Folding multiple defs");
518 DefToFoldIdx = Op;
519 } else if (Op < StartIdx) {
520 return nullptr;
521 }
522 if (MI.getOperand(Op).isTied())
523 return nullptr;
524 }
525
526 MachineInstr *NewMI =
527 MF.CreateMachineInstr(TII.get(MI.getOpcode()), MI.getDebugLoc(), true);
528 MachineInstrBuilder MIB(MF, NewMI);
529
530 // No need to fold return, the meta data, and function arguments
531 for (unsigned i = 0; i < StartIdx; ++i)
532 if (i != DefToFoldIdx)
533 MIB.add(MI.getOperand(i));
534
535 for (unsigned i = StartIdx, e = MI.getNumOperands(); i < e; ++i) {
536 MachineOperand &MO = MI.getOperand(i);
537 unsigned TiedTo = e;
538 (void)MI.isRegTiedToDefOperand(i, &TiedTo);
539
540 if (is_contained(Ops, i)) {
541 assert(TiedTo == e && "Cannot fold tied operands");
542 unsigned SpillSize;
543 unsigned SpillOffset;
544 // Compute the spill slot size and offset.
545 const TargetRegisterClass *RC =
546 MF.getRegInfo().getRegClass(MO.getReg());
547 bool Valid =
548 TII.getStackSlotRange(RC, MO.getSubReg(), SpillSize, SpillOffset, MF);
549 if (!Valid)
550 report_fatal_error("cannot spill patchpoint subregister operand");
551 MIB.addImm(StackMaps::IndirectMemRefOp);
552 MIB.addImm(SpillSize);
553 MIB.addFrameIndex(FrameIndex);
554 MIB.addImm(SpillOffset);
555 } else {
556 MIB.add(MO);
557 if (TiedTo < e) {
558 assert(TiedTo < NumDefs && "Bad tied operand");
559 if (TiedTo > DefToFoldIdx)
560 --TiedTo;
561 NewMI->tieOperands(TiedTo, NewMI->getNumOperands() - 1);
562 }
563 }
564 }
565 return NewMI;
566}
567
568static void foldInlineAsmMemOperand(MachineInstr *MI, unsigned OpNo, int FI,
569 const TargetInstrInfo &TII) {
570 // If the machine operand is tied, untie it first.
571 if (MI->getOperand(OpNo).isTied()) {
572 unsigned TiedTo = MI->findTiedOperandIdx(OpNo);
573 MI->untieRegOperand(OpNo);
574 // Intentional recursion!
575 foldInlineAsmMemOperand(MI, TiedTo, FI, TII);
576 }
577
579 TII.getFrameIndexOperands(NewOps, FI);
580 assert(!NewOps.empty() && "getFrameIndexOperands didn't create any operands");
581 MI->removeOperand(OpNo);
582 MI->insert(MI->operands_begin() + OpNo, NewOps);
583
584 // Change the previous operand to a MemKind InlineAsm::Flag. The second param
585 // is the per-target number of operands that represent the memory operand
586 // excluding this one (MD). This includes MO.
588 F.setMemConstraint(InlineAsm::ConstraintCode::m);
589 MachineOperand &MD = MI->getOperand(OpNo - 1);
590 MD.setImm(F);
591}
592
593// Returns nullptr if not possible to fold.
595 ArrayRef<unsigned> Ops, int FI,
596 const TargetInstrInfo &TII) {
597 assert(MI.isInlineAsm() && "wrong opcode");
598 if (Ops.size() > 1)
599 return nullptr;
600 unsigned Op = Ops[0];
601 assert(Op && "should never be first operand");
602 assert(MI.getOperand(Op).isReg() && "shouldn't be folding non-reg operands");
603
604 if (!MI.mayFoldInlineAsmRegOp(Op))
605 return nullptr;
606
607 MachineInstr &NewMI = TII.duplicate(*MI.getParent(), MI.getIterator(), MI);
608
609 foldInlineAsmMemOperand(&NewMI, Op, FI, TII);
610
611 // Update mayload/maystore metadata, and memoperands.
612 const VirtRegInfo &RI =
613 AnalyzeVirtRegInBundle(MI, MI.getOperand(Op).getReg());
616 if (RI.Reads) {
617 ExtraMO.setImm(ExtraMO.getImm() | InlineAsm::Extra_MayLoad);
619 }
620 if (RI.Writes) {
621 ExtraMO.setImm(ExtraMO.getImm() | InlineAsm::Extra_MayStore);
623 }
624 MachineFunction *MF = NewMI.getMF();
625 const MachineFrameInfo &MFI = MF->getFrameInfo();
627 MachinePointerInfo::getFixedStack(*MF, FI), Flags, MFI.getObjectSize(FI),
628 MFI.getObjectAlign(FI));
629 NewMI.addMemOperand(*MF, MMO);
630
631 return &NewMI;
632}
633
635 ArrayRef<unsigned> Ops, int FI,
636 LiveIntervals *LIS,
637 VirtRegMap *VRM) const {
638 auto Flags = MachineMemOperand::MONone;
639 for (unsigned OpIdx : Ops)
640 Flags |= MI.getOperand(OpIdx).isDef() ? MachineMemOperand::MOStore
642
643 MachineBasicBlock *MBB = MI.getParent();
644 assert(MBB && "foldMemoryOperand needs an inserted instruction");
645 MachineFunction &MF = *MBB->getParent();
646
647 // If we're not folding a load into a subreg, the size of the load is the
648 // size of the spill slot. But if we are, we need to figure out what the
649 // actual load size is.
650 int64_t MemSize = 0;
651 const MachineFrameInfo &MFI = MF.getFrameInfo();
653
654 if (Flags & MachineMemOperand::MOStore) {
655 MemSize = MFI.getObjectSize(FI);
656 } else {
657 for (unsigned OpIdx : Ops) {
658 int64_t OpSize = MFI.getObjectSize(FI);
659
660 if (auto SubReg = MI.getOperand(OpIdx).getSubReg()) {
661 unsigned SubRegSize = TRI->getSubRegIdxSize(SubReg);
662 if (SubRegSize > 0 && !(SubRegSize % 8))
663 OpSize = SubRegSize / 8;
664 }
665
666 MemSize = std::max(MemSize, OpSize);
667 }
668 }
669
670 assert(MemSize && "Did not expect a zero-sized stack slot");
671
672 MachineInstr *NewMI = nullptr;
673
674 if (MI.getOpcode() == TargetOpcode::STACKMAP ||
675 MI.getOpcode() == TargetOpcode::PATCHPOINT ||
676 MI.getOpcode() == TargetOpcode::STATEPOINT) {
677 // Fold stackmap/patchpoint.
678 NewMI = foldPatchpoint(MF, MI, Ops, FI, *this);
679 if (NewMI)
680 MBB->insert(MI, NewMI);
681 } else if (MI.isInlineAsm()) {
682 return foldInlineAsmMemOperand(MI, Ops, FI, *this);
683 } else {
684 // Ask the target to do the actual folding.
685 NewMI = foldMemoryOperandImpl(MF, MI, Ops, MI, FI, LIS, VRM);
686 }
687
688 if (NewMI) {
689 NewMI->setMemRefs(MF, MI.memoperands());
690 // Add a memory operand, foldMemoryOperandImpl doesn't do that.
692 NewMI->mayStore()) &&
693 "Folded a def to a non-store!");
694 assert((!(Flags & MachineMemOperand::MOLoad) ||
695 NewMI->mayLoad()) &&
696 "Folded a use to a non-load!");
697 assert(MFI.getObjectOffset(FI) != -1);
698 MachineMemOperand *MMO =
700 Flags, MemSize, MFI.getObjectAlign(FI));
701 NewMI->addMemOperand(MF, MMO);
702
703 // The pass "x86 speculative load hardening" always attaches symbols to
704 // call instructions. We need copy it form old instruction.
705 NewMI->cloneInstrSymbols(MF, MI);
706
707 return NewMI;
708 }
709
710 // Straight COPY may fold as load/store.
711 if (!isCopyInstr(MI) || Ops.size() != 1)
712 return nullptr;
713
714 const TargetRegisterClass *RC = canFoldCopy(MI, *this, Ops[0]);
715 if (!RC)
716 return nullptr;
717
718 const MachineOperand &MO = MI.getOperand(1 - Ops[0]);
720
721 if (Flags == MachineMemOperand::MOStore)
722 storeRegToStackSlot(*MBB, Pos, MO.getReg(), MO.isKill(), FI, RC, TRI,
723 Register());
724 else
725 loadRegFromStackSlot(*MBB, Pos, MO.getReg(), FI, RC, TRI, Register());
726 return &*--Pos;
727}
728
731 MachineInstr &LoadMI,
732 LiveIntervals *LIS) const {
733 assert(LoadMI.canFoldAsLoad() && "LoadMI isn't foldable!");
734#ifndef NDEBUG
735 for (unsigned OpIdx : Ops)
736 assert(MI.getOperand(OpIdx).isUse() && "Folding load into def!");
737#endif
738
739 MachineBasicBlock &MBB = *MI.getParent();
741
742 // Ask the target to do the actual folding.
743 MachineInstr *NewMI = nullptr;
744 int FrameIndex = 0;
745
746 if ((MI.getOpcode() == TargetOpcode::STACKMAP ||
747 MI.getOpcode() == TargetOpcode::PATCHPOINT ||
748 MI.getOpcode() == TargetOpcode::STATEPOINT) &&
749 isLoadFromStackSlot(LoadMI, FrameIndex)) {
750 // Fold stackmap/patchpoint.
751 NewMI = foldPatchpoint(MF, MI, Ops, FrameIndex, *this);
752 if (NewMI)
753 NewMI = &*MBB.insert(MI, NewMI);
754 } else if (MI.isInlineAsm() && isLoadFromStackSlot(LoadMI, FrameIndex)) {
755 return foldInlineAsmMemOperand(MI, Ops, FrameIndex, *this);
756 } else {
757 // Ask the target to do the actual folding.
758 NewMI = foldMemoryOperandImpl(MF, MI, Ops, MI, LoadMI, LIS);
759 }
760
761 if (!NewMI)
762 return nullptr;
763
764 // Copy the memoperands from the load to the folded instruction.
765 if (MI.memoperands_empty()) {
766 NewMI->setMemRefs(MF, LoadMI.memoperands());
767 } else {
768 // Handle the rare case of folding multiple loads.
769 NewMI->setMemRefs(MF, MI.memoperands());
771 E = LoadMI.memoperands_end();
772 I != E; ++I) {
773 NewMI->addMemOperand(MF, *I);
774 }
775 }
776 return NewMI;
777}
778
779/// transferImplicitOperands - MI is a pseudo-instruction, and the lowered
780/// replacement instructions immediately precede it. Copy any implicit
781/// operands from MI to the replacement instruction.
783 const TargetRegisterInfo *TRI) {
785 --CopyMI;
786
787 Register DstReg = MI->getOperand(0).getReg();
788 for (const MachineOperand &MO : MI->implicit_operands()) {
789 CopyMI->addOperand(MO);
790
791 // Be conservative about preserving kills when subregister defs are
792 // involved. If there was implicit kill of a super-register overlapping the
793 // copy result, we would kill the subregisters previous copies defined.
794
795 if (MO.isKill() && TRI->regsOverlap(DstReg, MO.getReg()))
796 CopyMI->getOperand(CopyMI->getNumOperands() - 1).setIsKill(false);
797 }
798}
799
801 const TargetRegisterInfo *TRI) const {
802 if (MI->allDefsAreDead()) {
803 MI->setDesc(get(TargetOpcode::KILL));
804 return;
805 }
806
807 MachineOperand &DstMO = MI->getOperand(0);
808 MachineOperand &SrcMO = MI->getOperand(1);
809
810 bool IdentityCopy = (SrcMO.getReg() == DstMO.getReg());
811 if (IdentityCopy || SrcMO.isUndef()) {
812 // No need to insert an identity copy instruction, but replace with a KILL
813 // if liveness is changed.
814 if (SrcMO.isUndef() || MI->getNumOperands() > 2) {
815 // We must make sure the super-register gets killed. Replace the
816 // instruction with KILL.
817 MI->setDesc(get(TargetOpcode::KILL));
818 return;
819 }
820 // Vanilla identity copy.
821 MI->eraseFromParent();
822 return;
823 }
824
825 copyPhysReg(*MI->getParent(), MI, MI->getDebugLoc(), DstMO.getReg(),
826 SrcMO.getReg(), SrcMO.isKill());
827
828 if (MI->getNumOperands() > 2)
830 MI->eraseFromParent();
831}
832
834 const MachineInstr &Inst, const MachineBasicBlock *MBB) const {
835 const MachineOperand &Op1 = Inst.getOperand(1);
836 const MachineOperand &Op2 = Inst.getOperand(2);
838
839 // We need virtual register definitions for the operands that we will
840 // reassociate.
841 MachineInstr *MI1 = nullptr;
842 MachineInstr *MI2 = nullptr;
843 if (Op1.isReg() && Op1.getReg().isVirtual())
844 MI1 = MRI.getUniqueVRegDef(Op1.getReg());
845 if (Op2.isReg() && Op2.getReg().isVirtual())
846 MI2 = MRI.getUniqueVRegDef(Op2.getReg());
847
848 // And at least one operand must be defined in MBB.
849 return MI1 && MI2 && (MI1->getParent() == MBB || MI2->getParent() == MBB);
850}
851
853 unsigned Opcode2) const {
854 return Opcode1 == Opcode2 || getInverseOpcode(Opcode1) == Opcode2;
855}
856
858 bool &Commuted) const {
859 const MachineBasicBlock *MBB = Inst.getParent();
861 MachineInstr *MI1 = MRI.getUniqueVRegDef(Inst.getOperand(1).getReg());
862 MachineInstr *MI2 = MRI.getUniqueVRegDef(Inst.getOperand(2).getReg());
863 unsigned Opcode = Inst.getOpcode();
864
865 // If only one operand has the same or inverse opcode and it's the second
866 // source operand, the operands must be commuted.
867 Commuted = !areOpcodesEqualOrInverse(Opcode, MI1->getOpcode()) &&
868 areOpcodesEqualOrInverse(Opcode, MI2->getOpcode());
869 if (Commuted)
870 std::swap(MI1, MI2);
871
872 // 1. The previous instruction must be the same type as Inst.
873 // 2. The previous instruction must also be associative/commutative or be the
874 // inverse of such an operation (this can be different even for
875 // instructions with the same opcode if traits like fast-math-flags are
876 // included).
877 // 3. The previous instruction must have virtual register definitions for its
878 // operands in the same basic block as Inst.
879 // 4. The previous instruction's result must only be used by Inst.
880 return areOpcodesEqualOrInverse(Opcode, MI1->getOpcode()) &&
882 isAssociativeAndCommutative(*MI1, /* Invert */ true)) &&
884 MRI.hasOneNonDBGUse(MI1->getOperand(0).getReg());
885}
886
887// 1. The operation must be associative and commutative or be the inverse of
888// such an operation.
889// 2. The instruction must have virtual register definitions for its
890// operands in the same basic block.
891// 3. The instruction must have a reassociable sibling.
893 bool &Commuted) const {
894 return (isAssociativeAndCommutative(Inst) ||
895 isAssociativeAndCommutative(Inst, /* Invert */ true)) &&
896 hasReassociableOperands(Inst, Inst.getParent()) &&
897 hasReassociableSibling(Inst, Commuted);
898}
899
900// The concept of the reassociation pass is that these operations can benefit
901// from this kind of transformation:
902//
903// A = ? op ?
904// B = A op X (Prev)
905// C = B op Y (Root)
906// -->
907// A = ? op ?
908// B = X op Y
909// C = A op B
910//
911// breaking the dependency between A and B, allowing them to be executed in
912// parallel (or back-to-back in a pipeline) instead of depending on each other.
913
914// FIXME: This has the potential to be expensive (compile time) while not
915// improving the code at all. Some ways to limit the overhead:
916// 1. Track successful transforms; bail out if hit rate gets too low.
917// 2. Only enable at -O3 or some other non-default optimization level.
918// 3. Pre-screen pattern candidates here: if an operand of the previous
919// instruction is known to not increase the critical path, then don't match
920// that pattern.
923 bool DoRegPressureReduce) const {
924 bool Commute;
925 if (isReassociationCandidate(Root, Commute)) {
926 // We found a sequence of instructions that may be suitable for a
927 // reassociation of operands to increase ILP. Specify each commutation
928 // possibility for the Prev instruction in the sequence and let the
929 // machine combiner decide if changing the operands is worthwhile.
930 if (Commute) {
933 } else {
936 }
937 return true;
938 }
939
940 return false;
941}
942
943/// Return true when a code sequence can improve loop throughput.
945 return false;
946}
947
951}
952
953std::pair<unsigned, unsigned>
955 const MachineInstr &Root,
956 const MachineInstr &Prev) const {
957 bool AssocCommutRoot = isAssociativeAndCommutative(Root);
958 bool AssocCommutPrev = isAssociativeAndCommutative(Prev);
959
960 // Early exit if both opcodes are associative and commutative. It's a trivial
961 // reassociation when we only change operands order. In this case opcodes are
962 // not required to have inverse versions.
963 if (AssocCommutRoot && AssocCommutPrev) {
964 assert(Root.getOpcode() == Prev.getOpcode() && "Expected to be equal");
965 return std::make_pair(Root.getOpcode(), Root.getOpcode());
966 }
967
968 // At least one instruction is not associative or commutative.
969 // Since we have matched one of the reassociation patterns, we expect that the
970 // instructions' opcodes are equal or one of them is the inversion of the
971 // other.
973 "Incorrectly matched pattern");
974 unsigned AssocCommutOpcode = Root.getOpcode();
975 unsigned InverseOpcode = *getInverseOpcode(Root.getOpcode());
976 if (!AssocCommutRoot)
977 std::swap(AssocCommutOpcode, InverseOpcode);
978
979 // The transformation rule (`+` is any associative and commutative binary
980 // operation, `-` is the inverse):
981 // REASSOC_AX_BY:
982 // (A + X) + Y => A + (X + Y)
983 // (A + X) - Y => A + (X - Y)
984 // (A - X) + Y => A - (X - Y)
985 // (A - X) - Y => A - (X + Y)
986 // REASSOC_XA_BY:
987 // (X + A) + Y => (X + Y) + A
988 // (X + A) - Y => (X - Y) + A
989 // (X - A) + Y => (X + Y) - A
990 // (X - A) - Y => (X - Y) - A
991 // REASSOC_AX_YB:
992 // Y + (A + X) => (Y + X) + A
993 // Y - (A + X) => (Y - X) - A
994 // Y + (A - X) => (Y - X) + A
995 // Y - (A - X) => (Y + X) - A
996 // REASSOC_XA_YB:
997 // Y + (X + A) => (Y + X) + A
998 // Y - (X + A) => (Y - X) - A
999 // Y + (X - A) => (Y + X) - A
1000 // Y - (X - A) => (Y - X) + A
1001 switch (Pattern) {
1002 default:
1003 llvm_unreachable("Unexpected pattern");
1005 if (!AssocCommutRoot && AssocCommutPrev)
1006 return {AssocCommutOpcode, InverseOpcode};
1007 if (AssocCommutRoot && !AssocCommutPrev)
1008 return {InverseOpcode, InverseOpcode};
1009 if (!AssocCommutRoot && !AssocCommutPrev)
1010 return {InverseOpcode, AssocCommutOpcode};
1011 break;
1013 if (!AssocCommutRoot && AssocCommutPrev)
1014 return {AssocCommutOpcode, InverseOpcode};
1015 if (AssocCommutRoot && !AssocCommutPrev)
1016 return {InverseOpcode, AssocCommutOpcode};
1017 if (!AssocCommutRoot && !AssocCommutPrev)
1018 return {InverseOpcode, InverseOpcode};
1019 break;
1021 if (!AssocCommutRoot && AssocCommutPrev)
1022 return {InverseOpcode, InverseOpcode};
1023 if (AssocCommutRoot && !AssocCommutPrev)
1024 return {AssocCommutOpcode, InverseOpcode};
1025 if (!AssocCommutRoot && !AssocCommutPrev)
1026 return {InverseOpcode, AssocCommutOpcode};
1027 break;
1029 if (!AssocCommutRoot && AssocCommutPrev)
1030 return {InverseOpcode, InverseOpcode};
1031 if (AssocCommutRoot && !AssocCommutPrev)
1032 return {InverseOpcode, AssocCommutOpcode};
1033 if (!AssocCommutRoot && !AssocCommutPrev)
1034 return {AssocCommutOpcode, InverseOpcode};
1035 break;
1036 }
1037 llvm_unreachable("Unhandled combination");
1038}
1039
1040// Return a pair of boolean flags showing if the new root and new prev operands
1041// must be swapped. See visual example of the rule in
1042// TargetInstrInfo::getReassociationOpcodes.
1043static std::pair<bool, bool> mustSwapOperands(unsigned Pattern) {
1044 switch (Pattern) {
1045 default:
1046 llvm_unreachable("Unexpected pattern");
1048 return {false, false};
1050 return {true, false};
1052 return {true, true};
1054 return {true, true};
1055 }
1056}
1057
1059 const MachineInstr &Root, unsigned Pattern,
1060 std::array<unsigned, 5> &OperandIndices) const {
1061 switch (Pattern) {
1063 OperandIndices = {1, 1, 1, 2, 2};
1064 break;
1066 OperandIndices = {2, 1, 2, 2, 1};
1067 break;
1069 OperandIndices = {1, 2, 1, 1, 2};
1070 break;
1072 OperandIndices = {2, 2, 2, 1, 1};
1073 break;
1074 default:
1075 llvm_unreachable("unexpected MachineCombinerPattern");
1076 }
1077}
1078
1079/// Attempt the reassociation transformation to reduce critical path length.
1080/// See the above comments before getMachineCombinerPatterns().
1082 MachineInstr &Root, MachineInstr &Prev, unsigned Pattern,
1086 DenseMap<unsigned, unsigned> &InstrIdxForVirtReg) const {
1087 MachineFunction *MF = Root.getMF();
1091 const TargetRegisterClass *RC = Root.getRegClassConstraint(0, TII, TRI);
1092
1097 MachineOperand &OpC = Root.getOperand(0);
1098
1099 Register RegA = OpA.getReg();
1100 Register RegB = OpB.getReg();
1101 Register RegX = OpX.getReg();
1102 Register RegY = OpY.getReg();
1103 Register RegC = OpC.getReg();
1104
1105 if (RegA.isVirtual())
1106 MRI.constrainRegClass(RegA, RC);
1107 if (RegB.isVirtual())
1108 MRI.constrainRegClass(RegB, RC);
1109 if (RegX.isVirtual())
1110 MRI.constrainRegClass(RegX, RC);
1111 if (RegY.isVirtual())
1112 MRI.constrainRegClass(RegY, RC);
1113 if (RegC.isVirtual())
1114 MRI.constrainRegClass(RegC, RC);
1115
1116 // Create a new virtual register for the result of (X op Y) instead of
1117 // recycling RegB because the MachineCombiner's computation of the critical
1118 // path requires a new register definition rather than an existing one.
1119 Register NewVR = MRI.createVirtualRegister(RC);
1120 InstrIdxForVirtReg.insert(std::make_pair(NewVR, 0));
1121
1122 auto [NewRootOpc, NewPrevOpc] = getReassociationOpcodes(Pattern, Root, Prev);
1123 bool KillA = OpA.isKill();
1124 bool KillX = OpX.isKill();
1125 bool KillY = OpY.isKill();
1126 bool KillNewVR = true;
1127
1128 auto [SwapRootOperands, SwapPrevOperands] = mustSwapOperands(Pattern);
1129
1130 if (SwapPrevOperands) {
1131 std::swap(RegX, RegY);
1132 std::swap(KillX, KillY);
1133 }
1134
1135 unsigned PrevFirstOpIdx, PrevSecondOpIdx;
1136 unsigned RootFirstOpIdx, RootSecondOpIdx;
1137 switch (Pattern) {
1139 PrevFirstOpIdx = OperandIndices[1];
1140 PrevSecondOpIdx = OperandIndices[3];
1141 RootFirstOpIdx = OperandIndices[2];
1142 RootSecondOpIdx = OperandIndices[4];
1143 break;
1145 PrevFirstOpIdx = OperandIndices[1];
1146 PrevSecondOpIdx = OperandIndices[3];
1147 RootFirstOpIdx = OperandIndices[4];
1148 RootSecondOpIdx = OperandIndices[2];
1149 break;
1151 PrevFirstOpIdx = OperandIndices[3];
1152 PrevSecondOpIdx = OperandIndices[1];
1153 RootFirstOpIdx = OperandIndices[2];
1154 RootSecondOpIdx = OperandIndices[4];
1155 break;
1157 PrevFirstOpIdx = OperandIndices[3];
1158 PrevSecondOpIdx = OperandIndices[1];
1159 RootFirstOpIdx = OperandIndices[4];
1160 RootSecondOpIdx = OperandIndices[2];
1161 break;
1162 default:
1163 llvm_unreachable("unexpected MachineCombinerPattern");
1164 }
1165
1166 // Basically BuildMI but doesn't add implicit operands by default.
1167 auto buildMINoImplicit = [](MachineFunction &MF, const MIMetadata &MIMD,
1168 const MCInstrDesc &MCID, Register DestReg) {
1169 return MachineInstrBuilder(
1170 MF, MF.CreateMachineInstr(MCID, MIMD.getDL(), /*NoImpl=*/true))
1171 .setPCSections(MIMD.getPCSections())
1172 .addReg(DestReg, RegState::Define);
1173 };
1174
1175 // Create new instructions for insertion.
1176 MachineInstrBuilder MIB1 =
1177 buildMINoImplicit(*MF, MIMetadata(Prev), TII->get(NewPrevOpc), NewVR);
1178 for (const auto &MO : Prev.explicit_operands()) {
1179 unsigned Idx = MO.getOperandNo();
1180 // Skip the result operand we'd already added.
1181 if (Idx == 0)
1182 continue;
1183 if (Idx == PrevFirstOpIdx)
1184 MIB1.addReg(RegX, getKillRegState(KillX));
1185 else if (Idx == PrevSecondOpIdx)
1186 MIB1.addReg(RegY, getKillRegState(KillY));
1187 else
1188 MIB1.add(MO);
1189 }
1190 MIB1.copyImplicitOps(Prev);
1191
1192 if (SwapRootOperands) {
1193 std::swap(RegA, NewVR);
1194 std::swap(KillA, KillNewVR);
1195 }
1196
1197 MachineInstrBuilder MIB2 =
1198 buildMINoImplicit(*MF, MIMetadata(Root), TII->get(NewRootOpc), RegC);
1199 for (const auto &MO : Root.explicit_operands()) {
1200 unsigned Idx = MO.getOperandNo();
1201 // Skip the result operand.
1202 if (Idx == 0)
1203 continue;
1204 if (Idx == RootFirstOpIdx)
1205 MIB2 = MIB2.addReg(RegA, getKillRegState(KillA));
1206 else if (Idx == RootSecondOpIdx)
1207 MIB2 = MIB2.addReg(NewVR, getKillRegState(KillNewVR));
1208 else
1209 MIB2 = MIB2.add(MO);
1210 }
1211 MIB2.copyImplicitOps(Root);
1212
1213 // Propagate FP flags from the original instructions.
1214 // But clear poison-generating flags because those may not be valid now.
1215 // TODO: There should be a helper function for copying only fast-math-flags.
1216 uint32_t IntersectedFlags = Root.getFlags() & Prev.getFlags();
1217 MIB1->setFlags(IntersectedFlags);
1221
1222 MIB2->setFlags(IntersectedFlags);
1226
1227 setSpecialOperandAttr(Root, Prev, *MIB1, *MIB2);
1228
1229 // Record new instructions for insertion and old instructions for deletion.
1230 InsInstrs.push_back(MIB1);
1231 InsInstrs.push_back(MIB2);
1232 DelInstrs.push_back(&Prev);
1233 DelInstrs.push_back(&Root);
1234
1235 // We transformed:
1236 // B = A op X (Prev)
1237 // C = B op Y (Root)
1238 // Into:
1239 // B = X op Y (MIB1)
1240 // C = A op B (MIB2)
1241 // C has the same value as before, B doesn't; as such, keep the debug number
1242 // of C but not of B.
1243 if (unsigned OldRootNum = Root.peekDebugInstrNum())
1244 MIB2.getInstr()->setDebugInstrNum(OldRootNum);
1245}
1246
1248 MachineInstr &Root, unsigned Pattern,
1251 DenseMap<unsigned, unsigned> &InstIdxForVirtReg) const {
1253
1254 // Select the previous instruction in the sequence based on the input pattern.
1255 std::array<unsigned, 5> OperandIndices;
1257 MachineInstr *Prev =
1258 MRI.getUniqueVRegDef(Root.getOperand(OperandIndices[0]).getReg());
1259
1260 // Don't reassociate if Prev and Root are in different blocks.
1261 if (Prev->getParent() != Root.getParent())
1262 return;
1263
1264 reassociateOps(Root, *Prev, Pattern, InsInstrs, DelInstrs, OperandIndices,
1265 InstIdxForVirtReg);
1266}
1267
1270}
1271
1273 const MachineInstr &MI) const {
1274 const MachineFunction &MF = *MI.getMF();
1275 const MachineRegisterInfo &MRI = MF.getRegInfo();
1276
1277 // Remat clients assume operand 0 is the defined register.
1278 if (!MI.getNumOperands() || !MI.getOperand(0).isReg())
1279 return false;
1280 Register DefReg = MI.getOperand(0).getReg();
1281
1282 // A sub-register definition can only be rematerialized if the instruction
1283 // doesn't read the other parts of the register. Otherwise it is really a
1284 // read-modify-write operation on the full virtual register which cannot be
1285 // moved safely.
1286 if (DefReg.isVirtual() && MI.getOperand(0).getSubReg() &&
1287 MI.readsVirtualRegister(DefReg))
1288 return false;
1289
1290 // A load from a fixed stack slot can be rematerialized. This may be
1291 // redundant with subsequent checks, but it's target-independent,
1292 // simple, and a common case.
1293 int FrameIdx = 0;
1294 if (isLoadFromStackSlot(MI, FrameIdx) &&
1295 MF.getFrameInfo().isImmutableObjectIndex(FrameIdx))
1296 return true;
1297
1298 // Avoid instructions obviously unsafe for remat.
1299 if (MI.isNotDuplicable() || MI.mayStore() || MI.mayRaiseFPException() ||
1300 MI.hasUnmodeledSideEffects())
1301 return false;
1302
1303 // Don't remat inline asm. We have no idea how expensive it is
1304 // even if it's side effect free.
1305 if (MI.isInlineAsm())
1306 return false;
1307
1308 // Avoid instructions which load from potentially varying memory.
1309 if (MI.mayLoad() && !MI.isDereferenceableInvariantLoad())
1310 return false;
1311
1312 // If any of the registers accessed are non-constant, conservatively assume
1313 // the instruction is not rematerializable.
1314 for (const MachineOperand &MO : MI.operands()) {
1315 if (!MO.isReg()) continue;
1316 Register Reg = MO.getReg();
1317 if (Reg == 0)
1318 continue;
1319
1320 // Check for a well-behaved physical register.
1321 if (Reg.isPhysical()) {
1322 if (MO.isUse()) {
1323 // If the physreg has no defs anywhere, it's just an ambient register
1324 // and we can freely move its uses. Alternatively, if it's allocatable,
1325 // it could get allocated to something with a def during allocation.
1326 if (!MRI.isConstantPhysReg(Reg))
1327 return false;
1328 } else {
1329 // A physreg def. We can't remat it.
1330 return false;
1331 }
1332 continue;
1333 }
1334
1335 // Only allow one virtual-register def. There may be multiple defs of the
1336 // same virtual register, though.
1337 if (MO.isDef() && Reg != DefReg)
1338 return false;
1339
1340 // Don't allow any virtual-register uses. Rematting an instruction with
1341 // virtual register uses would length the live ranges of the uses, which
1342 // is not necessarily a good idea, certainly not "trivial".
1343 if (MO.isUse())
1344 return false;
1345 }
1346
1347 // Everything checked out.
1348 return true;
1349}
1350
1352 const MachineFunction *MF = MI.getMF();
1354 bool StackGrowsDown =
1356
1357 unsigned FrameSetupOpcode = getCallFrameSetupOpcode();
1358 unsigned FrameDestroyOpcode = getCallFrameDestroyOpcode();
1359
1360 if (!isFrameInstr(MI))
1361 return 0;
1362
1363 int SPAdj = TFI->alignSPAdjust(getFrameSize(MI));
1364
1365 if ((!StackGrowsDown && MI.getOpcode() == FrameSetupOpcode) ||
1366 (StackGrowsDown && MI.getOpcode() == FrameDestroyOpcode))
1367 SPAdj = -SPAdj;
1368
1369 return SPAdj;
1370}
1371
1372/// isSchedulingBoundary - Test if the given instruction should be
1373/// considered a scheduling boundary. This primarily includes labels
1374/// and terminators.
1376 const MachineBasicBlock *MBB,
1377 const MachineFunction &MF) const {
1378 // Terminators and labels can't be scheduled around.
1379 if (MI.isTerminator() || MI.isPosition())
1380 return true;
1381
1382 // INLINEASM_BR can jump to another block
1383 if (MI.getOpcode() == TargetOpcode::INLINEASM_BR)
1384 return true;
1385
1386 // Don't attempt to schedule around any instruction that defines
1387 // a stack-oriented pointer, as it's unlikely to be profitable. This
1388 // saves compile time, because it doesn't require every single
1389 // stack slot reference to depend on the instruction that does the
1390 // modification.
1391 const TargetLowering &TLI = *MF.getSubtarget().getTargetLowering();
1393 return MI.modifiesRegister(TLI.getStackPointerRegisterToSaveRestore(), TRI);
1394}
1395
1396// Provide a global flag for disabling the PreRA hazard recognizer that targets
1397// may choose to honor.
1400}
1401
1402// Default implementation of CreateTargetRAHazardRecognizer.
1405 const ScheduleDAG *DAG) const {
1406 // Dummy hazard recognizer allows all instructions to issue.
1407 return new ScheduleHazardRecognizer();
1408}
1409
1410// Default implementation of CreateTargetMIHazardRecognizer.
1412 const InstrItineraryData *II, const ScheduleDAGMI *DAG) const {
1413 return new ScoreboardHazardRecognizer(II, DAG, "machine-scheduler");
1414}
1415
1416// Default implementation of CreateTargetPostRAHazardRecognizer.
1419 const ScheduleDAG *DAG) const {
1420 return new ScoreboardHazardRecognizer(II, DAG, "post-RA-sched");
1421}
1422
1423// Default implementation of getMemOperandWithOffset.
1425 const MachineInstr &MI, const MachineOperand *&BaseOp, int64_t &Offset,
1426 bool &OffsetIsScalable, const TargetRegisterInfo *TRI) const {
1428 LocationSize Width = 0;
1429 if (!getMemOperandsWithOffsetWidth(MI, BaseOps, Offset, OffsetIsScalable,
1430 Width, TRI) ||
1431 BaseOps.size() != 1)
1432 return false;
1433 BaseOp = BaseOps.front();
1434 return true;
1435}
1436
1437//===----------------------------------------------------------------------===//
1438// SelectionDAG latency interface.
1439//===----------------------------------------------------------------------===//
1440
1441std::optional<unsigned>
1443 SDNode *DefNode, unsigned DefIdx,
1444 SDNode *UseNode, unsigned UseIdx) const {
1445 if (!ItinData || ItinData->isEmpty())
1446 return std::nullopt;
1447
1448 if (!DefNode->isMachineOpcode())
1449 return std::nullopt;
1450
1451 unsigned DefClass = get(DefNode->getMachineOpcode()).getSchedClass();
1452 if (!UseNode->isMachineOpcode())
1453 return ItinData->getOperandCycle(DefClass, DefIdx);
1454 unsigned UseClass = get(UseNode->getMachineOpcode()).getSchedClass();
1455 return ItinData->getOperandLatency(DefClass, DefIdx, UseClass, UseIdx);
1456}
1457
1459 SDNode *N) const {
1460 if (!ItinData || ItinData->isEmpty())
1461 return 1;
1462
1463 if (!N->isMachineOpcode())
1464 return 1;
1465
1466 return ItinData->getStageLatency(get(N->getMachineOpcode()).getSchedClass());
1467}
1468
1469//===----------------------------------------------------------------------===//
1470// MachineInstr latency interface.
1471//===----------------------------------------------------------------------===//
1472
1474 const MachineInstr &MI) const {
1475 if (!ItinData || ItinData->isEmpty())
1476 return 1;
1477
1478 unsigned Class = MI.getDesc().getSchedClass();
1479 int UOps = ItinData->Itineraries[Class].NumMicroOps;
1480 if (UOps >= 0)
1481 return UOps;
1482
1483 // The # of u-ops is dynamically determined. The specific target should
1484 // override this function to return the right number.
1485 return 1;
1486}
1487
1488/// Return the default expected latency for a def based on it's opcode.
1490 const MachineInstr &DefMI) const {
1491 if (DefMI.isTransient())
1492 return 0;
1493 if (DefMI.mayLoad())
1494 return SchedModel.LoadLatency;
1495 if (isHighLatencyDef(DefMI.getOpcode()))
1496 return SchedModel.HighLatency;
1497 return 1;
1498}
1499
1501 return 0;
1502}
1503
1505 const MachineInstr &MI,
1506 unsigned *PredCost) const {
1507 // Default to one cycle for no itinerary. However, an "empty" itinerary may
1508 // still have a MinLatency property, which getStageLatency checks.
1509 if (!ItinData)
1510 return MI.mayLoad() ? 2 : 1;
1511
1512 return ItinData->getStageLatency(MI.getDesc().getSchedClass());
1513}
1514
1516 const MachineInstr &DefMI,
1517 unsigned DefIdx) const {
1518 const InstrItineraryData *ItinData = SchedModel.getInstrItineraries();
1519 if (!ItinData || ItinData->isEmpty())
1520 return false;
1521
1522 unsigned DefClass = DefMI.getDesc().getSchedClass();
1523 std::optional<unsigned> DefCycle =
1524 ItinData->getOperandCycle(DefClass, DefIdx);
1525 return DefCycle && DefCycle <= 1U;
1526}
1527
1529 // TODO: We don't split functions where a section attribute has been set
1530 // since the split part may not be placed in a contiguous region. It may also
1531 // be more beneficial to augment the linker to ensure contiguous layout of
1532 // split functions within the same section as specified by the attribute.
1533 if (MF.getFunction().hasSection())
1534 return false;
1535
1536 // We don't want to proceed further for cold functions
1537 // or functions of unknown hotness. Lukewarm functions have no prefix.
1538 std::optional<StringRef> SectionPrefix = MF.getFunction().getSectionPrefix();
1539 if (SectionPrefix &&
1540 (*SectionPrefix == "unlikely" || *SectionPrefix == "unknown")) {
1541 return false;
1542 }
1543
1544 return true;
1545}
1546
1547std::optional<ParamLoadedValue>
1549 Register Reg) const {
1550 const MachineFunction *MF = MI.getMF();
1553 int64_t Offset;
1554 bool OffsetIsScalable;
1555
1556 // To simplify the sub-register handling, verify that we only need to
1557 // consider physical registers.
1560
1561 if (auto DestSrc = isCopyInstr(MI)) {
1562 Register DestReg = DestSrc->Destination->getReg();
1563
1564 // If the copy destination is the forwarding reg, describe the forwarding
1565 // reg using the copy source as the backup location. Example:
1566 //
1567 // x0 = MOV x7
1568 // call callee(x0) ; x0 described as x7
1569 if (Reg == DestReg)
1570 return ParamLoadedValue(*DestSrc->Source, Expr);
1571
1572 // If the target's hook couldn't describe this copy, give up.
1573 return std::nullopt;
1574 } else if (auto RegImm = isAddImmediate(MI, Reg)) {
1575 Register SrcReg = RegImm->Reg;
1576 Offset = RegImm->Imm;
1578 return ParamLoadedValue(MachineOperand::CreateReg(SrcReg, false), Expr);
1579 } else if (MI.hasOneMemOperand()) {
1580 // Only describe memory which provably does not escape the function. As
1581 // described in llvm.org/PR43343, escaped memory may be clobbered by the
1582 // callee (or by another thread).
1583 const auto &TII = MF->getSubtarget().getInstrInfo();
1584 const MachineFrameInfo &MFI = MF->getFrameInfo();
1585 const MachineMemOperand *MMO = MI.memoperands()[0];
1586 const PseudoSourceValue *PSV = MMO->getPseudoValue();
1587
1588 // If the address points to "special" memory (e.g. a spill slot), it's
1589 // sufficient to check that it isn't aliased by any high-level IR value.
1590 if (!PSV || PSV->mayAlias(&MFI))
1591 return std::nullopt;
1592
1593 const MachineOperand *BaseOp;
1594 if (!TII->getMemOperandWithOffset(MI, BaseOp, Offset, OffsetIsScalable,
1595 TRI))
1596 return std::nullopt;
1597
1598 // FIXME: Scalable offsets are not yet handled in the offset code below.
1599 if (OffsetIsScalable)
1600 return std::nullopt;
1601
1602 // TODO: Can currently only handle mem instructions with a single define.
1603 // An example from the x86 target:
1604 // ...
1605 // DIV64m $rsp, 1, $noreg, 24, $noreg, implicit-def dead $rax, implicit-def $rdx
1606 // ...
1607 //
1608 if (MI.getNumExplicitDefs() != 1)
1609 return std::nullopt;
1610
1611 // TODO: In what way do we need to take Reg into consideration here?
1612
1615 Ops.push_back(dwarf::DW_OP_deref_size);
1616 Ops.push_back(MMO->getSize().hasValue() ? MMO->getSize().getValue()
1617 : ~UINT64_C(0));
1618 Expr = DIExpression::prependOpcodes(Expr, Ops);
1619 return ParamLoadedValue(*BaseOp, Expr);
1620 }
1621
1622 return std::nullopt;
1623}
1624
1625// Get the call frame size just before MI.
1627 // Search backwards from MI for the most recent call frame instruction.
1628 MachineBasicBlock *MBB = MI.getParent();
1629 for (auto &AdjI : reverse(make_range(MBB->instr_begin(), MI.getIterator()))) {
1630 if (AdjI.getOpcode() == getCallFrameSetupOpcode())
1631 return getFrameTotalSize(AdjI);
1632 if (AdjI.getOpcode() == getCallFrameDestroyOpcode())
1633 return 0;
1634 }
1635
1636 // If none was found, use the call frame size from the start of the basic
1637 // block.
1638 return MBB->getCallFrameSize();
1639}
1640
1641/// Both DefMI and UseMI must be valid. By default, call directly to the
1642/// itinerary. This may be overriden by the target.
1644 const InstrItineraryData *ItinData, const MachineInstr &DefMI,
1645 unsigned DefIdx, const MachineInstr &UseMI, unsigned UseIdx) const {
1646 unsigned DefClass = DefMI.getDesc().getSchedClass();
1647 unsigned UseClass = UseMI.getDesc().getSchedClass();
1648 return ItinData->getOperandLatency(DefClass, DefIdx, UseClass, UseIdx);
1649}
1650
1652 const MachineInstr &MI, unsigned DefIdx,
1653 SmallVectorImpl<RegSubRegPairAndIdx> &InputRegs) const {
1654 assert((MI.isRegSequence() ||
1655 MI.isRegSequenceLike()) && "Instruction do not have the proper type");
1656
1657 if (!MI.isRegSequence())
1658 return getRegSequenceLikeInputs(MI, DefIdx, InputRegs);
1659
1660 // We are looking at:
1661 // Def = REG_SEQUENCE v0, sub0, v1, sub1, ...
1662 assert(DefIdx == 0 && "REG_SEQUENCE only has one def");
1663 for (unsigned OpIdx = 1, EndOpIdx = MI.getNumOperands(); OpIdx != EndOpIdx;
1664 OpIdx += 2) {
1665 const MachineOperand &MOReg = MI.getOperand(OpIdx);
1666 if (MOReg.isUndef())
1667 continue;
1668 const MachineOperand &MOSubIdx = MI.getOperand(OpIdx + 1);
1669 assert(MOSubIdx.isImm() &&
1670 "One of the subindex of the reg_sequence is not an immediate");
1671 // Record Reg:SubReg, SubIdx.
1672 InputRegs.push_back(RegSubRegPairAndIdx(MOReg.getReg(), MOReg.getSubReg(),
1673 (unsigned)MOSubIdx.getImm()));
1674 }
1675 return true;
1676}
1677
1679 const MachineInstr &MI, unsigned DefIdx,
1680 RegSubRegPairAndIdx &InputReg) const {
1681 assert((MI.isExtractSubreg() ||
1682 MI.isExtractSubregLike()) && "Instruction do not have the proper type");
1683
1684 if (!MI.isExtractSubreg())
1685 return getExtractSubregLikeInputs(MI, DefIdx, InputReg);
1686
1687 // We are looking at:
1688 // Def = EXTRACT_SUBREG v0.sub1, sub0.
1689 assert(DefIdx == 0 && "EXTRACT_SUBREG only has one def");
1690 const MachineOperand &MOReg = MI.getOperand(1);
1691 if (MOReg.isUndef())
1692 return false;
1693 const MachineOperand &MOSubIdx = MI.getOperand(2);
1694 assert(MOSubIdx.isImm() &&
1695 "The subindex of the extract_subreg is not an immediate");
1696
1697 InputReg.Reg = MOReg.getReg();
1698 InputReg.SubReg = MOReg.getSubReg();
1699 InputReg.SubIdx = (unsigned)MOSubIdx.getImm();
1700 return true;
1701}
1702
1704 const MachineInstr &MI, unsigned DefIdx,
1705 RegSubRegPair &BaseReg, RegSubRegPairAndIdx &InsertedReg) const {
1706 assert((MI.isInsertSubreg() ||
1707 MI.isInsertSubregLike()) && "Instruction do not have the proper type");
1708
1709 if (!MI.isInsertSubreg())
1710 return getInsertSubregLikeInputs(MI, DefIdx, BaseReg, InsertedReg);
1711
1712 // We are looking at:
1713 // Def = INSERT_SEQUENCE v0, v1, sub0.
1714 assert(DefIdx == 0 && "INSERT_SUBREG only has one def");
1715 const MachineOperand &MOBaseReg = MI.getOperand(1);
1716 const MachineOperand &MOInsertedReg = MI.getOperand(2);
1717 if (MOInsertedReg.isUndef())
1718 return false;
1719 const MachineOperand &MOSubIdx = MI.getOperand(3);
1720 assert(MOSubIdx.isImm() &&
1721 "One of the subindex of the reg_sequence is not an immediate");
1722 BaseReg.Reg = MOBaseReg.getReg();
1723 BaseReg.SubReg = MOBaseReg.getSubReg();
1724
1725 InsertedReg.Reg = MOInsertedReg.getReg();
1726 InsertedReg.SubReg = MOInsertedReg.getSubReg();
1727 InsertedReg.SubIdx = (unsigned)MOSubIdx.getImm();
1728 return true;
1729}
1730
1731// Returns a MIRPrinter comment for this machine operand.
1733 const MachineInstr &MI, const MachineOperand &Op, unsigned OpIdx,
1734 const TargetRegisterInfo *TRI) const {
1735
1736 if (!MI.isInlineAsm())
1737 return "";
1738
1739 std::string Flags;
1740 raw_string_ostream OS(Flags);
1741
1742 if (OpIdx == InlineAsm::MIOp_ExtraInfo) {
1743 // Print HasSideEffects, MayLoad, MayStore, IsAlignStack
1744 unsigned ExtraInfo = Op.getImm();
1745 bool First = true;
1746 for (StringRef Info : InlineAsm::getExtraInfoNames(ExtraInfo)) {
1747 if (!First)
1748 OS << " ";
1749 First = false;
1750 OS << Info;
1751 }
1752
1753 return Flags;
1754 }
1755
1756 int FlagIdx = MI.findInlineAsmFlagIdx(OpIdx);
1757 if (FlagIdx < 0 || (unsigned)FlagIdx != OpIdx)
1758 return "";
1759
1760 assert(Op.isImm() && "Expected flag operand to be an immediate");
1761 // Pretty print the inline asm operand descriptor.
1762 unsigned Flag = Op.getImm();
1763 const InlineAsm::Flag F(Flag);
1764 OS << F.getKindName();
1765
1766 unsigned RCID;
1767 if (!F.isImmKind() && !F.isMemKind() && F.hasRegClassConstraint(RCID)) {
1768 if (TRI) {
1769 OS << ':' << TRI->getRegClassName(TRI->getRegClass(RCID));
1770 } else
1771 OS << ":RC" << RCID;
1772 }
1773
1774 if (F.isMemKind()) {
1775 InlineAsm::ConstraintCode MCID = F.getMemoryConstraintID();
1776 OS << ":" << InlineAsm::getMemConstraintName(MCID);
1777 }
1778
1779 unsigned TiedTo;
1780 if (F.isUseOperandTiedToDef(TiedTo))
1781 OS << " tiedto:$" << TiedTo;
1782
1783 if ((F.isRegDefKind() || F.isRegDefEarlyClobberKind() || F.isRegUseKind()) &&
1784 F.getRegMayBeFolded())
1785 OS << " foldable";
1786
1787 return Flags;
1788}
1789
1791
1793 Function &F, std::vector<outliner::Candidate> &Candidates) const {
1794 // Include target features from an arbitrary candidate for the outlined
1795 // function. This makes sure the outlined function knows what kinds of
1796 // instructions are going into it. This is fine, since all parent functions
1797 // must necessarily support the instructions that are in the outlined region.
1798 outliner::Candidate &FirstCand = Candidates.front();
1799 const Function &ParentFn = FirstCand.getMF()->getFunction();
1800 if (ParentFn.hasFnAttribute("target-features"))
1801 F.addFnAttr(ParentFn.getFnAttribute("target-features"));
1802 if (ParentFn.hasFnAttribute("target-cpu"))
1803 F.addFnAttr(ParentFn.getFnAttribute("target-cpu"));
1804
1805 // Set nounwind, so we don't generate eh_frame.
1806 if (llvm::all_of(Candidates, [](const outliner::Candidate &C) {
1807 return C.getMF()->getFunction().hasFnAttribute(Attribute::NoUnwind);
1808 }))
1809 F.addFnAttr(Attribute::NoUnwind);
1810}
1811
1815 unsigned Flags) const {
1816 MachineInstr &MI = *MIT;
1817
1818 // NOTE: MI.isMetaInstruction() will match CFI_INSTRUCTION, but some targets
1819 // have support for outlining those. Special-case that here.
1820 if (MI.isCFIInstruction())
1821 // Just go right to the target implementation.
1822 return getOutliningTypeImpl(MMI, MIT, Flags);
1823
1824 // Be conservative about inline assembly.
1825 if (MI.isInlineAsm())
1827
1828 // Labels generally can't safely be outlined.
1829 if (MI.isLabel())
1831
1832 // Don't let debug instructions impact analysis.
1833 if (MI.isDebugInstr())
1835
1836 // Some other special cases.
1837 switch (MI.getOpcode()) {
1838 case TargetOpcode::IMPLICIT_DEF:
1839 case TargetOpcode::KILL:
1840 case TargetOpcode::LIFETIME_START:
1841 case TargetOpcode::LIFETIME_END:
1843 default:
1844 break;
1845 }
1846
1847 // Is this a terminator for a basic block?
1848 if (MI.isTerminator()) {
1849 // If this is a branch to another block, we can't outline it.
1850 if (!MI.getParent()->succ_empty())
1852
1853 // Don't outline if the branch is not unconditional.
1854 if (isPredicated(MI))
1856 }
1857
1858 // Make sure none of the operands of this instruction do anything that
1859 // might break if they're moved outside their current function.
1860 // This includes MachineBasicBlock references, BlockAddressses,
1861 // Constant pool indices and jump table indices.
1862 //
1863 // A quick note on MO_TargetIndex:
1864 // This doesn't seem to be used in any of the architectures that the
1865 // MachineOutliner supports, but it was still filtered out in all of them.
1866 // There was one exception (RISC-V), but MO_TargetIndex also isn't used there.
1867 // As such, this check is removed both here and in the target-specific
1868 // implementations. Instead, we assert to make sure this doesn't
1869 // catch anyone off-guard somewhere down the line.
1870 for (const MachineOperand &MOP : MI.operands()) {
1871 // If you hit this assertion, please remove it and adjust
1872 // `getOutliningTypeImpl` for your target appropriately if necessary.
1873 // Adding the assertion back to other supported architectures
1874 // would be nice too :)
1875 assert(!MOP.isTargetIndex() && "This isn't used quite yet!");
1876
1877 // CFI instructions should already have been filtered out at this point.
1878 assert(!MOP.isCFIIndex() && "CFI instructions handled elsewhere!");
1879
1880 // PrologEpilogInserter should've already run at this point.
1881 assert(!MOP.isFI() && "FrameIndex instructions should be gone by now!");
1882
1883 if (MOP.isMBB() || MOP.isBlockAddress() || MOP.isCPI() || MOP.isJTI())
1885 }
1886
1887 // If we don't know, delegate to the target-specific hook.
1888 return getOutliningTypeImpl(MMI, MIT, Flags);
1889}
1890
1892 unsigned &Flags) const {
1893 // Some instrumentations create special TargetOpcode at the start which
1894 // expands to special code sequences which must be present.
1896 if (First == MBB.end())
1897 return true;
1898
1899 if (First->getOpcode() == TargetOpcode::FENTRY_CALL ||
1900 First->getOpcode() == TargetOpcode::PATCHABLE_FUNCTION_ENTER)
1901 return false;
1902
1903 // Some instrumentations create special pseudo-instructions at or just before
1904 // the end that must be present.
1905 auto Last = MBB.getLastNonDebugInstr();
1906 if (Last->getOpcode() == TargetOpcode::PATCHABLE_RET ||
1907 Last->getOpcode() == TargetOpcode::PATCHABLE_TAIL_CALL)
1908 return false;
1909
1910 if (Last != First && Last->isReturn()) {
1911 --Last;
1912 if (Last->getOpcode() == TargetOpcode::PATCHABLE_FUNCTION_EXIT ||
1913 Last->getOpcode() == TargetOpcode::PATCHABLE_TAIL_CALL)
1914 return false;
1915 }
1916 return true;
1917}
unsigned SubReg
unsigned const MachineRegisterInfo * MRI
MachineInstrBuilder & UseMI
MachineInstrBuilder MachineInstrBuilder & DefMI
SmallVector< int16_t, MAX_SRC_OPERANDS_NUM > OperandIndices
MachineBasicBlock & MBB
MachineBasicBlock MachineBasicBlock::iterator DebugLoc DL
Analysis containing CSE Info
Definition: CSEInfo.cpp:27
Returns the sub type a function will return at a given Idx Should correspond to the result type of an ExtractValue instruction executed with just that one unsigned Idx
This file contains constants used for implementing Dwarf debug support.
uint64_t Size
static Function * getFunction(Constant *C)
Definition: Evaluator.cpp:236
const HexagonInstrInfo * TII
IRTranslator LLVM IR MI
#define F(x, y, z)
Definition: MD5.cpp:55
#define I(x, y, z)
Definition: MD5.cpp:58
unsigned const TargetRegisterInfo * TRI
static unsigned getReg(const MCDisassembler *D, unsigned RC, unsigned RegNo)
uint64_t IntrinsicInst * II
assert(ImpDefSCC.getReg()==AMDGPU::SCC &&ImpDefSCC.isDef())
raw_pwrite_stream & OS
This file contains some functions that are useful when dealing with strings.
static bool isAsmComment(const char *Str, const MCAsmInfo &MAI)
static void transferImplicitOperands(MachineInstr *MI, const TargetRegisterInfo *TRI)
transferImplicitOperands - MI is a pseudo-instruction, and the lowered replacement instructions immed...
static std::pair< bool, bool > mustSwapOperands(unsigned Pattern)
static const TargetRegisterClass * canFoldCopy(const MachineInstr &MI, const TargetInstrInfo &TII, unsigned FoldIdx)
static void foldInlineAsmMemOperand(MachineInstr *MI, unsigned OpNo, int FI, const TargetInstrInfo &TII)
static cl::opt< bool > DisableHazardRecognizer("disable-sched-hazard", cl::Hidden, cl::init(false), cl::desc("Disable hazard detection during preRA scheduling"))
static MachineInstr * foldPatchpoint(MachineFunction &MF, MachineInstr &MI, ArrayRef< unsigned > Ops, int FrameIndex, const TargetInstrInfo &TII)
This file describes how to lower LLVM code to machine code.
ArrayRef - Represent a constant reference to an array (0 or more elements consecutively in memory),...
Definition: ArrayRef.h:41
size_t size() const
size - Get the array size.
Definition: ArrayRef.h:165
DWARF expression.
static void appendOffset(SmallVectorImpl< uint64_t > &Ops, int64_t Offset)
Append Ops with operations to apply the Offset.
static DIExpression * prepend(const DIExpression *Expr, uint8_t Flags, int64_t Offset=0)
Prepend DIExpr with a deref and offset operation and optionally turn it into a stack value or/and an ...
static DIExpression * prependOpcodes(const DIExpression *Expr, SmallVectorImpl< uint64_t > &Ops, bool StackValue=false, bool EntryValue=false)
Prepend DIExpr with the given opcodes and optionally turn it into a stack value.
This class represents an Operation in the Expression.
bool isLittleEndian() const
Layout endianness...
Definition: DataLayout.h:195
A debug info location.
Definition: DebugLoc.h:33
std::pair< iterator, bool > insert(const std::pair< KeyT, ValueT > &KV)
Definition: DenseMap.h:211
std::optional< StringRef > getSectionPrefix() const
Get the section prefix for this function.
Definition: Function.cpp:2118
Attribute getFnAttribute(Attribute::AttrKind Kind) const
Return the attribute for the given attribute kind.
Definition: Function.cpp:769
LLVMContext & getContext() const
getContext - Return a reference to the LLVMContext associated with this function.
Definition: Function.cpp:380
bool hasFnAttribute(Attribute::AttrKind Kind) const
Return true if the function has the attribute.
Definition: Function.cpp:743
bool hasSection() const
Check if this global has a custom object file section.
Definition: GlobalObject.h:110
static std::vector< StringRef > getExtraInfoNames(unsigned ExtraInfo)
Definition: InlineAsm.h:443
static StringRef getMemConstraintName(ConstraintCode C)
Definition: InlineAsm.h:467
Itinerary data supplied by a subtarget to be used by a target.
std::optional< unsigned > getOperandCycle(unsigned ItinClassIndx, unsigned OperandIdx) const
Return the cycle for the given class and operand.
unsigned getStageLatency(unsigned ItinClassIndx) const
Return the total stage latency of the given class.
std::optional< unsigned > getOperandLatency(unsigned DefClass, unsigned DefIdx, unsigned UseClass, unsigned UseIdx) const
Compute and return the use operand latency of a given itinerary class and operand index if the value ...
const InstrItinerary * Itineraries
Array of itineraries selected.
bool isEmpty() const
Returns true if there are no itineraries.
bool hasValue() const
TypeSize getValue() const
This class is intended to be used as a base class for asm properties and features specific to the tar...
Definition: MCAsmInfo.h:56
virtual unsigned getMaxInstLength(const MCSubtargetInfo *STI=nullptr) const
Returns the maximum possible encoded instruction size in bytes.
Definition: MCAsmInfo.h:602
StringRef getCommentString() const
Definition: MCAsmInfo.h:615
const char * getSeparatorString() const
Definition: MCAsmInfo.h:610
Instances of this class represent a single low-level machine instruction.
Definition: MCInst.h:184
Describe properties that are true of each instruction in the target description file.
Definition: MCInstrDesc.h:198
unsigned getSchedClass() const
Return the scheduling class for this instruction.
Definition: MCInstrDesc.h:600
unsigned getNumOperands() const
Return the number of declared MachineOperands for this MachineInstruction.
Definition: MCInstrDesc.h:237
ArrayRef< MCOperandInfo > operands() const
Definition: MCInstrDesc.h:239
unsigned getNumDefs() const
Return the number of MachineOperands that are register definitions.
Definition: MCInstrDesc.h:248
bool isCommutable() const
Return true if this may be a 2- or 3-address instruction (of the form "X = op Y, Z,...
Definition: MCInstrDesc.h:481
const MCInstrDesc & get(unsigned Opcode) const
Return the machine instruction descriptor that corresponds to the specified instruction opcode.
Definition: MCInstrInfo.h:63
static MDTuple * get(LLVMContext &Context, ArrayRef< Metadata * > MDs)
Definition: Metadata.h:1542
Set of metadata that should be preserved when using BuildMI().
instr_iterator instr_begin()
instr_iterator insert(instr_iterator I, MachineInstr *M)
Insert MI into the instruction list before I, possibly inside a bundle.
iterator getFirstNonDebugInstr(bool SkipPseudoOp=true)
Returns an iterator to the first non-debug instruction in the basic block, or end().
void addSuccessor(MachineBasicBlock *Succ, BranchProbability Prob=BranchProbability::getUnknown())
Add Succ as a successor of this MachineBasicBlock.
void removeSuccessor(MachineBasicBlock *Succ, bool NormalizeSuccProbs=false)
Remove successor from the successors list of this MachineBasicBlock.
iterator getLastNonDebugInstr(bool SkipPseudoOp=true)
Returns an iterator to the last non-debug instruction in the basic block, or end().
const MachineFunction * getParent() const
Return the MachineFunction containing this basic block.
instr_iterator erase(instr_iterator I)
Remove an instruction from the instruction list and delete it.
unsigned getCallFrameSize() const
Return the call frame size on entry to this basic block.
The MachineFrameInfo class represents an abstract stack frame until prolog/epilog code is inserted.
bool isImmutableObjectIndex(int ObjectIdx) const
Returns true if the specified index corresponds to an immutable object.
Align getObjectAlign(int ObjectIdx) const
Return the alignment of the specified stack object.
int64_t getObjectSize(int ObjectIdx) const
Return the size of the specified object.
int64_t getObjectOffset(int ObjectIdx) const
Return the assigned stack offset of the specified object from the incoming stack pointer.
bool hasProperty(Property P) const
const TargetSubtargetInfo & getSubtarget() const
getSubtarget - Return the subtarget for which this machine code is being compiled.
MachineInstr & cloneMachineInstrBundle(MachineBasicBlock &MBB, MachineBasicBlock::iterator InsertBefore, const MachineInstr &Orig)
Clones instruction or the whole instruction bundle Orig and insert into MBB before InsertBefore.
MachineInstr * CreateMachineInstr(const MCInstrDesc &MCID, DebugLoc DL, bool NoImplicit=false)
CreateMachineInstr - Allocate a new MachineInstr.
MachineMemOperand * getMachineMemOperand(MachinePointerInfo PtrInfo, MachineMemOperand::Flags f, LLT MemTy, Align base_alignment, const AAMDNodes &AAInfo=AAMDNodes(), const MDNode *Ranges=nullptr, SyncScope::ID SSID=SyncScope::System, AtomicOrdering Ordering=AtomicOrdering::NotAtomic, AtomicOrdering FailureOrdering=AtomicOrdering::NotAtomic)
getMachineMemOperand - Allocate a new MachineMemOperand.
MachineFrameInfo & getFrameInfo()
getFrameInfo - Return the frame info object for the current function.
MachineRegisterInfo & getRegInfo()
getRegInfo - Return information about the registers currently in use.
const DataLayout & getDataLayout() const
Return the DataLayout attached to the Module associated to this MF.
Function & getFunction()
Return the LLVM function that this machine code represents.
const LLVMTargetMachine & getTarget() const
getTarget - Return the target machine this machine code is compiled with
const MachineFunctionProperties & getProperties() const
Get the function properties.
MachineInstr * CloneMachineInstr(const MachineInstr *Orig)
Create a new MachineInstr which is a copy of Orig, identical in all ways except the instruction has n...
void eraseCallSiteInfo(const MachineInstr *MI)
Following functions update call site info.
const MachineInstrBuilder & addImm(int64_t Val) const
Add a new immediate operand.
const MachineInstrBuilder & add(const MachineOperand &MO) const
const MachineInstrBuilder & addFrameIndex(int Idx) const
const MachineInstrBuilder & addReg(Register RegNo, unsigned flags=0, unsigned SubReg=0) const
Add a new virtual register operand.
const MachineInstrBuilder & setPCSections(MDNode *MD) const
const MachineInstrBuilder & copyImplicitOps(const MachineInstr &OtherMI) const
Copy all the implicit operands from OtherMI onto this one.
MachineInstr * getInstr() const
If conversion operators fail, use this method to get the MachineInstr explicitly.
Representation of each machine instruction.
Definition: MachineInstr.h:69
unsigned getOpcode() const
Returns the opcode of this MachineInstr.
Definition: MachineInstr.h:569
const MachineBasicBlock * getParent() const
Definition: MachineInstr.h:346
void setFlags(unsigned flags)
Definition: MachineInstr.h:409
unsigned getNumOperands() const
Retuns the total number of operands.
Definition: MachineInstr.h:572
void setDebugInstrNum(unsigned Num)
Set instruction number of this MachineInstr.
Definition: MachineInstr.h:549
mmo_iterator memoperands_end() const
Access to memory operands of the instruction.
Definition: MachineInstr.h:807
iterator_range< mop_iterator > explicit_operands()
Definition: MachineInstr.h:691
unsigned peekDebugInstrNum() const
Examine the instruction number of this MachineInstr.
Definition: MachineInstr.h:545
void setMemRefs(MachineFunction &MF, ArrayRef< MachineMemOperand * > MemRefs)
Assign this MachineInstr's memory reference descriptor list.
bool mayLoad(QueryType Type=AnyInBundle) const
Return true if this instruction could possibly read memory.
bool isCFIInstruction() const
bool isNotDuplicable(QueryType Type=AnyInBundle) const
Return true if this instruction cannot be safely duplicated.
void clearFlag(MIFlag Flag)
clearFlag - Clear a MI flag.
Definition: MachineInstr.h:418
void tieOperands(unsigned DefIdx, unsigned UseIdx)
Add a tie between the register operands at DefIdx and UseIdx.
mmo_iterator memoperands_begin() const
Access to memory operands of the instruction.
Definition: MachineInstr.h:800
void cloneInstrSymbols(MachineFunction &MF, const MachineInstr &MI)
Clone another MachineInstr's pre- and post- instruction symbols and replace ours with it.
bool isIdenticalTo(const MachineInstr &Other, MICheckType Check=CheckDefs) const
Return true if this instruction is identical to Other.
const MachineFunction * getMF() const
Return the function that contains the basic block that this instruction belongs to.
ArrayRef< MachineMemOperand * > memoperands() const
Access to memory operands of the instruction.
Definition: MachineInstr.h:782
bool mayStore(QueryType Type=AnyInBundle) const
Return true if this instruction could possibly modify memory.
const MachineOperand & getOperand(unsigned i) const
Definition: MachineInstr.h:579
uint32_t getFlags() const
Return the MI flags bitvector.
Definition: MachineInstr.h:391
bool canFoldAsLoad(QueryType Type=IgnoreBundle) const
Return true for instructions that can be folded as memory operands in other instructions.
const TargetRegisterClass * getRegClassConstraint(unsigned OpIdx, const TargetInstrInfo *TII, const TargetRegisterInfo *TRI) const
Compute the static register class constraint for operand OpIdx.
void addMemOperand(MachineFunction &MF, MachineMemOperand *MO)
Add a MachineMemOperand to the machine instruction.
A description of a memory reference used in the backend.
LocationSize getSize() const
Return the size in bytes of the memory reference.
const PseudoSourceValue * getPseudoValue() const
Flags
Flags values. These may be or'd together.
@ MOLoad
The memory access reads data.
@ MOStore
The memory access writes data.
This class contains meta information specific to a module.
MachineOperand class - Representation of each machine instruction operand.
void setSubReg(unsigned subReg)
unsigned getSubReg() const
void setIsInternalRead(bool Val=true)
void setImm(int64_t immVal)
int64_t getImm() const
void setIsRenamable(bool Val=true)
bool isReg() const
isReg - Tests if this is a MO_Register operand.
void setReg(Register Reg)
Change the register this operand corresponds to.
bool isImm() const
isImm - Tests if this is a MO_Immediate operand.
void setIsKill(bool Val=true)
void setMBB(MachineBasicBlock *MBB)
void setIsUndef(bool Val=true)
Register getReg() const
getReg - Returns the register number.
static MachineOperand CreateReg(Register Reg, bool isDef, bool isImp=false, bool isKill=false, bool isDead=false, bool isUndef=false, bool isEarlyClobber=false, unsigned SubReg=0, bool isDebug=false, bool isInternalRead=false, bool isRenamable=false)
bool isMBB() const
isMBB - Tests if this is a MO_MachineBasicBlock operand.
MachineRegisterInfo - Keep track of information for virtual and physical registers,...
const TargetRegisterClass * getRegClass(Register Reg) const
Return the register class of the specified virtual register.
MI-level patchpoint operands.
Definition: StackMaps.h:76
Special value supplied for machine level alias analysis.
virtual bool mayAlias(const MachineFrameInfo *) const
Return true if the memory pointed to by this PseudoSourceValue can ever alias an LLVM IR Value.
Wrapper class representing virtual and physical registers.
Definition: Register.h:19
constexpr bool isVirtual() const
Return true if the specified register number is in the virtual register namespace.
Definition: Register.h:91
constexpr bool isPhysical() const
Return true if the specified register number is in the physical register namespace.
Definition: Register.h:95
Represents one node in the SelectionDAG.
bool isMachineOpcode() const
Test if this node has a post-isel opcode, directly corresponding to a MachineInstr opcode.
unsigned getMachineOpcode() const
This may only be called if isMachineOpcode returns true.
ScheduleDAGMI is an implementation of ScheduleDAGInstrs that simply schedules machine instructions ac...
HazardRecognizer - This determines whether or not an instruction can be issued this cycle,...
bool empty() const
Definition: SmallVector.h:94
size_t size() const
Definition: SmallVector.h:91
This class consists of common code factored out of the SmallVector class to reduce code duplication b...
Definition: SmallVector.h:586
void push_back(const T &Elt)
Definition: SmallVector.h:426
This is a 'vector' (really, a variable-sized array), optimized for the case when the array is small.
Definition: SmallVector.h:1209
MI-level stackmap operands.
Definition: StackMaps.h:35
MI-level Statepoint operands.
Definition: StackMaps.h:158
StringRef - Represent a constant reference to a string, i.e.
Definition: StringRef.h:50
constexpr size_t size() const
size - Get the string size.
Definition: StringRef.h:137
constexpr const char * data() const
data - Get a pointer to the start of the string (which may not be null terminated).
Definition: StringRef.h:131
Information about stack frame layout on the target.
StackDirection getStackGrowthDirection() const
getStackGrowthDirection - Return the direction the stack grows
int alignSPAdjust(int SPAdj) const
alignSPAdjust - This method aligns the stack adjustment to the correct alignment.
TargetInstrInfo - Interface to description of machine instruction set.
virtual ScheduleHazardRecognizer * CreateTargetPostRAHazardRecognizer(const InstrItineraryData *, const ScheduleDAG *DAG) const
Allocate and return a hazard recognizer to use for this target when scheduling the machine instructio...
virtual MachineInstr * foldMemoryOperandImpl(MachineFunction &MF, MachineInstr &MI, ArrayRef< unsigned > Ops, MachineBasicBlock::iterator InsertPt, int FrameIndex, LiveIntervals *LIS=nullptr, VirtRegMap *VRM=nullptr) const
Target-dependent implementation for foldMemoryOperand.
virtual bool hasLowDefLatency(const TargetSchedModel &SchedModel, const MachineInstr &DefMI, unsigned DefIdx) const
Compute operand latency of a def of 'Reg'.
virtual void setSpecialOperandAttr(MachineInstr &OldMI1, MachineInstr &OldMI2, MachineInstr &NewMI1, MachineInstr &NewMI2) const
This is an architecture-specific helper function of reassociateOps.
virtual unsigned getNumMicroOps(const InstrItineraryData *ItinData, const MachineInstr &MI) const
Return the number of u-operations the given machine instruction will be decoded to on the target cpu.
virtual int getSPAdjust(const MachineInstr &MI) const
Returns the actual stack pointer adjustment made by an instruction as part of a call sequence.
virtual void loadRegFromStackSlot(MachineBasicBlock &MBB, MachineBasicBlock::iterator MI, Register DestReg, int FrameIndex, const TargetRegisterClass *RC, const TargetRegisterInfo *TRI, Register VReg) const
Load the specified register of the given register class from the specified stack frame index.
virtual void ReplaceTailWithBranchTo(MachineBasicBlock::iterator Tail, MachineBasicBlock *NewDest) const
Delete the instruction OldInst and everything after it, replacing it with an unconditional branch to ...
virtual bool PredicateInstruction(MachineInstr &MI, ArrayRef< MachineOperand > Pred) const
Convert the instruction into a predicated instruction.
bool areOpcodesEqualOrInverse(unsigned Opcode1, unsigned Opcode2) const
Return true when \P Opcode1 or its inversion is equal to \P Opcode2.
virtual outliner::InstrType getOutliningTypeImpl(const MachineModuleInfo &MMI, MachineBasicBlock::iterator &MIT, unsigned Flags) const
Target-dependent implementation for getOutliningTypeImpl.
virtual bool getInsertSubregLikeInputs(const MachineInstr &MI, unsigned DefIdx, RegSubRegPair &BaseReg, RegSubRegPairAndIdx &InsertedReg) const
Target-dependent implementation of getInsertSubregInputs.
outliner::InstrType getOutliningType(const MachineModuleInfo &MMI, MachineBasicBlock::iterator &MIT, unsigned Flags) const
Returns how or if MIT should be outlined.
virtual bool isThroughputPattern(unsigned Pattern) const
Return true when a code sequence can improve throughput.
virtual std::pair< unsigned, unsigned > getPatchpointUnfoldableRange(const MachineInstr &MI) const
For a patchpoint, stackmap, or statepoint intrinsic, return the range of operands which can't be fold...
virtual void storeRegToStackSlot(MachineBasicBlock &MBB, MachineBasicBlock::iterator MI, Register SrcReg, bool isKill, int FrameIndex, const TargetRegisterClass *RC, const TargetRegisterInfo *TRI, Register VReg) const
Store the specified register of the given register class to the specified stack frame index.
virtual bool findCommutedOpIndices(const MachineInstr &MI, unsigned &SrcOpIdx1, unsigned &SrcOpIdx2) const
Returns true iff the routine could find two commutable operands in the given machine instruction.
virtual void mergeOutliningCandidateAttributes(Function &F, std::vector< outliner::Candidate > &Candidates) const
Optional target hook to create the LLVM IR attributes for the outlined function.
bool isUnpredicatedTerminator(const MachineInstr &MI) const
Returns true if the instruction is a terminator instruction that has not been predicated.
virtual void insertNoop(MachineBasicBlock &MBB, MachineBasicBlock::iterator MI) const
Insert a noop into the instruction stream at the specified point.
bool isFrameInstr(const MachineInstr &I) const
Returns true if the argument is a frame pseudo instruction.
virtual bool getRegSequenceLikeInputs(const MachineInstr &MI, unsigned DefIdx, SmallVectorImpl< RegSubRegPairAndIdx > &InputRegs) const
Target-dependent implementation of getRegSequenceInputs.
virtual bool getStackSlotRange(const TargetRegisterClass *RC, unsigned SubIdx, unsigned &Size, unsigned &Offset, const MachineFunction &MF) const
Compute the size in bytes and offset within a stack slot of a spilled register or subregister.
virtual ScheduleHazardRecognizer * CreateTargetMIHazardRecognizer(const InstrItineraryData *, const ScheduleDAGMI *DAG) const
Allocate and return a hazard recognizer to use for this target when scheduling the machine instructio...
virtual bool hasStoreToStackSlot(const MachineInstr &MI, SmallVectorImpl< const MachineMemOperand * > &Accesses) const
If the specified machine instruction has a store to a stack slot, return true along with the FrameInd...
virtual void copyPhysReg(MachineBasicBlock &MBB, MachineBasicBlock::iterator MI, const DebugLoc &DL, MCRegister DestReg, MCRegister SrcReg, bool KillSrc) const
Emit instructions to copy a pair of physical registers.
virtual bool hasReassociableOperands(const MachineInstr &Inst, const MachineBasicBlock *MBB) const
Return true when \P Inst has reassociable operands in the same \P MBB.
virtual unsigned getInlineAsmLength(const char *Str, const MCAsmInfo &MAI, const TargetSubtargetInfo *STI=nullptr) const
Measure the specified inline asm to determine an approximation of its length.
virtual void genAlternativeCodeSequence(MachineInstr &Root, unsigned Pattern, SmallVectorImpl< MachineInstr * > &InsInstrs, SmallVectorImpl< MachineInstr * > &DelInstrs, DenseMap< unsigned, unsigned > &InstIdxForVirtReg) const
When getMachineCombinerPatterns() finds patterns, this function generates the instructions that could...
virtual std::optional< ParamLoadedValue > describeLoadedValue(const MachineInstr &MI, Register Reg) const
Produce the expression describing the MI loading a value into the physical register Reg.
void lowerCopy(MachineInstr *MI, const TargetRegisterInfo *TRI) const
This function defines the logic to lower COPY instruction to target specific instruction(s).
virtual Register isLoadFromStackSlot(const MachineInstr &MI, int &FrameIndex) const
If the specified machine instruction is a direct load from a stack slot, return the virtual or physic...
MachineInstr * foldMemoryOperand(MachineInstr &MI, ArrayRef< unsigned > Ops, int FI, LiveIntervals *LIS=nullptr, VirtRegMap *VRM=nullptr) const
Attempt to fold a load or store of the specified stack slot into the specified machine instruction fo...
virtual ScheduleHazardRecognizer * CreateTargetHazardRecognizer(const TargetSubtargetInfo *STI, const ScheduleDAG *DAG) const
Allocate and return a hazard recognizer to use for this target when scheduling the machine instructio...
virtual unsigned insertBranch(MachineBasicBlock &MBB, MachineBasicBlock *TBB, MachineBasicBlock *FBB, ArrayRef< MachineOperand > Cond, const DebugLoc &DL, int *BytesAdded=nullptr) const
Insert branch code into the end of the specified MachineBasicBlock.
unsigned getCallFrameSetupOpcode() const
These methods return the opcode of the frame setup/destroy instructions if they exist (-1 otherwise).
virtual bool getMachineCombinerPatterns(MachineInstr &Root, SmallVectorImpl< unsigned > &Patterns, bool DoRegPressureReduce) const
Return true when there is potentially a faster code sequence for an instruction chain ending in Root.
virtual MCInst getNop() const
Return the noop instruction to use for a noop.
unsigned getCallFrameSizeAt(MachineInstr &MI) const
virtual MachineInstr & duplicate(MachineBasicBlock &MBB, MachineBasicBlock::iterator InsertBefore, const MachineInstr &Orig) const
Clones instruction or the whole instruction bundle Orig and insert into MBB before InsertBefore.
std::pair< unsigned, unsigned > getReassociationOpcodes(unsigned Pattern, const MachineInstr &Root, const MachineInstr &Prev) const
Reassociation of some instructions requires inverse operations (e.g.
virtual bool isMBBSafeToOutlineFrom(MachineBasicBlock &MBB, unsigned &Flags) const
Optional target hook that returns true if MBB is safe to outline from, and returns any target-specifi...
virtual void getReassociateOperandIndices(const MachineInstr &Root, unsigned Pattern, std::array< unsigned, 5 > &OperandIndices) const
The returned array encodes the operand index for each parameter because the operands may be commuted;...
int64_t getFrameTotalSize(const MachineInstr &I) const
Returns the total frame size, which is made up of the space set up inside the pair of frame start-sto...
MachineInstr * commuteInstruction(MachineInstr &MI, bool NewMI=false, unsigned OpIdx1=CommuteAnyOperandIndex, unsigned OpIdx2=CommuteAnyOperandIndex) const
This method commutes the operands of the given machine instruction MI.
virtual std::optional< unsigned > getOperandLatency(const InstrItineraryData *ItinData, SDNode *DefNode, unsigned DefIdx, SDNode *UseNode, unsigned UseIdx) const
virtual bool isAssociativeAndCommutative(const MachineInstr &Inst, bool Invert=false) const
Return true when \P Inst is both associative and commutative.
virtual void reMaterialize(MachineBasicBlock &MBB, MachineBasicBlock::iterator MI, Register DestReg, unsigned SubIdx, const MachineInstr &Orig, const TargetRegisterInfo &TRI) const
Re-issue the specified 'original' instruction at the specific location targeting a new destination re...
virtual std::optional< unsigned > getInverseOpcode(unsigned Opcode) const
Return the inverse operation opcode if it exists for \P Opcode (e.g.
virtual void insertNoops(MachineBasicBlock &MBB, MachineBasicBlock::iterator MI, unsigned Quantity) const
Insert noops into the instruction stream at the specified point.
unsigned getCallFrameDestroyOpcode() const
int64_t getFrameSize(const MachineInstr &I) const
Returns size of the frame associated with the given frame instruction.
virtual bool isReallyTriviallyReMaterializable(const MachineInstr &MI) const
For instructions with opcodes for which the M_REMATERIALIZABLE flag is set, this hook lets the target...
virtual bool isPredicated(const MachineInstr &MI) const
Returns true if the instruction is already predicated.
bool getInsertSubregInputs(const MachineInstr &MI, unsigned DefIdx, RegSubRegPair &BaseReg, RegSubRegPairAndIdx &InsertedReg) const
Build the equivalent inputs of a INSERT_SUBREG for the given MI and DefIdx.
virtual ~TargetInstrInfo()
virtual unsigned getInstrLatency(const InstrItineraryData *ItinData, const MachineInstr &MI, unsigned *PredCost=nullptr) const
Compute the instruction latency of a given instruction.
virtual bool produceSameValue(const MachineInstr &MI0, const MachineInstr &MI1, const MachineRegisterInfo *MRI=nullptr) const
Return true if two machine instructions would produce identical values.
std::optional< DestSourcePair > isCopyInstr(const MachineInstr &MI) const
If the specific machine instruction is a instruction that moves/copies value from one register to ano...
bool isReassociationCandidate(const MachineInstr &Inst, bool &Commuted) const
Return true if the input \P Inst is part of a chain of dependent ops that are suitable for reassociat...
virtual bool isSchedulingBoundary(const MachineInstr &MI, const MachineBasicBlock *MBB, const MachineFunction &MF) const
Test if the given instruction should be considered a scheduling boundary.
virtual bool getMemOperandsWithOffsetWidth(const MachineInstr &MI, SmallVectorImpl< const MachineOperand * > &BaseOps, int64_t &Offset, bool &OffsetIsScalable, LocationSize &Width, const TargetRegisterInfo *TRI) const
Get zero or more base operands and the byte offset of an instruction that reads/writes memory.
virtual unsigned getPredicationCost(const MachineInstr &MI) const
virtual CombinerObjective getCombinerObjective(unsigned Pattern) const
Return the objective of a combiner pattern.
virtual MachineInstr * commuteInstructionImpl(MachineInstr &MI, bool NewMI, unsigned OpIdx1, unsigned OpIdx2) const
This method commutes the operands of the given machine instruction MI.
virtual bool isFunctionSafeToSplit(const MachineFunction &MF) const
Return true if the function is a viable candidate for machine function splitting.
virtual MachineTraceStrategy getMachineCombinerTraceStrategy() const
Return a strategy that MachineCombiner must use when creating traces.
bool getRegSequenceInputs(const MachineInstr &MI, unsigned DefIdx, SmallVectorImpl< RegSubRegPairAndIdx > &InputRegs) const
Build the equivalent inputs of a REG_SEQUENCE for the given MI and DefIdx.
virtual bool hasLoadFromStackSlot(const MachineInstr &MI, SmallVectorImpl< const MachineMemOperand * > &Accesses) const
If the specified machine instruction has a load from a stack slot, return true along with the FrameIn...
virtual std::optional< RegImmPair > isAddImmediate(const MachineInstr &MI, Register Reg) const
If the specific machine instruction is an instruction that adds an immediate value and a register,...
unsigned defaultDefLatency(const MCSchedModel &SchedModel, const MachineInstr &DefMI) const
Return the default expected latency for a def based on its opcode.
void reassociateOps(MachineInstr &Root, MachineInstr &Prev, unsigned Pattern, SmallVectorImpl< MachineInstr * > &InsInstrs, SmallVectorImpl< MachineInstr * > &DelInstrs, ArrayRef< unsigned > OperandIndices, DenseMap< unsigned, unsigned > &InstrIdxForVirtReg) const
Attempt to reassociate \P Root and \P Prev according to \P Pattern to reduce critical path length.
static const unsigned CommuteAnyOperandIndex
virtual bool hasReassociableSibling(const MachineInstr &Inst, bool &Commuted) const
Return true when \P Inst has reassociable sibling.
virtual std::string createMIROperandComment(const MachineInstr &MI, const MachineOperand &Op, unsigned OpIdx, const TargetRegisterInfo *TRI) const
virtual bool isHighLatencyDef(int opc) const
Return true if this opcode has high latency to its result.
static bool fixCommutedOpIndices(unsigned &ResultIdx1, unsigned &ResultIdx2, unsigned CommutableOpIdx1, unsigned CommutableOpIdx2)
Assigns the (CommutableOpIdx1, CommutableOpIdx2) pair of commutable operand indices to (ResultIdx1,...
bool getExtractSubregInputs(const MachineInstr &MI, unsigned DefIdx, RegSubRegPairAndIdx &InputReg) const
Build the equivalent inputs of a EXTRACT_SUBREG for the given MI and DefIdx.
virtual bool getExtractSubregLikeInputs(const MachineInstr &MI, unsigned DefIdx, RegSubRegPairAndIdx &InputReg) const
Target-dependent implementation of getExtractSubregInputs.
bool usePreRAHazardRecognizer() const
Provide a global flag for disabling the PreRA hazard recognizer that targets may choose to honor.
virtual const TargetRegisterClass * getRegClass(const MCInstrDesc &MCID, unsigned OpNum, const TargetRegisterInfo *TRI, const MachineFunction &MF) const
Given a machine instruction descriptor, returns the register class constraint for OpNum,...
bool getMemOperandWithOffset(const MachineInstr &MI, const MachineOperand *&BaseOp, int64_t &Offset, bool &OffsetIsScalable, const TargetRegisterInfo *TRI) const
Get the base operand and byte offset of an instruction that reads/writes memory.
Register getStackPointerRegisterToSaveRestore() const
If a physical register, this specifies the register that llvm.savestack/llvm.restorestack should save...
This class defines information used to lower LLVM code to legal SelectionDAG operators that the targe...
const Triple & getTargetTriple() const
bool contains(Register Reg) const
Return true if the specified register is included in this register class.
bool hasSubClassEq(const TargetRegisterClass *RC) const
Returns true if RC is a sub-class of or equal to this class.
TargetRegisterInfo base class - We assume that the target defines a static array of TargetRegisterDes...
Provide an instruction scheduling machine model to CodeGen passes.
const InstrItineraryData * getInstrItineraries() const
TargetSubtargetInfo - Generic base class for all target subtargets.
virtual const TargetRegisterInfo * getRegisterInfo() const
getRegisterInfo - If register information is available, return it.
virtual const TargetFrameLowering * getFrameLowering() const
virtual const TargetInstrInfo * getInstrInfo() const
virtual const TargetLowering * getTargetLowering() const
bool isOSDarwin() const
Is this a "Darwin" OS (macOS, iOS, tvOS, watchOS, XROS, or DriverKit).
Definition: Triple.h:558
A raw_ostream that writes to an std::string.
Definition: raw_ostream.h:661
#define llvm_unreachable(msg)
Marks that the current location is not supposed to be reachable.
@ Tail
Attemps to make calls as fast as possible while guaranteeing that tail call optimization can always b...
Definition: CallingConv.h:76
@ C
The default llvm calling convention, compatible with C.
Definition: CallingConv.h:34
@ Define
Register definition.
initializer< Ty > init(const Ty &Val)
Definition: CommandLine.h:443
InstrType
Represents how an instruction should be mapped by the outliner.
This is an optimization pass for GlobalISel generic memory operations.
Definition: AddressRanges.h:18
@ Offset
Definition: DWP.cpp:480
@ Length
Definition: DWP.cpp:480
MachineTraceStrategy
Strategies for selecting traces.
@ TS_MinInstrCount
Select the trace through a block that has the fewest instructions.
bool all_of(R &&range, UnaryPredicate P)
Provide wrappers to std::all_of which take ranges instead of having to pass begin/end explicitly.
Definition: STLExtras.h:1722
std::pair< MachineOperand, DIExpression * > ParamLoadedValue
iterator_range< T > make_range(T x, T y)
Convenience function for iterating over sub-ranges.
auto reverse(ContainerTy &&C)
Definition: STLExtras.h:419
void report_fatal_error(Error Err, bool gen_crash_diag=true)
Report a serious error, calling any installed error handler.
Definition: Error.cpp:167
CombinerObjective
The combiner's goal may differ based on which pattern it is attempting to optimize.
VirtRegInfo AnalyzeVirtRegInBundle(MachineInstr &MI, Register Reg, SmallVectorImpl< std::pair< MachineInstr *, unsigned > > *Ops=nullptr)
AnalyzeVirtRegInBundle - Analyze how the current instruction or bundle uses a virtual register.
@ First
Helpers to iterate all locations in the MemoryEffectsBase class.
unsigned getKillRegState(bool B)
DWARFExpression::Operation Op
bool is_contained(R &&Range, const E &Element)
Returns true if Element is found in Range.
Definition: STLExtras.h:1886
void swap(llvm::BitVector &LHS, llvm::BitVector &RHS)
Implement std::swap in terms of BitVector swap.
Definition: BitVector.h:860
#define N
Machine model for scheduling, bundling, and heuristics.
Definition: MCSchedule.h:253
unsigned LoadLatency
Definition: MCSchedule.h:294
unsigned HighLatency
Definition: MCSchedule.h:301
static MachinePointerInfo getFixedStack(MachineFunction &MF, int FI, int64_t Offset=0)
Return a MachinePointerInfo record that refers to the specified FrameIndex.
A pair composed of a pair of a register and a sub-register index, and another sub-register index.
A pair composed of a register and a sub-register index.
VirtRegInfo - Information about a virtual register used by a set of operands.
bool Reads
Reads - One of the operands read the virtual register.
bool Writes
Writes - One of the operands writes the virtual register.
An individual sequence of instructions to be replaced with a call to an outlined function.
MachineFunction * getMF() const