1//===-- TargetInstrInfo.cpp - Target Instruction Information --------------===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8//
9// This file implements the TargetInstrInfo class.
10//
11//===----------------------------------------------------------------------===//
12
14#include "llvm/ADT/SmallSet.h"
31#include "llvm/IR/DataLayout.h"
33#include "llvm/MC/MCAsmInfo.h"
39
40using namespace llvm;
41
43 "disable-sched-hazard", cl::Hidden, cl::init(false),
44 cl::desc("Disable hazard detection during preRA scheduling"));
45
47 "acc-reassoc", cl::Hidden, cl::init(true),
48 cl::desc("Enable reassociation of accumulation chains"));
49
52 cl::desc("Minimum length of accumulator chains "
53 "required for the optimization to kick in"));
54
56 "acc-max-width", cl::Hidden, cl::init(3),
57 cl::desc("Maximum number of branches in the accumulator tree"));
58
60
62 unsigned OpNum) const {
63 if (OpNum >= MCID.getNumOperands())
64 return nullptr;
65
66 const MCOperandInfo &OpInfo = MCID.operands()[OpNum];
67 int16_t RegClass = getOpRegClassID(OpInfo);
68
69 // Instructions like INSERT_SUBREG do not have fixed register classes.
70 if (RegClass < 0)
71 return nullptr;
72
73 // Otherwise just look it up normally.
74 return TRI.getRegClass(RegClass);
75}
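// Usage sketch (illustrative, not part of this file): querying the register
// class constraint of an instruction's first operand through the hook above;
// a nullptr result means the operand is unconstrained, as with INSERT_SUBREG.
static const TargetRegisterClass *
exampleOperandRegClass(const TargetInstrInfo &TII, const MachineInstr &MI) {
  return TII.getRegClass(MI.getDesc(), /*OpNum=*/0);
}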
76
77/// insertNoop - Insert a noop into the instruction stream at the specified
78/// point.
81 llvm_unreachable("Target didn't implement insertNoop!");
82}
83
84/// insertNoops - Insert noops into the instruction stream at the specified
85/// point.
88 unsigned Quantity) const {
89 for (unsigned i = 0; i < Quantity; ++i)
90 insertNoop(MBB, MI);
91}
92
93static bool isAsmComment(const char *Str, const MCAsmInfo &MAI) {
94 return strncmp(Str, MAI.getCommentString().data(),
95 MAI.getCommentString().size()) == 0;
96}
97
98/// Measure the specified inline asm to determine an approximation of its
99/// length.
100/// Comments (which run till the next SeparatorString or newline) do not
101/// count as an instruction.
102/// Any other non-whitespace text is considered an instruction, with
103/// multiple instructions separated by SeparatorString or newlines.
104/// Variable-length instructions are not handled here; this function
105/// may be overridden in the target code to do that.
106/// We implement a special case of the .space directive which takes only a
107/// single integer argument in base 10 that is the size in bytes. This is a
108/// restricted form of the GAS directive in that we only interpret
109/// simple (i.e., not a logical or arithmetic expression) size values without
110/// the optional fill value. This is primarily used for creating arbitrary
111/// sized inline asm blocks for testing purposes.
113 const char *Str,
114 const MCAsmInfo &MAI, const TargetSubtargetInfo *STI) const {
115 // Count the number of instructions in the asm.
116 bool AtInsnStart = true;
117 unsigned Length = 0;
118 const unsigned MaxInstLength = MAI.getMaxInstLength(STI);
119 for (; *Str; ++Str) {
120 if (*Str == '\n' || strncmp(Str, MAI.getSeparatorString(),
121 strlen(MAI.getSeparatorString())) == 0) {
122 AtInsnStart = true;
123 } else if (isAsmComment(Str, MAI)) {
124 // Stop counting as an instruction after a comment until the next
125 // separator.
126 AtInsnStart = false;
127 }
128
129 if (AtInsnStart && !isSpace(static_cast<unsigned char>(*Str))) {
130 unsigned AddLength = MaxInstLength;
131 if (strncmp(Str, ".space", 6) == 0) {
132 char *EStr;
133 int SpaceSize;
134 SpaceSize = strtol(Str + 6, &EStr, 10);
135 SpaceSize = SpaceSize < 0 ? 0 : SpaceSize;
136 while (*EStr != '\n' && isSpace(static_cast<unsigned char>(*EStr)))
137 ++EStr;
138 if (*EStr == '\0' || *EStr == '\n' ||
139 isAsmComment(EStr, MAI)) // Successfully parsed .space argument
140 AddLength = SpaceSize;
141 }
142 Length += AddLength;
143 AtInsnStart = false;
144 }
145 }
146
147 return Length;
148}
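// Usage sketch (illustrative, not part of this file): two newline-separated
// instructions count as 2 * MAI.getMaxInstLength(STI) bytes, while the
// restricted .space form is measured by its decimal byte argument.
static unsigned exampleAsmLengths(const TargetInstrInfo &TII,
                                  const MCAsmInfo &MAI,
                                  const TargetSubtargetInfo &STI) {
  unsigned TwoInsts = TII.getInlineAsmLength("nop\nnop", MAI, &STI);
  unsigned Padding = TII.getInlineAsmLength(".space 512", MAI, &STI); // 512
  return TwoInsts + Padding;
}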
149
150/// ReplaceTailWithBranchTo - Delete the instruction OldInst and everything
151/// after it, replacing it with an unconditional branch to NewDest.
152void
154 MachineBasicBlock *NewDest) const {
155 MachineBasicBlock *MBB = Tail->getParent();
156
157 // Remove all the old successors of MBB from the CFG.
158 while (!MBB->succ_empty())
159 MBB->removeSuccessor(MBB->succ_begin());
160
161 // Save off the debug loc before erasing the instruction.
162 DebugLoc DL = Tail->getDebugLoc();
163
164 // Update call info and remove all the dead instructions
165 // from the end of MBB.
166 while (Tail != MBB->end()) {
167 auto MI = Tail++;
168 if (MI->shouldUpdateAdditionalCallInfo())
169 MBB->getParent()->eraseAdditionalCallInfo(&*MI);
170 MBB->erase(MI);
171 }
172
173 // If NewDest isn't the layout successor of MBB, insert a branch to it.
174 if (++MachineFunction::iterator(MBB) != MachineFunction::iterator(NewDest))
175 insertBranch(*MBB, NewDest, nullptr, SmallVector<MachineOperand, 0>(), DL);
176 MBB->addSuccessor(NewDest);
177}
178
180 bool NewMI, unsigned Idx1,
181 unsigned Idx2) const {
182 const MCInstrDesc &MCID = MI.getDesc();
183 bool HasDef = MCID.getNumDefs();
184 if (HasDef && !MI.getOperand(0).isReg())
185 // No idea how to commute this instruction. The target should implement its own.
186 return nullptr;
187
188 unsigned CommutableOpIdx1 = Idx1; (void)CommutableOpIdx1;
189 unsigned CommutableOpIdx2 = Idx2; (void)CommutableOpIdx2;
190 assert(findCommutedOpIndices(MI, CommutableOpIdx1, CommutableOpIdx2) &&
191 CommutableOpIdx1 == Idx1 && CommutableOpIdx2 == Idx2 &&
192 "TargetInstrInfo::CommuteInstructionImpl(): not commutable operands.");
193 assert(MI.getOperand(Idx1).isReg() && MI.getOperand(Idx2).isReg() &&
194 "This only knows how to commute register operands so far");
195
196 Register Reg0 = HasDef ? MI.getOperand(0).getReg() : Register();
197 Register Reg1 = MI.getOperand(Idx1).getReg();
198 Register Reg2 = MI.getOperand(Idx2).getReg();
199 unsigned SubReg0 = HasDef ? MI.getOperand(0).getSubReg() : 0;
200 unsigned SubReg1 = MI.getOperand(Idx1).getSubReg();
201 unsigned SubReg2 = MI.getOperand(Idx2).getSubReg();
202 bool Reg1IsKill = MI.getOperand(Idx1).isKill();
203 bool Reg2IsKill = MI.getOperand(Idx2).isKill();
204 bool Reg1IsUndef = MI.getOperand(Idx1).isUndef();
205 bool Reg2IsUndef = MI.getOperand(Idx2).isUndef();
206 bool Reg1IsInternal = MI.getOperand(Idx1).isInternalRead();
207 bool Reg2IsInternal = MI.getOperand(Idx2).isInternalRead();
208 // Avoid calling isRenamable for virtual registers since we assert that
209 // renamable property is only queried/set for physical registers.
210 bool Reg1IsRenamable =
211 Reg1.isPhysical() ? MI.getOperand(Idx1).isRenamable() : false;
212 bool Reg2IsRenamable =
213 Reg2.isPhysical() ? MI.getOperand(Idx2).isRenamable() : false;
214
215 // For a case like this:
216 // %0.sub = INST %0.sub(tied), %1.sub, implicit-def %0
217 // we need to update the implicit-def after commuting to result in:
218 // %1.sub = INST %1.sub(tied), %0.sub, implicit-def %1
219 SmallVector<unsigned> UpdateImplicitDefIdx;
220 if (HasDef && MI.hasImplicitDef()) {
221 for (auto [OpNo, MO] : llvm::enumerate(MI.implicit_operands())) {
222 Register ImplReg = MO.getReg();
223 if ((ImplReg.isVirtual() && ImplReg == Reg0) ||
224 (ImplReg.isPhysical() && Reg0.isPhysical() &&
225 TRI.isSubRegisterEq(ImplReg, Reg0)))
226 UpdateImplicitDefIdx.push_back(OpNo + MI.getNumExplicitOperands());
227 }
228 }
229
230 // If destination is tied to either of the commuted source register, then
231 // it must be updated.
232 if (HasDef && Reg0 == Reg1 &&
233 MI.getDesc().getOperandConstraint(Idx1, MCOI::TIED_TO) == 0) {
234 Reg2IsKill = false;
235 Reg0 = Reg2;
236 SubReg0 = SubReg2;
237 } else if (HasDef && Reg0 == Reg2 &&
238 MI.getDesc().getOperandConstraint(Idx2, MCOI::TIED_TO) == 0) {
239 Reg1IsKill = false;
240 Reg0 = Reg1;
241 SubReg0 = SubReg1;
242 }
243
244 MachineInstr *CommutedMI = nullptr;
245 if (NewMI) {
246 // Create a new instruction.
247 MachineFunction &MF = *MI.getMF();
248 CommutedMI = MF.CloneMachineInstr(&MI);
249 } else {
250 CommutedMI = &MI;
251 }
252
253 if (HasDef) {
254 CommutedMI->getOperand(0).setReg(Reg0);
255 CommutedMI->getOperand(0).setSubReg(SubReg0);
256 for (unsigned Idx : UpdateImplicitDefIdx)
257 CommutedMI->getOperand(Idx).setReg(Reg0);
258 }
259 CommutedMI->getOperand(Idx2).setReg(Reg1);
260 CommutedMI->getOperand(Idx1).setReg(Reg2);
261 CommutedMI->getOperand(Idx2).setSubReg(SubReg1);
262 CommutedMI->getOperand(Idx1).setSubReg(SubReg2);
263 CommutedMI->getOperand(Idx2).setIsKill(Reg1IsKill);
264 CommutedMI->getOperand(Idx1).setIsKill(Reg2IsKill);
265 CommutedMI->getOperand(Idx2).setIsUndef(Reg1IsUndef);
266 CommutedMI->getOperand(Idx1).setIsUndef(Reg2IsUndef);
267 CommutedMI->getOperand(Idx2).setIsInternalRead(Reg1IsInternal);
268 CommutedMI->getOperand(Idx1).setIsInternalRead(Reg2IsInternal);
269 // Avoid calling setIsRenamable for virtual registers since we assert that
270 // renamable property is only queried/set for physical registers.
271 if (Reg1.isPhysical())
272 CommutedMI->getOperand(Idx2).setIsRenamable(Reg1IsRenamable);
273 if (Reg2.isPhysical())
274 CommutedMI->getOperand(Idx1).setIsRenamable(Reg2IsRenamable);
275 return CommutedMI;
276}
277
279 unsigned OpIdx1,
280 unsigned OpIdx2) const {
281 // If OpIdx1 or OpIdx2 is not specified, then this method is free to choose
282 // any commutable operand, which is done in findCommutedOpIndices() method
283 // called below.
284 if ((OpIdx1 == CommuteAnyOperandIndex || OpIdx2 == CommuteAnyOperandIndex) &&
285 !findCommutedOpIndices(MI, OpIdx1, OpIdx2)) {
286 assert(MI.isCommutable() &&
287 "Precondition violation: MI must be commutable.");
288 return nullptr;
289 }
290 return commuteInstructionImpl(MI, NewMI, OpIdx1, OpIdx2);
291}
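// Usage sketch (illustrative): letting the target pick any commutable operand
// pair and commuting MI in place rather than cloning it.
static MachineInstr *exampleCommute(const TargetInstrInfo &TII,
                                    MachineInstr &MI) {
  return TII.commuteInstruction(MI, /*NewMI=*/false,
                                TargetInstrInfo::CommuteAnyOperandIndex,
                                TargetInstrInfo::CommuteAnyOperandIndex);
}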
292
294 unsigned &ResultIdx2,
295 unsigned CommutableOpIdx1,
296 unsigned CommutableOpIdx2) {
297 if (ResultIdx1 == CommuteAnyOperandIndex &&
298 ResultIdx2 == CommuteAnyOperandIndex) {
299 ResultIdx1 = CommutableOpIdx1;
300 ResultIdx2 = CommutableOpIdx2;
301 } else if (ResultIdx1 == CommuteAnyOperandIndex) {
302 if (ResultIdx2 == CommutableOpIdx1)
303 ResultIdx1 = CommutableOpIdx2;
304 else if (ResultIdx2 == CommutableOpIdx2)
305 ResultIdx1 = CommutableOpIdx1;
306 else
307 return false;
308 } else if (ResultIdx2 == CommuteAnyOperandIndex) {
309 if (ResultIdx1 == CommutableOpIdx1)
310 ResultIdx2 = CommutableOpIdx2;
311 else if (ResultIdx1 == CommutableOpIdx2)
312 ResultIdx2 = CommutableOpIdx1;
313 else
314 return false;
315 } else
316 // Check that the result operand indices match the given commutable
317 // operand indices.
318 return (ResultIdx1 == CommutableOpIdx1 && ResultIdx2 == CommutableOpIdx2) ||
319 (ResultIdx1 == CommutableOpIdx2 && ResultIdx2 == CommutableOpIdx1);
320
321 return true;
322}
323
325 unsigned &SrcOpIdx1,
326 unsigned &SrcOpIdx2) const {
327 assert(!MI.isBundle() &&
328 "TargetInstrInfo::findCommutedOpIndices() can't handle bundles");
329
330 const MCInstrDesc &MCID = MI.getDesc();
331 if (!MCID.isCommutable())
332 return false;
333
334 // This assumes v0 = op v1, v2 and commuting would swap v1 and v2. If this
335 // is not true, then the target must implement this.
336 unsigned CommutableOpIdx1 = MCID.getNumDefs();
337 unsigned CommutableOpIdx2 = CommutableOpIdx1 + 1;
338 if (!fixCommutedOpIndices(SrcOpIdx1, SrcOpIdx2,
339 CommutableOpIdx1, CommutableOpIdx2))
340 return false;
341
342 if (!MI.getOperand(SrcOpIdx1).isReg() || !MI.getOperand(SrcOpIdx2).isReg())
343 // No idea.
344 return false;
345 return true;
346}
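// Usage sketch (illustrative): passing CommuteAnyOperandIndex asks the hook
// to choose; for the default "v0 = op v1, v2" form handled above, this
// typically yields Idx1 == 1 and Idx2 == 2.
static bool exampleFindCommutable(const TargetInstrInfo &TII, MachineInstr &MI,
                                  unsigned &Idx1, unsigned &Idx2) {
  Idx1 = TargetInstrInfo::CommuteAnyOperandIndex;
  Idx2 = TargetInstrInfo::CommuteAnyOperandIndex;
  return TII.findCommutedOpIndices(MI, Idx1, Idx2);
}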
347
349 if (!MI.isTerminator()) return false;
350
351 // Conditional branch is a special case.
352 if (MI.isBranch() && !MI.isBarrier())
353 return true;
354 if (!MI.isPredicable())
355 return true;
356 return !isPredicated(MI);
357}
358
361 bool MadeChange = false;
362
363 assert(!MI.isBundle() &&
364 "TargetInstrInfo::PredicateInstruction() can't handle bundles");
365
366 const MCInstrDesc &MCID = MI.getDesc();
367 if (!MI.isPredicable())
368 return false;
369
370 for (unsigned j = 0, i = 0, e = MI.getNumOperands(); i != e; ++i) {
371 if (MCID.operands()[i].isPredicate()) {
372 MachineOperand &MO = MI.getOperand(i);
373 if (MO.isReg()) {
374 MO.setReg(Pred[j].getReg());
375 MadeChange = true;
376 } else if (MO.isImm()) {
377 MO.setImm(Pred[j].getImm());
378 MadeChange = true;
379 } else if (MO.isMBB()) {
380 MO.setMBB(Pred[j].getMBB());
381 MadeChange = true;
382 }
383 ++j;
384 }
385 }
386 return MadeChange;
387}
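// Usage sketch (hypothetical predicate encoding): the Pred operands are
// consumed positionally, one per predicate operand in the MCID, so a target
// with an immediate condition code and a predicate register would pass two
// MachineOperands.
static bool examplePredicate(const TargetInstrInfo &TII, MachineInstr &MI) {
  SmallVector<MachineOperand, 2> Pred;
  Pred.push_back(MachineOperand::CreateImm(/*CondCode=*/0));
  Pred.push_back(MachineOperand::CreateReg(Register(), /*isDef=*/false));
  return TII.PredicateInstruction(MI, Pred);
}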
388
390 const MachineInstr &MI,
392 size_t StartSize = Accesses.size();
393 for (MachineInstr::mmo_iterator o = MI.memoperands_begin(),
394 oe = MI.memoperands_end();
395 o != oe; ++o) {
396 if ((*o)->isLoad() &&
397 isa_and_nonnull<FixedStackPseudoSourceValue>((*o)->getPseudoValue()))
398 Accesses.push_back(*o);
399 }
400 return Accesses.size() != StartSize;
401}
402
404 const MachineInstr &MI,
406 size_t StartSize = Accesses.size();
407 for (MachineInstr::mmo_iterator o = MI.memoperands_begin(),
408 oe = MI.memoperands_end();
409 o != oe; ++o) {
410 if ((*o)->isStore() &&
411 isa_and_nonnull<FixedStackPseudoSourceValue>((*o)->getPseudoValue()))
412 Accesses.push_back(*o);
413 }
414 return Accesses.size() != StartSize;
415}
416
418 unsigned SubIdx, unsigned &Size,
419 unsigned &Offset,
420 const MachineFunction &MF) const {
421 if (!SubIdx) {
422 Size = TRI.getSpillSize(*RC);
423 Offset = 0;
424 return true;
425 }
426 unsigned BitSize = TRI.getSubRegIdxSize(SubIdx);
427 // Convert bit size to byte size.
428 if (BitSize % 8)
429 return false;
430
431 int BitOffset = TRI.getSubRegIdxOffset(SubIdx);
432 if (BitOffset < 0 || BitOffset % 8)
433 return false;
434
435 Size = BitSize / 8;
436 Offset = (unsigned)BitOffset / 8;
437
438 assert(TRI.getSpillSize(*RC) >= (Offset + Size) && "bad subregister range");
439
440 if (!MF.getDataLayout().isLittleEndian()) {
441 Offset = TRI.getSpillSize(*RC) - (Offset + Size);
442 }
443 return true;
444}
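// Worked sketch of the computation above (hypothetical numbers): for a
// register class with a 16-byte spill size and a sub-register index covering
// bits [64, 128), BitSize == 64 and BitOffset == 64, so Size == 8 and
// Offset == 8 on a little-endian target; on a big-endian target the offset
// flips to 16 - (8 + 8) == 0.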
445
448 Register DestReg, unsigned SubIdx,
449 const MachineInstr &Orig) const {
450 MachineInstr *MI = MBB.getParent()->CloneMachineInstr(&Orig);
451 MI->substituteRegister(MI->getOperand(0).getReg(), DestReg, SubIdx, TRI);
452 MBB.insert(I, MI);
453}
454
456 const MachineInstr &MI1,
457 const MachineRegisterInfo *MRI) const {
458 return MI0.isIdenticalTo(MI1, MachineInstr::IgnoreVRegDefs);
459}
460
463 MachineBasicBlock::iterator InsertBefore,
464 const MachineInstr &Orig) const {
465 MachineFunction &MF = *MBB.getParent();
466 // CFI instructions are marked as non-duplicable, because Darwin compact
467 // unwind info emission can't handle multiple prologue setups.
468 assert((!Orig.isNotDuplicable() ||
470 Orig.isCFIInstruction())) &&
471 "Instruction cannot be duplicated");
472
473 return MF.cloneMachineInstrBundle(MBB, InsertBefore, Orig);
474}
475
476// If the COPY instruction in MI can be folded to a stack operation, return
477// the register class to use.
479 const TargetInstrInfo &TII,
480 unsigned FoldIdx) {
481 assert(TII.isCopyInstr(MI) && "MI must be a COPY instruction");
482 if (MI.getNumOperands() != 2)
483 return nullptr;
484 assert(FoldIdx<2 && "FoldIdx refers no nonexistent operand");
485
486 const MachineOperand &FoldOp = MI.getOperand(FoldIdx);
487 const MachineOperand &LiveOp = MI.getOperand(1 - FoldIdx);
488
489 if (FoldOp.getSubReg() || LiveOp.getSubReg())
490 return nullptr;
491
492 Register FoldReg = FoldOp.getReg();
493 Register LiveReg = LiveOp.getReg();
494
495 assert(FoldReg.isVirtual() && "Cannot fold physregs");
496
497 const MachineRegisterInfo &MRI = MI.getMF()->getRegInfo();
498 const TargetRegisterClass *RC = MRI.getRegClass(FoldReg);
499
500 if (LiveOp.getReg().isPhysical())
501 return RC->contains(LiveOp.getReg()) ? RC : nullptr;
502
503 if (RC->hasSubClassEq(MRI.getRegClass(LiveReg)))
504 return RC;
505
506 // FIXME: Allow folding when register classes are memory compatible.
507 return nullptr;
508}
509
510MCInst TargetInstrInfo::getNop() const { llvm_unreachable("Not implemented"); }
511
512/// Try to remove the load by folding it to a register
513/// operand at the use. We fold the load if it defines a virtual
514/// register, the virtual register is used once in the same BB, and the
515/// instructions in between do not load or store and have no side effects.
518 Register &FoldAsLoadDefReg,
519 MachineInstr *&DefMI) const {
520 // Check whether we can move DefMI here.
521 DefMI = MRI->getVRegDef(FoldAsLoadDefReg);
522 assert(DefMI);
523 bool SawStore = false;
524 if (!DefMI->isSafeToMove(SawStore))
525 return nullptr;
526
527 // Collect information about virtual register operands of MI.
528 SmallVector<unsigned, 1> SrcOperandIds;
529 for (unsigned i = 0, e = MI.getNumOperands(); i != e; ++i) {
530 MachineOperand &MO = MI.getOperand(i);
531 if (!MO.isReg())
532 continue;
533 Register Reg = MO.getReg();
534 if (Reg != FoldAsLoadDefReg)
535 continue;
536 // Do not fold if we have a subreg use or a def.
537 if (MO.getSubReg() || MO.isDef())
538 return nullptr;
539 SrcOperandIds.push_back(i);
540 }
541 if (SrcOperandIds.empty())
542 return nullptr;
543
544 // Check whether we can fold the def into one of SrcOperandIds.
545 if (MachineInstr *FoldMI = foldMemoryOperand(MI, SrcOperandIds, *DefMI)) {
546 FoldAsLoadDefReg = 0;
547 return FoldMI;
548 }
549
550 return nullptr;
551}
552
553std::pair<unsigned, unsigned>
555 switch (MI.getOpcode()) {
556 case TargetOpcode::STACKMAP:
557 // StackMapLiveValues are foldable
558 return std::make_pair(0, StackMapOpers(&MI).getVarIdx());
559 case TargetOpcode::PATCHPOINT:
560 // For PatchPoint, the call args are not foldable (even if reported in the
561 // stackmap e.g. via anyregcc).
562 return std::make_pair(0, PatchPointOpers(&MI).getVarIdx());
563 case TargetOpcode::STATEPOINT:
564 // For statepoints, fold deopt and gc arguments, but not call arguments.
565 return std::make_pair(MI.getNumDefs(), StatepointOpers(&MI).getVarIdx());
566 default:
567 llvm_unreachable("unexpected stackmap opcode");
568 }
569}
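// Operand-layout sketch for the ranges above (live values elided):
//   STACKMAP <id>, <num-shadow-bytes>, <live values...>
// so getVarIdx() == 2 and everything from operand 2 onward is foldable;
// PATCHPOINT and STATEPOINT additionally carry call arguments before the
// variable section, and those must not be folded.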
570
572 ArrayRef<unsigned> Ops, int FrameIndex,
573 const TargetInstrInfo &TII) {
574 unsigned StartIdx = 0;
575 unsigned NumDefs = 0;
576 // getPatchpointUnfoldableRange hits llvm_unreachable if MI is not a patchpoint.
577 std::tie(NumDefs, StartIdx) = TII.getPatchpointUnfoldableRange(MI);
578
579 unsigned DefToFoldIdx = MI.getNumOperands();
580
581 // Return false if any operands requested for folding are not foldable (not
582 // part of the stackmap's live values).
583 for (unsigned Op : Ops) {
584 if (Op < NumDefs) {
585 assert(DefToFoldIdx == MI.getNumOperands() && "Folding multiple defs");
586 DefToFoldIdx = Op;
587 } else if (Op < StartIdx) {
588 return nullptr;
589 }
590 if (MI.getOperand(Op).isTied())
591 return nullptr;
592 }
593
594 MachineInstr *NewMI =
595 MF.CreateMachineInstr(TII.get(MI.getOpcode()), MI.getDebugLoc(), true);
596 MachineInstrBuilder MIB(MF, NewMI);
597
598 // No need to fold the return value, the metadata, or the function arguments.
599 for (unsigned i = 0; i < StartIdx; ++i)
600 if (i != DefToFoldIdx)
601 MIB.add(MI.getOperand(i));
602
603 for (unsigned i = StartIdx, e = MI.getNumOperands(); i < e; ++i) {
604 MachineOperand &MO = MI.getOperand(i);
605 unsigned TiedTo = e;
606 (void)MI.isRegTiedToDefOperand(i, &TiedTo);
607
608 if (is_contained(Ops, i)) {
609 assert(TiedTo == e && "Cannot fold tied operands");
610 unsigned SpillSize;
611 unsigned SpillOffset;
612 // Compute the spill slot size and offset.
613 const TargetRegisterClass *RC =
614 MF.getRegInfo().getRegClass(MO.getReg());
615 bool Valid =
616 TII.getStackSlotRange(RC, MO.getSubReg(), SpillSize, SpillOffset, MF);
617 if (!Valid)
618 report_fatal_error("cannot spill patchpoint subregister operand");
619 MIB.addImm(StackMaps::IndirectMemRefOp);
620 MIB.addImm(SpillSize);
621 MIB.addFrameIndex(FrameIndex);
622 MIB.addImm(SpillOffset);
623 } else {
624 MIB.add(MO);
625 if (TiedTo < e) {
626 assert(TiedTo < NumDefs && "Bad tied operand");
627 if (TiedTo > DefToFoldIdx)
628 --TiedTo;
629 NewMI->tieOperands(TiedTo, NewMI->getNumOperands() - 1);
630 }
631 }
632 }
633 return NewMI;
634}
635
636static void foldInlineAsmMemOperand(MachineInstr *MI, unsigned OpNo, int FI,
637 const TargetInstrInfo &TII) {
638 // If the machine operand is tied, untie it first.
639 if (MI->getOperand(OpNo).isTied()) {
640 unsigned TiedTo = MI->findTiedOperandIdx(OpNo);
641 MI->untieRegOperand(OpNo);
642 // Intentional recursion!
643 foldInlineAsmMemOperand(MI, TiedTo, FI, TII);
644 }
645
647 TII.getFrameIndexOperands(NewOps, FI);
648 assert(!NewOps.empty() && "getFrameIndexOperands didn't create any operands");
649 MI->removeOperand(OpNo);
650 MI->insert(MI->operands_begin() + OpNo, NewOps);
651
652 // Change the previous operand to a MemKind InlineAsm::Flag. The second param
653 // is the per-target number of operands that represent the memory operand
654 // excluding this one (MD). This includes MO.
656 F.setMemConstraint(InlineAsm::ConstraintCode::m);
657 MachineOperand &MD = MI->getOperand(OpNo - 1);
658 MD.setImm(F);
659}
660
661// Returns nullptr if not possible to fold.
663 ArrayRef<unsigned> Ops, int FI,
664 const TargetInstrInfo &TII) {
665 assert(MI.isInlineAsm() && "wrong opcode");
666 if (Ops.size() > 1)
667 return nullptr;
668 unsigned Op = Ops[0];
669 assert(Op && "should never be first operand");
670 assert(MI.getOperand(Op).isReg() && "shouldn't be folding non-reg operands");
671
672 if (!MI.mayFoldInlineAsmRegOp(Op))
673 return nullptr;
674
675 MachineInstr &NewMI = TII.duplicate(*MI.getParent(), MI.getIterator(), MI);
676
677 foldInlineAsmMemOperand(&NewMI, Op, FI, TII);
678
679 // Update the mayLoad/mayStore flags and the memory operands.
680 const VirtRegInfo &RI =
681 AnalyzeVirtRegInBundle(MI, MI.getOperand(Op).getReg());
684 if (RI.Reads) {
685 ExtraMO.setImm(ExtraMO.getImm() | InlineAsm::Extra_MayLoad);
687 }
688 if (RI.Writes) {
689 ExtraMO.setImm(ExtraMO.getImm() | InlineAsm::Extra_MayStore);
691 }
692 MachineFunction *MF = NewMI.getMF();
693 const MachineFrameInfo &MFI = MF->getFrameInfo();
695 MachinePointerInfo::getFixedStack(*MF, FI), Flags, MFI.getObjectSize(FI),
696 MFI.getObjectAlign(FI));
697 NewMI.addMemOperand(*MF, MMO);
698
699 return &NewMI;
700}
701
703 ArrayRef<unsigned> Ops, int FI,
704 LiveIntervals *LIS,
705 VirtRegMap *VRM) const {
706 auto Flags = MachineMemOperand::MONone;
707 for (unsigned OpIdx : Ops)
708 Flags |= MI.getOperand(OpIdx).isDef() ? MachineMemOperand::MOStore
710
711 MachineBasicBlock *MBB = MI.getParent();
712 assert(MBB && "foldMemoryOperand needs an inserted instruction");
713 MachineFunction &MF = *MBB->getParent();
714
715 // If we're not folding a load into a subreg, the size of the load is the
716 // size of the spill slot. But if we are, we need to figure out what the
717 // actual load size is.
718 int64_t MemSize = 0;
719 const MachineFrameInfo &MFI = MF.getFrameInfo();
720
721 if (Flags & MachineMemOperand::MOStore) {
722 MemSize = MFI.getObjectSize(FI);
723 } else {
724 for (unsigned OpIdx : Ops) {
725 int64_t OpSize = MFI.getObjectSize(FI);
726
727 if (auto SubReg = MI.getOperand(OpIdx).getSubReg()) {
728 unsigned SubRegSize = TRI.getSubRegIdxSize(SubReg);
729 if (SubRegSize > 0 && !(SubRegSize % 8))
730 OpSize = SubRegSize / 8;
731 }
732
733 MemSize = std::max(MemSize, OpSize);
734 }
735 }
736
737 assert(MemSize && "Did not expect a zero-sized stack slot");
738
739 MachineInstr *NewMI = nullptr;
740
741 if (MI.getOpcode() == TargetOpcode::STACKMAP ||
742 MI.getOpcode() == TargetOpcode::PATCHPOINT ||
743 MI.getOpcode() == TargetOpcode::STATEPOINT) {
744 // Fold stackmap/patchpoint.
745 NewMI = foldPatchpoint(MF, MI, Ops, FI, *this);
746 if (NewMI)
747 MBB->insert(MI, NewMI);
748 } else if (MI.isInlineAsm()) {
749 return foldInlineAsmMemOperand(MI, Ops, FI, *this);
750 } else {
751 // Ask the target to do the actual folding.
752 NewMI = foldMemoryOperandImpl(MF, MI, Ops, MI, FI, LIS, VRM);
753 }
754
755 if (NewMI) {
756 NewMI->setMemRefs(MF, MI.memoperands());
757 // Add a memory operand, foldMemoryOperandImpl doesn't do that.
759 NewMI->mayStore()) &&
760 "Folded a def to a non-store!");
761 assert((!(Flags & MachineMemOperand::MOLoad) ||
762 NewMI->mayLoad()) &&
763 "Folded a use to a non-load!");
764 assert(MFI.getObjectOffset(FI) != -1);
765 MachineMemOperand *MMO =
767 Flags, MemSize, MFI.getObjectAlign(FI));
768 NewMI->addMemOperand(MF, MMO);
769
770 // The pass "x86 speculative load hardening" always attaches symbols to
771 // call instructions. We need to copy them from the old instruction.
772 NewMI->cloneInstrSymbols(MF, MI);
773
774 return NewMI;
775 }
776
777 // A straight COPY may fold as a load/store.
778 if (!isCopyInstr(MI) || Ops.size() != 1)
779 return nullptr;
780
781 const TargetRegisterClass *RC = canFoldCopy(MI, *this, Ops[0]);
782 if (!RC)
783 return nullptr;
784
785 const MachineOperand &MO = MI.getOperand(1 - Ops[0]);
787 if (Flags == MachineMemOperand::MOStore) {
788 if (MO.isUndef()) {
789 // If this is an undef copy, we do not need to bother inserting spill
790 // code.
791 BuildMI(*MBB, Pos, MI.getDebugLoc(), get(TargetOpcode::KILL)).add(MO);
792 } else {
793 storeRegToStackSlot(*MBB, Pos, MO.getReg(), MO.isKill(), FI, RC,
794 Register());
795 }
796 } else
797 loadRegFromStackSlot(*MBB, Pos, MO.getReg(), FI, RC, Register());
798
799 return &*--Pos;
800}
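// Usage sketch (illustrative): attempting to fold a spill of operand 0 of MI
// into the instruction itself; a nullptr result means the caller must emit a
// separate store.
static MachineInstr *exampleFoldToStackSlot(const TargetInstrInfo &TII,
                                            MachineInstr &MI, int FI) {
  const unsigned Ops[] = {0};
  return TII.foldMemoryOperand(MI, Ops, FI);
}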
801
804 MachineInstr &LoadMI,
805 LiveIntervals *LIS) const {
806 assert(LoadMI.canFoldAsLoad() && "LoadMI isn't foldable!");
807#ifndef NDEBUG
808 for (unsigned OpIdx : Ops)
809 assert(MI.getOperand(OpIdx).isUse() && "Folding load into def!");
810#endif
811
812 MachineBasicBlock &MBB = *MI.getParent();
813 MachineFunction &MF = *MBB.getParent();
814
815 // Ask the target to do the actual folding.
816 MachineInstr *NewMI = nullptr;
817 int FrameIndex = 0;
818
819 if ((MI.getOpcode() == TargetOpcode::STACKMAP ||
820 MI.getOpcode() == TargetOpcode::PATCHPOINT ||
821 MI.getOpcode() == TargetOpcode::STATEPOINT) &&
822 isLoadFromStackSlot(LoadMI, FrameIndex)) {
823 // Fold stackmap/patchpoint.
824 NewMI = foldPatchpoint(MF, MI, Ops, FrameIndex, *this);
825 if (NewMI)
826 NewMI = &*MBB.insert(MI, NewMI);
827 } else if (MI.isInlineAsm() && isLoadFromStackSlot(LoadMI, FrameIndex)) {
828 return foldInlineAsmMemOperand(MI, Ops, FrameIndex, *this);
829 } else {
830 // Ask the target to do the actual folding.
831 NewMI = foldMemoryOperandImpl(MF, MI, Ops, MI, LoadMI, LIS);
832 }
833
834 if (!NewMI)
835 return nullptr;
836
837 // Copy the memoperands from the load to the folded instruction.
838 if (MI.memoperands_empty()) {
839 NewMI->setMemRefs(MF, LoadMI.memoperands());
840 } else {
841 // Handle the rare case of folding multiple loads.
842 NewMI->setMemRefs(MF, MI.memoperands());
844 E = LoadMI.memoperands_end();
845 I != E; ++I) {
846 NewMI->addMemOperand(MF, *I);
847 }
848 }
849 return NewMI;
850}
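// Usage sketch (illustrative): folding a reload directly into its single use,
// e.g. rewriting "%r = <reload>; ... use of %r" into a use with the memory
// reference folded in.
static MachineInstr *exampleFoldLoad(const TargetInstrInfo &TII,
                                     MachineInstr &UseMI,
                                     MachineInstr &LoadMI) {
  const unsigned Ops[] = {1}; // The use operand to replace with memory.
  return TII.foldMemoryOperand(UseMI, Ops, LoadMI);
}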
851
852/// transferImplicitOperands - MI is a pseudo-instruction, and the lowered
853/// replacement instructions immediately precede it. Copy any implicit
854/// operands from MI to the replacement instruction.
856 const TargetRegisterInfo *TRI) {
858 --CopyMI;
859
860 Register DstReg = MI->getOperand(0).getReg();
861 for (const MachineOperand &MO : MI->implicit_operands()) {
862 CopyMI->addOperand(MO);
863
864 // Be conservative about preserving kills when subregister defs are
865 // involved. If there was an implicit kill of a super-register overlapping the
866 // copy result, we would kill subregisters that previous copies defined.
867
868 if (MO.isKill() && TRI->regsOverlap(DstReg, MO.getReg()))
869 CopyMI->getOperand(CopyMI->getNumOperands() - 1).setIsKill(false);
870 }
871}
872
874 MachineInstr *MI, const TargetRegisterInfo * /*Remove me*/) const {
875 if (MI->allDefsAreDead()) {
876 MI->setDesc(get(TargetOpcode::KILL));
877 return;
878 }
879
880 MachineOperand &DstMO = MI->getOperand(0);
881 MachineOperand &SrcMO = MI->getOperand(1);
882
883 bool IdentityCopy = (SrcMO.getReg() == DstMO.getReg());
884 if (IdentityCopy || SrcMO.isUndef()) {
885 // No need to insert an identity copy instruction, but replace with a KILL
886 // if liveness is changed.
887 if (SrcMO.isUndef() || MI->getNumOperands() > 2) {
888 // We must make sure the super-register gets killed. Replace the
889 // instruction with KILL.
890 MI->setDesc(get(TargetOpcode::KILL));
891 return;
892 }
893 // Vanilla identity copy.
894 MI->eraseFromParent();
895 return;
896 }
897
898 copyPhysReg(*MI->getParent(), MI, MI->getDebugLoc(), DstMO.getReg(),
899 SrcMO.getReg(), SrcMO.isKill(),
900 DstMO.getReg().isPhysical() ? DstMO.isRenamable() : false,
901 SrcMO.getReg().isPhysical() ? SrcMO.isRenamable() : false);
902
903 if (MI->getNumOperands() > 2)
905 MI->eraseFromParent();
906}
907
909 const MachineInstr &Inst, const MachineBasicBlock *MBB) const {
910 const MachineOperand &Op1 = Inst.getOperand(1);
911 const MachineOperand &Op2 = Inst.getOperand(2);
912 const MachineRegisterInfo &MRI = MBB->getParent()->getRegInfo();
913
914 // We need virtual register definitions for the operands that we will
915 // reassociate.
916 MachineInstr *MI1 = nullptr;
917 MachineInstr *MI2 = nullptr;
918 if (Op1.isReg() && Op1.getReg().isVirtual())
919 MI1 = MRI.getUniqueVRegDef(Op1.getReg());
920 if (Op2.isReg() && Op2.getReg().isVirtual())
921 MI2 = MRI.getUniqueVRegDef(Op2.getReg());
922
923 // And at least one operand must be defined in MBB.
924 return MI1 && MI2 && (MI1->getParent() == MBB || MI2->getParent() == MBB);
925}
926
928 unsigned Opcode2) const {
929 return Opcode1 == Opcode2 || getInverseOpcode(Opcode1) == Opcode2;
930}
931
933 bool &Commuted) const {
934 const MachineBasicBlock *MBB = Inst.getParent();
935 const MachineRegisterInfo &MRI = MBB->getParent()->getRegInfo();
936 MachineInstr *MI1 = MRI.getUniqueVRegDef(Inst.getOperand(1).getReg());
937 MachineInstr *MI2 = MRI.getUniqueVRegDef(Inst.getOperand(2).getReg());
938 unsigned Opcode = Inst.getOpcode();
939
940 // If only one operand has the same or inverse opcode and it's the second
941 // source operand, the operands must be commuted.
942 Commuted = !areOpcodesEqualOrInverse(Opcode, MI1->getOpcode()) &&
943 areOpcodesEqualOrInverse(Opcode, MI2->getOpcode());
944 if (Commuted)
945 std::swap(MI1, MI2);
946
947 // 1. The previous instruction must be the same type as Inst.
948 // 2. The previous instruction must also be associative/commutative or be the
949 // inverse of such an operation (this can be different even for
950 // instructions with the same opcode if traits like fast-math-flags are
951 // included).
952 // 3. The previous instruction must have virtual register definitions for its
953 // operands in the same basic block as Inst.
954 // 4. The previous instruction's result must only be used by Inst.
955 return areOpcodesEqualOrInverse(Opcode, MI1->getOpcode()) &&
957 isAssociativeAndCommutative(*MI1, /* Invert */ true)) &&
959 MRI.hasOneNonDBGUse(MI1->getOperand(0).getReg());
960}
961
962// 1. The operation must be associative and commutative or be the inverse of
963// such an operation.
964// 2. The instruction must have virtual register definitions for its
965// operands in the same basic block.
966// 3. The instruction must have a reassociable sibling.
968 bool &Commuted) const {
969 return (isAssociativeAndCommutative(Inst) ||
970 isAssociativeAndCommutative(Inst, /* Invert */ true)) &&
971 hasReassociableOperands(Inst, Inst.getParent()) &&
972 hasReassociableSibling(Inst, Commuted);
973}
974
975// Utility routine that checks if \param MO is defined by an
976// \param CombineOpc instruction in the basic block \param MBB.
977// If \param CombineOpc is not provided, the OpCode check will
978// be skipped.
980 unsigned CombineOpc = 0) {
981 MachineRegisterInfo &MRI = MBB.getParent()->getRegInfo();
982 MachineInstr *MI = nullptr;
983
984 if (MO.isReg() && MO.getReg().isVirtual())
985 MI = MRI.getUniqueVRegDef(MO.getReg());
986 // And it needs to be in the trace (otherwise, it won't have a depth).
987 if (!MI || MI->getParent() != &MBB ||
988 (MI->getOpcode() != CombineOpc && CombineOpc != 0))
989 return false;
990 // It must only be used by the user we combine with.
991 if (!MRI.hasOneNonDBGUse(MO.getReg()))
992 return false;
993
994 return true;
995}
996
997// A chain of accumulation instructions will be selected IFF:
998// 1. All the accumulation instructions in the chain have the same opcode,
999// besides the first that has a slightly different opcode because it does
1000// not accumulate into a register.
1001// 2. All the instructions in the chain are combinable (have a single use
1002// which itself is part of the chain).
1003// 3. Meets the required minimum length.
1005 MachineInstr *CurrentInstr, SmallVectorImpl<Register> &Chain) const {
1006 // Walk up the chain of accumulation instructions and collect them in the
1007 // vector.
1008 MachineBasicBlock &MBB = *CurrentInstr->getParent();
1009 const MachineRegisterInfo &MRI = MBB.getParent()->getRegInfo();
1010 unsigned AccumulatorOpcode = CurrentInstr->getOpcode();
1011 std::optional<unsigned> ChainStartOpCode =
1012 getAccumulationStartOpcode(AccumulatorOpcode);
1013
1014 if (!ChainStartOpCode.has_value())
1015 return;
1016
1017 // Push the first accumulator result to the start of the chain.
1018 Chain.push_back(CurrentInstr->getOperand(0).getReg());
1019
1020 // Collect the accumulator input register from all instructions in the chain.
1021 while (CurrentInstr &&
1022 canCombine(MBB, CurrentInstr->getOperand(1), AccumulatorOpcode)) {
1023 Chain.push_back(CurrentInstr->getOperand(1).getReg());
1024 CurrentInstr = MRI.getUniqueVRegDef(CurrentInstr->getOperand(1).getReg());
1025 }
1026
1027 // Add the instruction at the top of the chain.
1028 if (CurrentInstr->getOpcode() == AccumulatorOpcode &&
1029 canCombine(MBB, CurrentInstr->getOperand(1)))
1030 Chain.push_back(CurrentInstr->getOperand(1).getReg());
1031}
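// Shape of a chain this walk collects (pseudo machine IR, hypothetical
// opcodes):
//   %1 = ACC_START %a, %b
//   %2 = ACC %1, %c, %d
//   %3 = ACC %2, %e, %f   <- CurrentInstr on entry
// The chain is gathered bottom-up through each operand 1, giving
// Chain == {%3, %2, %1}.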
1032
1033/// Find chains of accumulations that can be rewritten as a tree for increased
1034/// ILP.
1036 MachineInstr &Root, SmallVectorImpl<unsigned> &Patterns) const {
1037 if (!EnableAccReassociation)
1038 return false;
1039
1040 unsigned Opc = Root.getOpcode();
1042 return false;
1043
1044 // Verify that this is the end of the chain.
1045 MachineBasicBlock &MBB = *Root.getParent();
1046 MachineRegisterInfo &MRI = MBB.getParent()->getRegInfo();
1047 if (!MRI.hasOneNonDBGUser(Root.getOperand(0).getReg()))
1048 return false;
1049
1050 auto User = MRI.use_instr_begin(Root.getOperand(0).getReg());
1051 if (User->getOpcode() == Opc)
1052 return false;
1053
1054 // Walk up the use chain and collect the reduction chain.
1056 getAccumulatorChain(&Root, Chain);
1057
1058 // Reject chains which are too short to be worth modifying.
1059 if (Chain.size() < MinAccumulatorDepth)
1060 return false;
1061
1062 // Check if the MBB this instruction is a part of contains any other chains.
1063 // If so, don't apply it.
1064 SmallSet<Register, 32> ReductionChain(llvm::from_range, Chain);
1065 for (const auto &I : MBB) {
1066 if (I.getOpcode() == Opc &&
1067 !ReductionChain.contains(I.getOperand(0).getReg()))
1068 return false;
1069 }
1070
1072 return true;
1073}
1074
1075// Reduce branches of the accumulator tree by adding them together.
1077 SmallVectorImpl<Register> &RegistersToReduce,
1080 DenseMap<Register, unsigned> &InstrIdxForVirtReg,
1081 Register ResultReg) const {
1082 SmallVector<Register, 8> NewRegs;
1083
1084
1085 // Get the opcode for the reduction instruction we will need to build.
1086 // If for some reason it is not defined, early exit and don't apply this.
1087 unsigned ReduceOpCode = getReduceOpcodeForAccumulator(Root.getOpcode());
1088
1089 for (unsigned int i = 1; i <= (RegistersToReduce.size() / 2); i += 2) {
1090 auto RHS = RegistersToReduce[i - 1];
1091 auto LHS = RegistersToReduce[i];
1092 Register Dest;
1093 // If we are reducing 2 registers, reuse the original result register.
1094 if (RegistersToReduce.size() == 2)
1095 Dest = ResultReg;
1096 // Otherwise, create a new virtual register to hold the partial sum.
1097 else {
1098 auto NewVR = MRI.createVirtualRegister(
1099 MRI.getRegClass(Root.getOperand(0).getReg()));
1100 Dest = NewVR;
1101 NewRegs.push_back(Dest);
1102 InstrIdxForVirtReg.insert(std::make_pair(Dest, InsInstrs.size()));
1103 }
1104
1105 // Create the new reduction instruction.
1107 BuildMI(MF, MIMetadata(Root), TII->get(ReduceOpCode), Dest)
1108 .addReg(RHS, getKillRegState(true))
1109 .addReg(LHS, getKillRegState(true));
1110 // Copy any flags needed from the original instruction.
1111 MIB->setFlags(Root.getFlags());
1112 InsInstrs.push_back(MIB);
1113 }
1114
1115 // If the number of registers to reduce is odd, add the remaining register to
1116 // the vector of registers to reduce.
1117 if (RegistersToReduce.size() % 2 != 0)
1118 NewRegs.push_back(RegistersToReduce[RegistersToReduce.size() - 1]);
1119
1120 RegistersToReduce = NewRegs;
1121}
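// Sketch of the reduction (pseudo machine IR, hypothetical REDUCE opcode):
// with RegistersToReduce == {%r0, %r1, %r2}, one call emits
// "%p = REDUCE %r0, %r1" and leaves {%p, %r2}; the next call emits
// "ResultReg = REDUCE %p, %r2".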
1122
1123// The concept of the reassociation pass is that these operations can benefit
1124// from this kind of transformation:
1125//
1126// A = ? op ?
1127// B = A op X (Prev)
1128// C = B op Y (Root)
1129// -->
1130// A = ? op ?
1131// B = X op Y
1132// C = A op B
1133//
1134// breaking the dependency between A and B, allowing them to be executed in
1135// parallel (or back-to-back in a pipeline) instead of depending on each other.
1136
1137// FIXME: This has the potential to be expensive (compile time) while not
1138// improving the code at all. Some ways to limit the overhead:
1139// 1. Track successful transforms; bail out if hit rate gets too low.
1140// 2. Only enable at -O3 or some other non-default optimization level.
1141// 3. Pre-screen pattern candidates here: if an operand of the previous
1142// instruction is known to not increase the critical path, then don't match
1143// that pattern.
1145 MachineInstr &Root, SmallVectorImpl<unsigned> &Patterns,
1146 bool DoRegPressureReduce) const {
1147 bool Commute;
1148 if (isReassociationCandidate(Root, Commute)) {
1149 // We found a sequence of instructions that may be suitable for a
1150 // reassociation of operands to increase ILP. Specify each commutation
1151 // possibility for the Prev instruction in the sequence and let the
1152 // machine combiner decide if changing the operands is worthwhile.
1153 if (Commute) {
1156 } else {
1159 }
1160 return true;
1161 }
1162 if (getAccumulatorReassociationPatterns(Root, Patterns))
1163 return true;
1164
1165 return false;
1166}
1167
1168/// Return true when a code sequence can improve loop throughput.
1170 return false;
1171}
1172
1175 switch (Pattern) {
1178 default:
1180 }
1181}
1182
1183std::pair<unsigned, unsigned>
1185 const MachineInstr &Root,
1186 const MachineInstr &Prev) const {
1187 bool AssocCommutRoot = isAssociativeAndCommutative(Root);
1188 bool AssocCommutPrev = isAssociativeAndCommutative(Prev);
1189
1190 // Early exit if both opcodes are associative and commutative. It's a trivial
1191 // reassociation when we only change operands order. In this case opcodes are
1192 // not required to have inverse versions.
1193 if (AssocCommutRoot && AssocCommutPrev) {
1194 assert(Root.getOpcode() == Prev.getOpcode() && "Expected to be equal");
1195 return std::make_pair(Root.getOpcode(), Root.getOpcode());
1196 }
1197
1198 // At least one instruction is not associative or commutative.
1199 // Since we have matched one of the reassociation patterns, we expect that the
1200 // instructions' opcodes are equal or one of them is the inversion of the
1201 // other.
1203 "Incorrectly matched pattern");
1204 unsigned AssocCommutOpcode = Root.getOpcode();
1205 unsigned InverseOpcode = *getInverseOpcode(Root.getOpcode());
1206 if (!AssocCommutRoot)
1207 std::swap(AssocCommutOpcode, InverseOpcode);
1208
1209 // The transformation rule (`+` is any associative and commutative binary
1210 // operation, `-` is the inverse):
1211 // REASSOC_AX_BY:
1212 // (A + X) + Y => A + (X + Y)
1213 // (A + X) - Y => A + (X - Y)
1214 // (A - X) + Y => A - (X - Y)
1215 // (A - X) - Y => A - (X + Y)
1216 // REASSOC_XA_BY:
1217 // (X + A) + Y => (X + Y) + A
1218 // (X + A) - Y => (X - Y) + A
1219 // (X - A) + Y => (X + Y) - A
1220 // (X - A) - Y => (X - Y) - A
1221 // REASSOC_AX_YB:
1222 // Y + (A + X) => (Y + X) + A
1223 // Y - (A + X) => (Y - X) - A
1224 // Y + (A - X) => (Y - X) + A
1225 // Y - (A - X) => (Y + X) - A
1226 // REASSOC_XA_YB:
1227 // Y + (X + A) => (Y + X) + A
1228 // Y - (X + A) => (Y - X) - A
1229 // Y + (X - A) => (Y + X) - A
1230 // Y - (X - A) => (Y - X) + A
1231 switch (Pattern) {
1232 default:
1233 llvm_unreachable("Unexpected pattern");
1235 if (!AssocCommutRoot && AssocCommutPrev)
1236 return {AssocCommutOpcode, InverseOpcode};
1237 if (AssocCommutRoot && !AssocCommutPrev)
1238 return {InverseOpcode, InverseOpcode};
1239 if (!AssocCommutRoot && !AssocCommutPrev)
1240 return {InverseOpcode, AssocCommutOpcode};
1241 break;
1243 if (!AssocCommutRoot && AssocCommutPrev)
1244 return {AssocCommutOpcode, InverseOpcode};
1245 if (AssocCommutRoot && !AssocCommutPrev)
1246 return {InverseOpcode, AssocCommutOpcode};
1247 if (!AssocCommutRoot && !AssocCommutPrev)
1248 return {InverseOpcode, InverseOpcode};
1249 break;
1251 if (!AssocCommutRoot && AssocCommutPrev)
1252 return {InverseOpcode, InverseOpcode};
1253 if (AssocCommutRoot && !AssocCommutPrev)
1254 return {AssocCommutOpcode, InverseOpcode};
1255 if (!AssocCommutRoot && !AssocCommutPrev)
1256 return {InverseOpcode, AssocCommutOpcode};
1257 break;
1259 if (!AssocCommutRoot && AssocCommutPrev)
1260 return {InverseOpcode, InverseOpcode};
1261 if (AssocCommutRoot && !AssocCommutPrev)
1262 return {InverseOpcode, AssocCommutOpcode};
1263 if (!AssocCommutRoot && !AssocCommutPrev)
1264 return {AssocCommutOpcode, InverseOpcode};
1265 break;
1266 }
1267 llvm_unreachable("Unhandled combination");
1268}
1269
1270// Return a pair of boolean flags showing if the new root and new prev operands
1271// must be swapped. See visual example of the rule in
1272// TargetInstrInfo::getReassociationOpcodes.
1273static std::pair<bool, bool> mustSwapOperands(unsigned Pattern) {
1274 switch (Pattern) {
1275 default:
1276 llvm_unreachable("Unexpected pattern");
1278 return {false, false};
1280 return {true, false};
1282 return {true, true};
1284 return {true, true};
1285 }
1286}
1287
1289 const MachineInstr &Root, unsigned Pattern,
1290 std::array<unsigned, 5> &OperandIndices) const {
1291 switch (Pattern) {
1293 OperandIndices = {1, 1, 1, 2, 2};
1294 break;
1296 OperandIndices = {2, 1, 2, 2, 1};
1297 break;
1299 OperandIndices = {1, 2, 1, 1, 2};
1300 break;
1302 OperandIndices = {2, 2, 2, 1, 1};
1303 break;
1304 default:
1305 llvm_unreachable("unexpected MachineCombinerPattern");
1306 }
1307}
1308
1309/// Attempt the reassociation transformation to reduce critical path length.
1310/// See the above comments before getMachineCombinerPatterns().
1312 MachineInstr &Root, MachineInstr &Prev, unsigned Pattern,
1316 DenseMap<Register, unsigned> &InstrIdxForVirtReg) const {
1317 MachineFunction *MF = Root.getMF();
1320 const TargetRegisterClass *RC = Root.getRegClassConstraint(0, TII, &TRI);
1321
1326 MachineOperand &OpC = Root.getOperand(0);
1327
1328 Register RegA = OpA.getReg();
1329 unsigned SubRegA = OpA.getSubReg();
1330 Register RegB = OpB.getReg();
1331 Register RegX = OpX.getReg();
1332 unsigned SubRegX = OpX.getSubReg();
1333 Register RegY = OpY.getReg();
1334 unsigned SubRegY = OpY.getSubReg();
1335 Register RegC = OpC.getReg();
1336
1337 if (RegA.isVirtual())
1338 MRI.constrainRegClass(RegA, RC);
1339 if (RegB.isVirtual())
1340 MRI.constrainRegClass(RegB, RC);
1341 if (RegX.isVirtual())
1342 MRI.constrainRegClass(RegX, RC);
1343 if (RegY.isVirtual())
1344 MRI.constrainRegClass(RegY, RC);
1345 if (RegC.isVirtual())
1346 MRI.constrainRegClass(RegC, RC);
1347
1348 // Create a new virtual register for the result of (X op Y) instead of
1349 // recycling RegB because the MachineCombiner's computation of the critical
1350 // path requires a new register definition rather than an existing one.
1351 Register NewVR = MRI.createVirtualRegister(RC);
1352 unsigned SubRegNewVR = 0;
1353 InstrIdxForVirtReg.insert(std::make_pair(NewVR, 0));
1354
1355 auto [NewRootOpc, NewPrevOpc] = getReassociationOpcodes(Pattern, Root, Prev);
1356 bool KillA = OpA.isKill();
1357 bool KillX = OpX.isKill();
1358 bool KillY = OpY.isKill();
1359 bool KillNewVR = true;
1360
1361 auto [SwapRootOperands, SwapPrevOperands] = mustSwapOperands(Pattern);
1362
1363 if (SwapPrevOperands) {
1364 std::swap(RegX, RegY);
1365 std::swap(SubRegX, SubRegY);
1366 std::swap(KillX, KillY);
1367 }
1368
1369 unsigned PrevFirstOpIdx, PrevSecondOpIdx;
1370 unsigned RootFirstOpIdx, RootSecondOpIdx;
1371 switch (Pattern) {
1373 PrevFirstOpIdx = OperandIndices[1];
1374 PrevSecondOpIdx = OperandIndices[3];
1375 RootFirstOpIdx = OperandIndices[2];
1376 RootSecondOpIdx = OperandIndices[4];
1377 break;
1379 PrevFirstOpIdx = OperandIndices[1];
1380 PrevSecondOpIdx = OperandIndices[3];
1381 RootFirstOpIdx = OperandIndices[4];
1382 RootSecondOpIdx = OperandIndices[2];
1383 break;
1385 PrevFirstOpIdx = OperandIndices[3];
1386 PrevSecondOpIdx = OperandIndices[1];
1387 RootFirstOpIdx = OperandIndices[2];
1388 RootSecondOpIdx = OperandIndices[4];
1389 break;
1391 PrevFirstOpIdx = OperandIndices[3];
1392 PrevSecondOpIdx = OperandIndices[1];
1393 RootFirstOpIdx = OperandIndices[4];
1394 RootSecondOpIdx = OperandIndices[2];
1395 break;
1396 default:
1397 llvm_unreachable("unexpected MachineCombinerPattern");
1398 }
1399
1400 // Basically BuildMI but doesn't add implicit operands by default.
1401 auto buildMINoImplicit = [](MachineFunction &MF, const MIMetadata &MIMD,
1402 const MCInstrDesc &MCID, Register DestReg) {
1403 return MachineInstrBuilder(
1404 MF, MF.CreateMachineInstr(MCID, MIMD.getDL(), /*NoImpl=*/true))
1405 .copyMIMetadata(MIMD)
1406 .addReg(DestReg, RegState::Define);
1407 };
1408
1409 // Create new instructions for insertion.
1410 MachineInstrBuilder MIB1 =
1411 buildMINoImplicit(*MF, MIMetadata(Prev), TII->get(NewPrevOpc), NewVR);
1412 for (const auto &MO : Prev.explicit_operands()) {
1413 unsigned Idx = MO.getOperandNo();
1414 // Skip the result operand we'd already added.
1415 if (Idx == 0)
1416 continue;
1417 if (Idx == PrevFirstOpIdx)
1418 MIB1.addReg(RegX, getKillRegState(KillX), SubRegX);
1419 else if (Idx == PrevSecondOpIdx)
1420 MIB1.addReg(RegY, getKillRegState(KillY), SubRegY);
1421 else
1422 MIB1.add(MO);
1423 }
1424 MIB1.copyImplicitOps(Prev);
1425
1426 if (SwapRootOperands) {
1427 std::swap(RegA, NewVR);
1428 std::swap(SubRegA, SubRegNewVR);
1429 std::swap(KillA, KillNewVR);
1430 }
1431
1432 MachineInstrBuilder MIB2 =
1433 buildMINoImplicit(*MF, MIMetadata(Root), TII->get(NewRootOpc), RegC);
1434 for (const auto &MO : Root.explicit_operands()) {
1435 unsigned Idx = MO.getOperandNo();
1436 // Skip the result operand.
1437 if (Idx == 0)
1438 continue;
1439 if (Idx == RootFirstOpIdx)
1440 MIB2 = MIB2.addReg(RegA, getKillRegState(KillA), SubRegA);
1441 else if (Idx == RootSecondOpIdx)
1442 MIB2 = MIB2.addReg(NewVR, getKillRegState(KillNewVR), SubRegNewVR);
1443 else
1444 MIB2 = MIB2.add(MO);
1445 }
1446 MIB2.copyImplicitOps(Root);
1447
1448 // Propagate FP flags from the original instructions.
1449 // But clear poison-generating flags because those may not be valid now.
1450 // TODO: There should be a helper function for copying only fast-math-flags.
1451 uint32_t IntersectedFlags = Root.getFlags() & Prev.getFlags();
1452 MIB1->setFlags(IntersectedFlags);
1457
1458 MIB2->setFlags(IntersectedFlags);
1463
1464 setSpecialOperandAttr(Root, Prev, *MIB1, *MIB2);
1465
1466 // Record new instructions for insertion and old instructions for deletion.
1467 InsInstrs.push_back(MIB1);
1468 InsInstrs.push_back(MIB2);
1469 DelInstrs.push_back(&Prev);
1470 DelInstrs.push_back(&Root);
1471
1472 // We transformed:
1473 // B = A op X (Prev)
1474 // C = B op Y (Root)
1475 // Into:
1476 // B = X op Y (MIB1)
1477 // C = A op B (MIB2)
1478 // C has the same value as before, B doesn't; as such, keep the debug number
1479 // of C but not of B.
1480 if (unsigned OldRootNum = Root.peekDebugInstrNum())
1481 MIB2.getInstr()->setDebugInstrNum(OldRootNum);
1482}
1483
1485 MachineInstr &Root, unsigned Pattern,
1488 DenseMap<Register, unsigned> &InstIdxForVirtReg) const {
1490 MachineBasicBlock &MBB = *Root.getParent();
1491 MachineFunction &MF = *MBB.getParent();
1493
1494 switch (Pattern) {
1499 // Select the previous instruction in the sequence based on the input
1500 // pattern.
1501 std::array<unsigned, 5> OperandIndices;
1502 getReassociateOperandIndices(Root, Pattern, OperandIndices);
1503 MachineInstr *Prev =
1504 MRI.getUniqueVRegDef(Root.getOperand(OperandIndices[0]).getReg());
1505
1506 // Don't reassociate if Prev and Root are in different blocks.
1507 if (Prev->getParent() != Root.getParent())
1508 return;
1509
1510 reassociateOps(Root, *Prev, Pattern, InsInstrs, DelInstrs, OperandIndices,
1511 InstIdxForVirtReg);
1512 break;
1513 }
1515 SmallVector<Register, 32> ChainRegs;
1516 getAccumulatorChain(&Root, ChainRegs);
1517 unsigned int Depth = ChainRegs.size();
1519 "Max accumulator width set to illegal value");
1520 unsigned int MaxWidth = Log2_32(Depth) < MaxAccumulatorWidth
1521 ? Log2_32(Depth)
1522 : MaxAccumulatorWidth;
1523
1524 // Walk down the chain and rewrite it as a tree.
1525 for (auto IndexedReg : llvm::enumerate(llvm::reverse(ChainRegs))) {
1526 // No need to rewrite the first node, it is already perfect as it is.
1527 if (IndexedReg.index() == 0)
1528 continue;
1529
1530 // FIXME: Losing subregisters
1531 MachineInstr *Instr = MRI.getUniqueVRegDef(IndexedReg.value());
1532 MachineInstrBuilder MIB;
1533 Register AccReg;
1534 if (IndexedReg.index() < MaxWidth) {
1535 // Now we need to create new instructions for the first row.
1536 AccReg = Instr->getOperand(0).getReg();
1537 unsigned OpCode = getAccumulationStartOpcode(Root.getOpcode());
1538
1539 MIB = BuildMI(MF, MIMetadata(*Instr), TII->get(OpCode), AccReg)
1540 .addReg(Instr->getOperand(2).getReg(),
1541 getKillRegState(Instr->getOperand(2).isKill()))
1542 .addReg(Instr->getOperand(3).getReg(),
1543 getKillRegState(Instr->getOperand(3).isKill()));
1544 } else {
1545 // For the remaining cases, we need to use an output register of one of
1546 // the newly inserted instructions as operand 1.
1547 AccReg = Instr->getOperand(0).getReg() == Root.getOperand(0).getReg()
1548 ? MRI.createVirtualRegister(
1549 MRI.getRegClass(Root.getOperand(0).getReg()))
1550 : Instr->getOperand(0).getReg();
1551 assert(IndexedReg.index() >= MaxWidth);
1552 auto AccumulatorInput =
1553 ChainRegs[Depth - (IndexedReg.index() - MaxWidth) - 1];
1554 MIB = BuildMI(MF, MIMetadata(*Instr), TII->get(Instr->getOpcode()),
1555 AccReg)
1556 .addReg(AccumulatorInput, getKillRegState(true))
1557 .addReg(Instr->getOperand(2).getReg(),
1558 getKillRegState(Instr->getOperand(2).isKill()))
1559 .addReg(Instr->getOperand(3).getReg(),
1560 getKillRegState(Instr->getOperand(3).isKill()));
1561 }
1562
1563 MIB->setFlags(Instr->getFlags());
1564 InstIdxForVirtReg.insert(std::make_pair(AccReg, InsInstrs.size()));
1565 InsInstrs.push_back(MIB);
1566 DelInstrs.push_back(Instr);
1567 }
1568
1569 SmallVector<Register, 8> RegistersToReduce;
1570 for (unsigned i = (InsInstrs.size() - MaxWidth); i < InsInstrs.size();
1571 ++i) {
1572 auto Reg = InsInstrs[i]->getOperand(0).getReg();
1573 RegistersToReduce.push_back(Reg);
1574 }
1575
1576 while (RegistersToReduce.size() > 1)
1577 reduceAccumulatorTree(RegistersToReduce, InsInstrs, MF, Root, MRI,
1578 InstIdxForVirtReg, Root.getOperand(0).getReg());
1579
1580 break;
1581 }
1582 }
1583}
1584
1588
1590 const MachineInstr &MI) const {
1591 const MachineFunction &MF = *MI.getMF();
1592 const MachineRegisterInfo &MRI = MF.getRegInfo();
1593
1594 // Remat clients assume operand 0 is the defined register.
1595 if (!MI.getNumOperands() || !MI.getOperand(0).isReg())
1596 return false;
1597 Register DefReg = MI.getOperand(0).getReg();
1598
1599 // A sub-register definition can only be rematerialized if the instruction
1600 // doesn't read the other parts of the register. Otherwise it is really a
1601 // read-modify-write operation on the full virtual register which cannot be
1602 // moved safely.
1603 if (DefReg.isVirtual() && MI.getOperand(0).getSubReg() &&
1604 MI.readsVirtualRegister(DefReg))
1605 return false;
1606
1607 // A load from a fixed stack slot can be rematerialized. This may be
1608 // redundant with subsequent checks, but it's target-independent,
1609 // simple, and a common case.
1610 int FrameIdx = 0;
1611 if (isLoadFromStackSlot(MI, FrameIdx) &&
1612 MF.getFrameInfo().isImmutableObjectIndex(FrameIdx))
1613 return true;
1614
1615 // Avoid instructions obviously unsafe for remat.
1616 if (MI.isNotDuplicable() || MI.mayStore() || MI.mayRaiseFPException() ||
1617 MI.hasUnmodeledSideEffects())
1618 return false;
1619
1620 // Don't remat inline asm. We have no idea how expensive it is
1621 // even if it's side effect free.
1622 if (MI.isInlineAsm())
1623 return false;
1624
1625 // Avoid instructions which load from potentially varying memory.
1626 if (MI.mayLoad() && !MI.isDereferenceableInvariantLoad())
1627 return false;
1628
1629 // If any of the registers accessed are non-constant, conservatively assume
1630 // the instruction is not rematerializable.
1631 for (const MachineOperand &MO : MI.operands()) {
1632 if (!MO.isReg()) continue;
1633 Register Reg = MO.getReg();
1634 if (Reg == 0)
1635 continue;
1636
1637 // Check for a well-behaved physical register.
1638 if (Reg.isPhysical()) {
1639 if (MO.isUse()) {
1640 // If the physreg has no defs anywhere, it's just an ambient register
1641 // and we can freely move its uses. Alternatively, if it's allocatable,
1642 // it could get allocated to something with a def during allocation.
1643 if (!MRI.isConstantPhysReg(Reg))
1644 return false;
1645 } else {
1646 // A physreg def. We can't remat it.
1647 return false;
1648 }
1649 continue;
1650 }
1651
1652 // Only allow one virtual-register def. There may be multiple defs of the
1653 // same virtual register, though.
1654 if (MO.isDef() && Reg != DefReg)
1655 return false;
1656 }
1657
1658 // Everything checked out.
1659 return true;
1660}
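// Sketch: a constant materialization such as "%0 = MOV32ri 42" (x86) passes
// every check above: no physreg defs, no loads, no side effects. By contrast,
// a load from a mutable stack object fails the isDereferenceableInvariantLoad
// check and is not rematerialized.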
1661
1663 const MachineFunction *MF = MI.getMF();
1664 const TargetFrameLowering *TFI = MF->getSubtarget().getFrameLowering();
1665 bool StackGrowsDown =
1666 TFI->getStackGrowthDirection() == TargetFrameLowering::StackGrowsDown;
1667
1668 unsigned FrameSetupOpcode = getCallFrameSetupOpcode();
1669 unsigned FrameDestroyOpcode = getCallFrameDestroyOpcode();
1670
1671 if (!isFrameInstr(MI))
1672 return 0;
1673
1674 int SPAdj = TFI->alignSPAdjust(getFrameSize(MI));
1675
1676 if ((!StackGrowsDown && MI.getOpcode() == FrameSetupOpcode) ||
1677 (StackGrowsDown && MI.getOpcode() == FrameDestroyOpcode))
1678 SPAdj = -SPAdj;
1679
1680 return SPAdj;
1681}
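// Worked sketch (hypothetical target: stack grows down, 8-byte stack
// alignment): a call frame setup pseudo with getFrameSize(MI) == 20 is
// rounded by alignSPAdjust to 24, and getSPAdjust returns +24; the matching
// frame destroy instruction returns -24.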
1682
1683/// isSchedulingBoundary - Test if the given instruction should be
1684/// considered a scheduling boundary. This primarily includes labels
1685/// and terminators.
1687 const MachineBasicBlock *MBB,
1688 const MachineFunction &MF) const {
1689 // Terminators and labels can't be scheduled around.
1690 if (MI.isTerminator() || MI.isPosition())
1691 return true;
1692
1693 // INLINEASM_BR can jump to another block
1694 if (MI.getOpcode() == TargetOpcode::INLINEASM_BR)
1695 return true;
1696
1697 // Don't attempt to schedule around any instruction that defines
1698 // a stack-oriented pointer, as it's unlikely to be profitable. This
1699 // saves compile time, because it doesn't require every single
1700 // stack slot reference to depend on the instruction that does the
1701 // modification.
1702 const TargetLowering &TLI = *MF.getSubtarget().getTargetLowering();
1703 return MI.modifiesRegister(TLI.getStackPointerRegisterToSaveRestore(), &TRI);
1704}
1705
1706// Provide a global flag for disabling the PreRA hazard recognizer that targets
1707// may choose to honor.
1708bool TargetInstrInfo::usePreRAHazardRecognizer() const {
1709 return !DisableHazardRecognizer;
1710}
1711
1712// Default implementation of CreateTargetRAHazardRecognizer.
1715 const ScheduleDAG *DAG) const {
1716 // Dummy hazard recognizer allows all instructions to issue.
1717 return new ScheduleHazardRecognizer();
1718}
1719
1720// Default implementation of CreateTargetMIHazardRecognizer.
1722 const InstrItineraryData *II, const ScheduleDAGMI *DAG) const {
1723 return new ScoreboardHazardRecognizer(II, DAG, "machine-scheduler");
1724}
1725
1726// Default implementation of CreateTargetPostRAHazardRecognizer.
1727ScheduleHazardRecognizer *TargetInstrInfo::CreateTargetPostRAHazardRecognizer(
1728 const InstrItineraryData *II, const ScheduleDAG *DAG) const {
1729 return new ScoreboardHazardRecognizer(II, DAG, "post-RA-sched");
1730}
1731
1732
1733// Default implementation of getMemOperandWithOffset.
1735 const MachineInstr &MI, const MachineOperand *&BaseOp, int64_t &Offset,
1736 bool &OffsetIsScalable, const TargetRegisterInfo * /*RemoveMe*/) const {
1739 if (!getMemOperandsWithOffsetWidth(MI, BaseOps, Offset, OffsetIsScalable,
1740 Width, &TRI) ||
1741 BaseOps.size() != 1)
1742 return false;
1743 BaseOp = BaseOps.front();
1744 return true;
1745}
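// Usage sketch (illustrative): retrieving the single base operand of a simple
// memory access; returns false when the access has zero or several base
// operands. The trailing TRI argument is unused by this default
// implementation.
static bool exampleBaseOperand(const TargetInstrInfo &TII,
                               const MachineInstr &MI) {
  const MachineOperand *BaseOp = nullptr;
  int64_t Offset = 0;
  bool OffsetIsScalable = false;
  return TII.getMemOperandWithOffset(MI, BaseOp, Offset, OffsetIsScalable,
                                     /*TRI=*/nullptr);
}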
1746
1747//===----------------------------------------------------------------------===//
1748// SelectionDAG latency interface.
1749//===----------------------------------------------------------------------===//
1750
1751std::optional<unsigned>
1753 SDNode *DefNode, unsigned DefIdx,
1754 SDNode *UseNode, unsigned UseIdx) const {
1755 if (!ItinData || ItinData->isEmpty())
1756 return std::nullopt;
1757
1758 if (!DefNode->isMachineOpcode())
1759 return std::nullopt;
1760
1761 unsigned DefClass = get(DefNode->getMachineOpcode()).getSchedClass();
1762 if (!UseNode->isMachineOpcode())
1763 return ItinData->getOperandCycle(DefClass, DefIdx);
1764 unsigned UseClass = get(UseNode->getMachineOpcode()).getSchedClass();
1765 return ItinData->getOperandLatency(DefClass, DefIdx, UseClass, UseIdx);
1766}
1767
1768unsigned TargetInstrInfo::getInstrLatency(const InstrItineraryData *ItinData,
1769                                          SDNode *N) const {
1770 if (!ItinData || ItinData->isEmpty())
1771 return 1;
1772
1773 if (!N->isMachineOpcode())
1774 return 1;
1775
1776 return ItinData->getStageLatency(get(N->getMachineOpcode()).getSchedClass());
1777}
1778
1779//===----------------------------------------------------------------------===//
1780// MachineInstr latency interface.
1781//===----------------------------------------------------------------------===//
1782
1783unsigned TargetInstrInfo::getNumMicroOps(const InstrItineraryData *ItinData,
1784                                         const MachineInstr &MI) const {
1785 if (!ItinData || ItinData->isEmpty())
1786 return 1;
1787
1788 unsigned Class = MI.getDesc().getSchedClass();
1789 int UOps = ItinData->Itineraries[Class].NumMicroOps;
1790 if (UOps >= 0)
1791 return UOps;
1792
1793 // The # of u-ops is dynamically determined. The specific target should
1794 // override this function to return the right number.
1795 return 1;
1796}
1797
1798/// Return the default expected latency for a def based on its opcode.
1799unsigned TargetInstrInfo::defaultDefLatency(const MCSchedModel &SchedModel,
1800                                            const MachineInstr &DefMI) const {
1801 if (DefMI.isTransient())
1802 return 0;
1803 if (DefMI.mayLoad())
1804 return SchedModel.LoadLatency;
1805 if (isHighLatencyDef(DefMI.getOpcode()))
1806 return SchedModel.HighLatency;
1807 return 1;
1808}
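// [Editor's note] Illustrative call, not part of this file: schedulers fall
// back on this when no itinerary or per-operand data exists, e.g.
//   unsigned Lat =
//       TII.defaultDefLatency(MF.getSubtarget().getSchedModel(), DefMI);
// A transient instruction (e.g. COPY) yields 0, a load yields
// SchedModel.LoadLatency, a high-latency def yields SchedModel.HighLatency,
// and everything else yields 1.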
1809
1810unsigned TargetInstrInfo::getPredicationCost(const MachineInstr &MI) const {
1811  return 0;
1812}
1813
1814unsigned TargetInstrInfo::getInstrLatency(const InstrItineraryData *ItinData,
1815                                          const MachineInstr &MI,
1816 unsigned *PredCost) const {
1817 // Default to one cycle for no itinerary. However, an "empty" itinerary may
1818 // still have a MinLatency property, which getStageLatency checks.
1819 if (!ItinData)
1820 return MI.mayLoad() ? 2 : 1;
1821
1822 return ItinData->getStageLatency(MI.getDesc().getSchedClass());
1823}
1824
1825bool TargetInstrInfo::hasLowDefLatency(const TargetSchedModel &SchedModel,
1826                                       const MachineInstr &DefMI,
1827 unsigned DefIdx) const {
1828 const InstrItineraryData *ItinData = SchedModel.getInstrItineraries();
1829 if (!ItinData || ItinData->isEmpty())
1830 return false;
1831
1832 unsigned DefClass = DefMI.getDesc().getSchedClass();
1833 std::optional<unsigned> DefCycle =
1834 ItinData->getOperandCycle(DefClass, DefIdx);
1835 return DefCycle && DefCycle <= 1U;
1836}
1837
1838bool TargetInstrInfo::isFunctionSafeToSplit(const MachineFunction &MF) const {
1839  // TODO: We don't split functions where a section attribute has been set
1840 // since the split part may not be placed in a contiguous region. It may also
1841 // be more beneficial to augment the linker to ensure contiguous layout of
1842 // split functions within the same section as specified by the attribute.
1843 if (MF.getFunction().hasSection())
1844 return false;
1845
1846 // We don't want to proceed further for cold functions
1847 // or functions of unknown hotness. Lukewarm functions have no prefix.
1848 std::optional<StringRef> SectionPrefix = MF.getFunction().getSectionPrefix();
1849 if (SectionPrefix &&
1850 (*SectionPrefix == "unlikely" || *SectionPrefix == "unknown")) {
1851 return false;
1852 }
1853
1854 return true;
1855}
1856
1857std::optional<ParamLoadedValue>
1858TargetInstrInfo::describeLoadedValue(const MachineInstr &MI,
1859                                     Register Reg) const {
1860  const MachineFunction *MF = MI.getMF();
1861  DIExpression *Expr = DIExpression::get(MF->getFunction().getContext(), {});
1862  int64_t Offset;
1863 bool OffsetIsScalable;
1864
1865 // To simplify the sub-register handling, verify that we only need to
1866 // consider physical registers.
1867 assert(MF->getProperties().hasNoVRegs());
1868
1869 if (auto DestSrc = isCopyInstr(MI)) {
1870 Register DestReg = DestSrc->Destination->getReg();
1871
1872 // If the copy destination is the forwarding reg, describe the forwarding
1873 // reg using the copy source as the backup location. Example:
1874 //
1875 // x0 = MOV x7
1876 // call callee(x0) ; x0 described as x7
1877 if (Reg == DestReg)
1878 return ParamLoadedValue(*DestSrc->Source, Expr);
1879
1880 // If the target's hook couldn't describe this copy, give up.
1881 return std::nullopt;
1882 } else if (auto RegImm = isAddImmediate(MI, Reg)) {
1883 Register SrcReg = RegImm->Reg;
1884    Offset = RegImm->Imm;
1885    Expr = DIExpression::prepend(Expr, DIExpression::ApplyOffset, Offset);
1886 return ParamLoadedValue(MachineOperand::CreateReg(SrcReg, false), Expr);
1887 } else if (MI.hasOneMemOperand()) {
1888 // Only describe memory which provably does not escape the function. As
1889 // described in llvm.org/PR43343, escaped memory may be clobbered by the
1890 // callee (or by another thread).
1891 const MachineFrameInfo &MFI = MF->getFrameInfo();
1892 const MachineMemOperand *MMO = MI.memoperands()[0];
1893 const PseudoSourceValue *PSV = MMO->getPseudoValue();
1894
1895 // If the address points to "special" memory (e.g. a spill slot), it's
1896 // sufficient to check that it isn't aliased by any high-level IR value.
1897 if (!PSV || PSV->mayAlias(&MFI))
1898 return std::nullopt;
1899
1900 const MachineOperand *BaseOp;
1901 if (!getMemOperandWithOffset(MI, BaseOp, Offset, OffsetIsScalable, &TRI))
1902 return std::nullopt;
1903
1904 // FIXME: Scalable offsets are not yet handled in the offset code below.
1905 if (OffsetIsScalable)
1906 return std::nullopt;
1907
1908 // TODO: Can currently only handle mem instructions with a single define.
1909 // An example from the x86 target:
1910 // ...
1911 // DIV64m $rsp, 1, $noreg, 24, $noreg, implicit-def dead $rax, implicit-def $rdx
1912 // ...
1913 //
1914 if (MI.getNumExplicitDefs() != 1)
1915 return std::nullopt;
1916
1917 // TODO: In what way do we need to take Reg into consideration here?
1918
1919    SmallVector<uint64_t, 8> Ops;
1920    DIExpression::appendOffset(Ops, Offset);
1921    Ops.push_back(dwarf::DW_OP_deref_size);
1922 Ops.push_back(MMO->getSize().hasValue() ? MMO->getSize().getValue()
1923 : ~UINT64_C(0));
1924 Expr = DIExpression::prependOpcodes(Expr, Ops);
1925 return ParamLoadedValue(*BaseOp, Expr);
1926 }
1927
1928 return std::nullopt;
1929}
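// [Editor's note] Consumption sketch, not part of this file: call-site debug
// info treats the result as "location + DWARF expression". The function name
// is illustrative.
static void describeForCallSite(const MachineInstr &MI, Register Reg,
                                const TargetInstrInfo &TII) {
  if (std::optional<ParamLoadedValue> PLV = TII.describeLoadedValue(MI, Reg)) {
    const MachineOperand &Loc = PLV->first; // Register or memory base operand.
    const DIExpression *Expr = PLV->second; // Recipe to recover the value.
    (void)Loc;
    (void)Expr; // ... emit a call-site parameter entry here ...
  }
}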
1930
1931// Get the call frame size just before MI.
1932unsigned TargetInstrInfo::getCallFrameSizeAt(MachineInstr &MI) const {
1933  // Search backwards from MI for the most recent call frame instruction.
1934 MachineBasicBlock *MBB = MI.getParent();
1935 for (auto &AdjI : reverse(make_range(MBB->instr_begin(), MI.getIterator()))) {
1936 if (AdjI.getOpcode() == getCallFrameSetupOpcode())
1937 return getFrameTotalSize(AdjI);
1938 if (AdjI.getOpcode() == getCallFrameDestroyOpcode())
1939 return 0;
1940 }
1941
1942 // If none was found, use the call frame size from the start of the basic
1943 // block.
1944 return MBB->getCallFrameSize();
1945}
1946
1947/// Both DefMI and UseMI must be valid. By default, call directly to the
1948/// itinerary. This may be overridden by the target.
1949std::optional<unsigned> TargetInstrInfo::getOperandLatency(
1950    const InstrItineraryData *ItinData, const MachineInstr &DefMI,
1951 unsigned DefIdx, const MachineInstr &UseMI, unsigned UseIdx) const {
1952 unsigned DefClass = DefMI.getDesc().getSchedClass();
1953 unsigned UseClass = UseMI.getDesc().getSchedClass();
1954 return ItinData->getOperandLatency(DefClass, DefIdx, UseClass, UseIdx);
1955}
1956
1957bool TargetInstrInfo::getRegSequenceInputs(
1958    const MachineInstr &MI, unsigned DefIdx,
1959 SmallVectorImpl<RegSubRegPairAndIdx> &InputRegs) const {
1960  assert((MI.isRegSequence() || MI.isRegSequenceLike()) &&
1961         "Instruction does not have the proper type");
1962
1963 if (!MI.isRegSequence())
1964 return getRegSequenceLikeInputs(MI, DefIdx, InputRegs);
1965
1966 // We are looking at:
1967 // Def = REG_SEQUENCE v0, sub0, v1, sub1, ...
1968 assert(DefIdx == 0 && "REG_SEQUENCE only has one def");
1969 for (unsigned OpIdx = 1, EndOpIdx = MI.getNumOperands(); OpIdx != EndOpIdx;
1970 OpIdx += 2) {
1971 const MachineOperand &MOReg = MI.getOperand(OpIdx);
1972 if (MOReg.isUndef())
1973 continue;
1974 const MachineOperand &MOSubIdx = MI.getOperand(OpIdx + 1);
1975    assert(MOSubIdx.isImm() &&
1976           "One of the subindices of the reg_sequence is not an immediate");
1977 // Record Reg:SubReg, SubIdx.
1978 InputRegs.push_back(RegSubRegPairAndIdx(MOReg.getReg(), MOReg.getSubReg(),
1979 (unsigned)MOSubIdx.getImm()));
1980 }
1981 return true;
1982}
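// [Editor's note] Sketch, not part of this file: decomposing
//   %def = REG_SEQUENCE %v0, sub0, %v1, sub1
// into its inputs via the query above. The function name is illustrative.
static void visitRegSequenceInputs(const MachineInstr &MI,
                                   const TargetInstrInfo &TII) {
  SmallVector<TargetInstrInfo::RegSubRegPairAndIdx, 4> Inputs;
  if (!TII.getRegSequenceInputs(MI, /*DefIdx=*/0, Inputs))
    return;
  for (const TargetInstrInfo::RegSubRegPairAndIdx &In : Inputs) {
    // In.Reg/In.SubReg is a source value; In.SubIdx is the lane it fills
    // in the defined register.
    (void)In;
  }
}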
1983
1984bool TargetInstrInfo::getExtractSubregInputs(
1985    const MachineInstr &MI, unsigned DefIdx,
1986 RegSubRegPairAndIdx &InputReg) const {
1987  assert((MI.isExtractSubreg() || MI.isExtractSubregLike()) &&
1988         "Instruction does not have the proper type");
1989
1990 if (!MI.isExtractSubreg())
1991 return getExtractSubregLikeInputs(MI, DefIdx, InputReg);
1992
1993 // We are looking at:
1994 // Def = EXTRACT_SUBREG v0.sub1, sub0.
1995 assert(DefIdx == 0 && "EXTRACT_SUBREG only has one def");
1996 const MachineOperand &MOReg = MI.getOperand(1);
1997 if (MOReg.isUndef())
1998 return false;
1999 const MachineOperand &MOSubIdx = MI.getOperand(2);
2000 assert(MOSubIdx.isImm() &&
2001 "The subindex of the extract_subreg is not an immediate");
2002
2003 InputReg.Reg = MOReg.getReg();
2004 InputReg.SubReg = MOReg.getSubReg();
2005 InputReg.SubIdx = (unsigned)MOSubIdx.getImm();
2006 return true;
2007}
2008
2009bool TargetInstrInfo::getInsertSubregInputs(
2010    const MachineInstr &MI, unsigned DefIdx,
2011 RegSubRegPair &BaseReg, RegSubRegPairAndIdx &InsertedReg) const {
2012  assert((MI.isInsertSubreg() || MI.isInsertSubregLike()) &&
2013         "Instruction does not have the proper type");
2014
2015 if (!MI.isInsertSubreg())
2016 return getInsertSubregLikeInputs(MI, DefIdx, BaseReg, InsertedReg);
2017
2018 // We are looking at:
2019  // Def = INSERT_SUBREG v0, v1, sub0.
2020 assert(DefIdx == 0 && "INSERT_SUBREG only has one def");
2021 const MachineOperand &MOBaseReg = MI.getOperand(1);
2022 const MachineOperand &MOInsertedReg = MI.getOperand(2);
2023 if (MOInsertedReg.isUndef())
2024 return false;
2025 const MachineOperand &MOSubIdx = MI.getOperand(3);
2026  assert(MOSubIdx.isImm() &&
2027         "The subindex of the insert_subreg is not an immediate");
2028 BaseReg.Reg = MOBaseReg.getReg();
2029 BaseReg.SubReg = MOBaseReg.getSubReg();
2030
2031 InsertedReg.Reg = MOInsertedReg.getReg();
2032 InsertedReg.SubReg = MOInsertedReg.getSubReg();
2033 InsertedReg.SubIdx = (unsigned)MOSubIdx.getImm();
2034 return true;
2035}
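// [Editor's note] Sketch, not part of this file: for
//   %def = INSERT_SUBREG %base, %ins, subidx
// the query above yields the surrounding value and the inserted value plus
// its destination lane. The function name is illustrative.
static void visitInsertSubregInputs(const MachineInstr &MI,
                                    const TargetInstrInfo &TII) {
  TargetInstrInfo::RegSubRegPair Base;
  TargetInstrInfo::RegSubRegPairAndIdx Ins;
  if (TII.getInsertSubregInputs(MI, /*DefIdx=*/0, Base, Ins)) {
    // Base.Reg holds the existing value; Ins.Reg is written into lane
    // Ins.SubIdx of the result.
    (void)Base;
    (void)Ins;
  }
}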
2036
2037// Returns a MIRPrinter comment for this machine operand.
2038std::string TargetInstrInfo::createMIROperandComment(
2039    const MachineInstr &MI, const MachineOperand &Op, unsigned OpIdx,
2040 const TargetRegisterInfo * /*RemoveMe*/) const {
2041
2042 if (!MI.isInlineAsm())
2043 return "";
2044
2045 std::string Flags;
2046 raw_string_ostream OS(Flags);
2047
2048  if (OpIdx == InlineAsm::MIOp_ExtraInfo) {
2049    // Print HasSideEffects, MayLoad, MayStore, IsAlignStack
2050 unsigned ExtraInfo = Op.getImm();
2051 bool First = true;
2052 for (StringRef Info : InlineAsm::getExtraInfoNames(ExtraInfo)) {
2053 if (!First)
2054 OS << " ";
2055 First = false;
2056 OS << Info;
2057 }
2058
2059 return Flags;
2060 }
2061
2062 int FlagIdx = MI.findInlineAsmFlagIdx(OpIdx);
2063 if (FlagIdx < 0 || (unsigned)FlagIdx != OpIdx)
2064 return "";
2065
2066 assert(Op.isImm() && "Expected flag operand to be an immediate");
2067 // Pretty print the inline asm operand descriptor.
2068 unsigned Flag = Op.getImm();
2069 const InlineAsm::Flag F(Flag);
2070 OS << F.getKindName();
2071
2072 unsigned RCID;
2073 if (!F.isImmKind() && !F.isMemKind() && F.hasRegClassConstraint(RCID))
2074 OS << ':' << TRI.getRegClassName(TRI.getRegClass(RCID));
2075
2076 if (F.isMemKind()) {
2077 InlineAsm::ConstraintCode MCID = F.getMemoryConstraintID();
2078    OS << ":" << InlineAsm::getMemConstraintName(MCID);
2079  }
2080
2081 unsigned TiedTo;
2082 if (F.isUseOperandTiedToDef(TiedTo))
2083 OS << " tiedto:$" << TiedTo;
2084
2085 if ((F.isRegDefKind() || F.isRegDefEarlyClobberKind() || F.isRegUseKind()) &&
2086 F.getRegMayBeFolded())
2087 OS << " foldable";
2088
2089 return Flags;
2090}
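// [Editor's note] Illustrative output, an assumption about typical MIR: the
// printer attaches the returned string as a trailing comment on the flag
// immediate, so an inline-asm operand may print roughly as
//   INLINEASM &"..." [sideeffect], <flag-imm> /* regdef:GR64 */, def %0
// i.e. the kind name followed by the register-class constraint emitted above.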
2091
2092TargetInstrInfo::PipelinerLoopInfo::~PipelinerLoopInfo() = default;
2093
2094void TargetInstrInfo::mergeOutliningCandidateAttributes(
2095 Function &F, std::vector<outliner::Candidate> &Candidates) const {
2096 // Include target features from an arbitrary candidate for the outlined
2097 // function. This makes sure the outlined function knows what kinds of
2098 // instructions are going into it. This is fine, since all parent functions
2099 // must necessarily support the instructions that are in the outlined region.
2100 outliner::Candidate &FirstCand = Candidates.front();
2101 const Function &ParentFn = FirstCand.getMF()->getFunction();
2102 if (ParentFn.hasFnAttribute("target-features"))
2103 F.addFnAttr(ParentFn.getFnAttribute("target-features"));
2104 if (ParentFn.hasFnAttribute("target-cpu"))
2105 F.addFnAttr(ParentFn.getFnAttribute("target-cpu"));
2106
2107 // Set nounwind, so we don't generate eh_frame.
2108 if (llvm::all_of(Candidates, [](const outliner::Candidate &C) {
2109 return C.getMF()->getFunction().hasFnAttribute(Attribute::NoUnwind);
2110 }))
2111 F.addFnAttr(Attribute::NoUnwind);
2112}
2113
2114outliner::InstrType
2115TargetInstrInfo::getOutliningType(const MachineModuleInfo &MMI,
2116                                  MachineBasicBlock::iterator &MIT,
2117                                  unsigned Flags) const {
2118 MachineInstr &MI = *MIT;
2119
2120 // NOTE: MI.isMetaInstruction() will match CFI_INSTRUCTION, but some targets
2121 // have support for outlining those. Special-case that here.
2122 if (MI.isCFIInstruction())
2123 // Just go right to the target implementation.
2124 return getOutliningTypeImpl(MMI, MIT, Flags);
2125
2126 // Be conservative about inline assembly.
2127 if (MI.isInlineAsm())
2128    return outliner::InstrType::Illegal;
2129
2130 // Labels generally can't safely be outlined.
2131 if (MI.isLabel())
2132    return outliner::InstrType::Illegal;
2133
2134 // Don't let debug instructions impact analysis.
2135 if (MI.isDebugInstr())
2136    return outliner::InstrType::Invisible;
2137
2138 // Some other special cases.
2139 switch (MI.getOpcode()) {
2140 case TargetOpcode::IMPLICIT_DEF:
2141 case TargetOpcode::KILL:
2142 case TargetOpcode::LIFETIME_START:
2143 case TargetOpcode::LIFETIME_END:
2144    return outliner::InstrType::Invisible;
2145  default:
2146 break;
2147 }
2148
2149 // Is this a terminator for a basic block?
2150 if (MI.isTerminator()) {
2151 // If this is a branch to another block, we can't outline it.
2152 if (!MI.getParent()->succ_empty())
2153      return outliner::InstrType::Illegal;
2154
2155 // Don't outline if the branch is not unconditional.
2156 if (isPredicated(MI))
2157      return outliner::InstrType::Illegal;
2158  }
2159
2160 // Make sure none of the operands of this instruction do anything that
2161 // might break if they're moved outside their current function.
2162  // This includes MachineBasicBlock references, BlockAddresses,
2163 // Constant pool indices and jump table indices.
2164 //
2165 // A quick note on MO_TargetIndex:
2166 // This doesn't seem to be used in any of the architectures that the
2167 // MachineOutliner supports, but it was still filtered out in all of them.
2168 // There was one exception (RISC-V), but MO_TargetIndex also isn't used there.
2169 // As such, this check is removed both here and in the target-specific
2170 // implementations. Instead, we assert to make sure this doesn't
2171 // catch anyone off-guard somewhere down the line.
2172 for (const MachineOperand &MOP : MI.operands()) {
2173 // If you hit this assertion, please remove it and adjust
2174 // `getOutliningTypeImpl` for your target appropriately if necessary.
2175 // Adding the assertion back to other supported architectures
2176 // would be nice too :)
2177 assert(!MOP.isTargetIndex() && "This isn't used quite yet!");
2178
2179 // CFI instructions should already have been filtered out at this point.
2180 assert(!MOP.isCFIIndex() && "CFI instructions handled elsewhere!");
2181
2182 // PrologEpilogInserter should've already run at this point.
2183 assert(!MOP.isFI() && "FrameIndex instructions should be gone by now!");
2184
2185 if (MOP.isMBB() || MOP.isBlockAddress() || MOP.isCPI() || MOP.isJTI())
2186      return outliner::InstrType::Illegal;
2187  }
2188
2189 // If we don't know, delegate to the target-specific hook.
2190 return getOutliningTypeImpl(MMI, MIT, Flags);
2191}
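// [Editor's note] Caller sketch, not part of this file: the MachineOutliner
// classifies every instruction in a block this way; the iterator is taken by
// reference because a target may advance it. The function name is
// illustrative.
static void classifyForOutlining(MachineBasicBlock &MBB,
                                 const MachineModuleInfo &MMI,
                                 const TargetInstrInfo &TII, unsigned Flags) {
  for (MachineBasicBlock::iterator MIT = MBB.begin(); MIT != MBB.end();
       ++MIT) {
    outliner::InstrType T = TII.getOutliningType(MMI, MIT, Flags);
    (void)T; // Legal, LegalTerminator, Invisible, or Illegal.
  }
}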
2192
2193bool TargetInstrInfo::isMBBSafeToOutlineFrom(MachineBasicBlock &MBB,
2194                                             unsigned &Flags) const {
2195  // Some instrumentations create special TargetOpcodes at the start of a
2196  // function, which expand to code sequences that must be present.
2197 auto First = MBB.getFirstNonDebugInstr();
2198 if (First == MBB.end())
2199 return true;
2200
2201 if (First->getOpcode() == TargetOpcode::FENTRY_CALL ||
2202 First->getOpcode() == TargetOpcode::PATCHABLE_FUNCTION_ENTER)
2203 return false;
2204
2205 // Some instrumentations create special pseudo-instructions at or just before
2206 // the end that must be present.
2207 auto Last = MBB.getLastNonDebugInstr();
2208 if (Last->getOpcode() == TargetOpcode::PATCHABLE_RET ||
2209 Last->getOpcode() == TargetOpcode::PATCHABLE_TAIL_CALL)
2210 return false;
2211
2212 if (Last != First && Last->isReturn()) {
2213 --Last;
2214 if (Last->getOpcode() == TargetOpcode::PATCHABLE_FUNCTION_EXIT ||
2215 Last->getOpcode() == TargetOpcode::PATCHABLE_TAIL_CALL)
2216 return false;
2217 }
2218 return true;
2219}
2220
2221bool TargetInstrInfo::isGlobalMemoryObject(const MachineInstr *MI) const {
2222  return MI->isCall() || MI->hasUnmodeledSideEffects() ||
2223 (MI->hasOrderedMemoryRef() && !MI->isDereferenceableInvariantLoad());
2224}