1//===-- TargetInstrInfo.cpp - Target Instruction Information --------------===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8//
9// This file implements the TargetInstrInfo class.
10//
11//===----------------------------------------------------------------------===//
12
14#include "llvm/ADT/SmallSet.h"
31#include "llvm/IR/DataLayout.h"
33#include "llvm/MC/MCAsmInfo.h"
40
41using namespace llvm;
42
44 "disable-sched-hazard", cl::Hidden, cl::init(false),
45 cl::desc("Disable hazard detection during preRA scheduling"));
46
48 "acc-reassoc", cl::Hidden, cl::init(true),
49 cl::desc("Enable reassociation of accumulation chains"));
50
53 cl::desc("Minimum length of accumulator chains "
54 "required for the optimization to kick in"));
55
57 "acc-max-width", cl::Hidden, cl::init(3),
58 cl::desc("Maximum number of branches in the accumulator tree"));
59
61const TargetRegisterClass *
62TargetInstrInfo::getRegClass(const MCInstrDesc &MCID,
63 unsigned OpNum) const {
64 if (OpNum >= MCID.getNumOperands())
65 return nullptr;
66
67 const MCOperandInfo &OpInfo = MCID.operands()[OpNum];
68 int16_t RegClass = getOpRegClassID(OpInfo);
69
70 // Instructions like INSERT_SUBREG do not have fixed register classes.
71 if (RegClass < 0)
72 return nullptr;
73
74 // Otherwise just look it up normally.
75 return TRI.getRegClass(RegClass);
76}
77
78/// insertNoop - Insert a noop into the instruction stream at the specified
79/// point.
80void TargetInstrInfo::insertNoop(MachineBasicBlock &MBB,
81 MachineBasicBlock::iterator MI) const {
82 llvm_unreachable("Target didn't implement insertNoop!");
83}
84
85/// insertNoops - Insert noops into the instruction stream at the specified
86/// point.
87void TargetInstrInfo::insertNoops(MachineBasicBlock &MBB,
88 MachineBasicBlock::iterator MI,
89 unsigned Quantity) const {
90 for (unsigned i = 0; i < Quantity; ++i)
91 insertNoop(MBB, MI);
92}
93
94static bool isAsmComment(const char *Str, const MCAsmInfo &MAI) {
95 return strncmp(Str, MAI.getCommentString().data(),
96 MAI.getCommentString().size()) == 0;
97}
98
99/// Measure the specified inline asm to determine an approximation of its
100/// length.
101/// Comments (which run till the next SeparatorString or newline) do not
102/// count as an instruction.
103/// Any other non-whitespace text is considered an instruction, with
104/// multiple instructions separated by SeparatorString or newlines.
105/// Variable-length instructions are not handled here; this function
106/// may be overridden in the target code to do that.
107/// We implement a special case of the .space directive which takes only a
108/// single integer argument in base 10 that is the size in bytes. This is a
109/// restricted form of the GAS directive in that we only interpret
110/// simple--i.e. not a logical or arithmetic expression--size values without
111/// the optional fill value. This is primarily used for creating arbitrary
112/// sized inline asm blocks for testing purposes.
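/// For example, assuming a target whose MaxInstLength is 4, the string
/// "nop\n nop\n .space 8" measures 4 + 4 + 8 = 16 bytes: each of the two
/// instructions counts as MaxInstLength, and the .space directive
/// contributes its literal byte count.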
113unsigned TargetInstrInfo::getInlineAsmLength(
114 const char *Str,
115 const MCAsmInfo &MAI, const TargetSubtargetInfo *STI) const {
116 // Count the number of instructions in the asm.
117 bool AtInsnStart = true;
118 unsigned Length = 0;
119 const unsigned MaxInstLength = MAI.getMaxInstLength(STI);
120 for (; *Str; ++Str) {
121 if (*Str == '\n' || strncmp(Str, MAI.getSeparatorString(),
122 strlen(MAI.getSeparatorString())) == 0) {
123 AtInsnStart = true;
124 } else if (isAsmComment(Str, MAI)) {
125 // Stop counting as an instruction after a comment until the next
126 // separator.
127 AtInsnStart = false;
128 }
129
130 if (AtInsnStart && !isSpace(static_cast<unsigned char>(*Str))) {
131 unsigned AddLength = MaxInstLength;
132 if (strncmp(Str, ".space", 6) == 0) {
133 char *EStr;
134 int SpaceSize;
135 SpaceSize = strtol(Str + 6, &EStr, 10);
136 SpaceSize = SpaceSize < 0 ? 0 : SpaceSize;
137 while (*EStr != '\n' && isSpace(static_cast<unsigned char>(*EStr)))
138 ++EStr;
139 if (*EStr == '\0' || *EStr == '\n' ||
140 isAsmComment(EStr, MAI)) // Successfully parsed .space argument
141 AddLength = SpaceSize;
142 }
143 Length += AddLength;
144 AtInsnStart = false;
145 }
146 }
147
148 return Length;
149}
150
151/// ReplaceTailWithBranchTo - Delete the instruction OldInst and everything
152/// after it, replacing it with an unconditional branch to NewDest.
153void
154TargetInstrInfo::ReplaceTailWithBranchTo(MachineBasicBlock::iterator Tail,
155 MachineBasicBlock *NewDest) const {
156 MachineBasicBlock *MBB = Tail->getParent();
157
158 // Remove all the old successors of MBB from the CFG.
159 while (!MBB->succ_empty())
160 MBB->removeSuccessor(MBB->succ_begin());
161
162 // Save off the debug loc before erasing the instruction.
163 DebugLoc DL = Tail->getDebugLoc();
164
165 // Update call info and remove all the dead instructions
166 // from the end of MBB.
167 while (Tail != MBB->end()) {
168 auto MI = Tail++;
169 if (MI->shouldUpdateAdditionalCallInfo())
170 MBB->getParent()->eraseAdditionalCallInfo(&*MI);
171 MBB->erase(MI);
172 }
173
174 // If NewDest isn't immediately after MBB in layout order, insert a branch.
175 if (!MBB->isLayoutSuccessor(NewDest))
176 insertBranch(*MBB, NewDest, nullptr, SmallVector<MachineOperand, 0>(), DL);
177 MBB->addSuccessor(NewDest);
178}
179
180MachineInstr *TargetInstrInfo::commuteInstructionImpl(MachineInstr &MI,
181 bool NewMI, unsigned Idx1,
182 unsigned Idx2) const {
183 const MCInstrDesc &MCID = MI.getDesc();
184 bool HasDef = MCID.getNumDefs();
185 if (HasDef && !MI.getOperand(0).isReg())
186 // No idea how to commute this instruction. Target should implement its own.
187 return nullptr;
188
189 unsigned CommutableOpIdx1 = Idx1; (void)CommutableOpIdx1;
190 unsigned CommutableOpIdx2 = Idx2; (void)CommutableOpIdx2;
191 assert(findCommutedOpIndices(MI, CommutableOpIdx1, CommutableOpIdx2) &&
192 CommutableOpIdx1 == Idx1 && CommutableOpIdx2 == Idx2 &&
193 "TargetInstrInfo::CommuteInstructionImpl(): not commutable operands.");
194 assert(MI.getOperand(Idx1).isReg() && MI.getOperand(Idx2).isReg() &&
195 "This only knows how to commute register operands so far");
196
197 Register Reg0 = HasDef ? MI.getOperand(0).getReg() : Register();
198 Register Reg1 = MI.getOperand(Idx1).getReg();
199 Register Reg2 = MI.getOperand(Idx2).getReg();
200 unsigned SubReg0 = HasDef ? MI.getOperand(0).getSubReg() : 0;
201 unsigned SubReg1 = MI.getOperand(Idx1).getSubReg();
202 unsigned SubReg2 = MI.getOperand(Idx2).getSubReg();
203 bool Reg1IsKill = MI.getOperand(Idx1).isKill();
204 bool Reg2IsKill = MI.getOperand(Idx2).isKill();
205 bool Reg1IsUndef = MI.getOperand(Idx1).isUndef();
206 bool Reg2IsUndef = MI.getOperand(Idx2).isUndef();
207 bool Reg1IsInternal = MI.getOperand(Idx1).isInternalRead();
208 bool Reg2IsInternal = MI.getOperand(Idx2).isInternalRead();
209 // Avoid calling isRenamable for virtual registers since we assert that
210 // renamable property is only queried/set for physical registers.
211 bool Reg1IsRenamable =
212 Reg1.isPhysical() ? MI.getOperand(Idx1).isRenamable() : false;
213 bool Reg2IsRenamable =
214 Reg2.isPhysical() ? MI.getOperand(Idx2).isRenamable() : false;
215
216 // For a case like this:
217 // %0.sub = INST %0.sub(tied), %1.sub, implicit-def %0
218 // we need to update the implicit-def after commuting to result in:
219 // %1.sub = INST %1.sub(tied), %0.sub, implicit-def %1
220 SmallVector<unsigned> UpdateImplicitDefIdx;
221 if (HasDef && MI.hasImplicitDef()) {
222 for (auto [OpNo, MO] : llvm::enumerate(MI.implicit_operands())) {
223 Register ImplReg = MO.getReg();
224 if ((ImplReg.isVirtual() && ImplReg == Reg0) ||
225 (ImplReg.isPhysical() && Reg0.isPhysical() &&
226 TRI.isSubRegisterEq(ImplReg, Reg0)))
227 UpdateImplicitDefIdx.push_back(OpNo + MI.getNumExplicitOperands());
228 }
229 }
230
231 // If the destination is tied to either of the commuted source registers,
232 // it must be updated.
233 if (HasDef && Reg0 == Reg1 &&
234 MI.getDesc().getOperandConstraint(Idx1, MCOI::TIED_TO) == 0) {
235 Reg2IsKill = false;
236 Reg0 = Reg2;
237 SubReg0 = SubReg2;
238 } else if (HasDef && Reg0 == Reg2 &&
239 MI.getDesc().getOperandConstraint(Idx2, MCOI::TIED_TO) == 0) {
240 Reg1IsKill = false;
241 Reg0 = Reg1;
242 SubReg0 = SubReg1;
243 }
244
245 MachineInstr *CommutedMI = nullptr;
246 if (NewMI) {
247 // Create a new instruction.
248 MachineFunction &MF = *MI.getMF();
249 CommutedMI = MF.CloneMachineInstr(&MI);
250 } else {
251 CommutedMI = &MI;
252 }
253
254 if (HasDef) {
255 CommutedMI->getOperand(0).setReg(Reg0);
256 CommutedMI->getOperand(0).setSubReg(SubReg0);
257 for (unsigned Idx : UpdateImplicitDefIdx)
258 CommutedMI->getOperand(Idx).setReg(Reg0);
259 }
260 CommutedMI->getOperand(Idx2).setReg(Reg1);
261 CommutedMI->getOperand(Idx1).setReg(Reg2);
262 CommutedMI->getOperand(Idx2).setSubReg(SubReg1);
263 CommutedMI->getOperand(Idx1).setSubReg(SubReg2);
264 CommutedMI->getOperand(Idx2).setIsKill(Reg1IsKill);
265 CommutedMI->getOperand(Idx1).setIsKill(Reg2IsKill);
266 CommutedMI->getOperand(Idx2).setIsUndef(Reg1IsUndef);
267 CommutedMI->getOperand(Idx1).setIsUndef(Reg2IsUndef);
268 CommutedMI->getOperand(Idx2).setIsInternalRead(Reg1IsInternal);
269 CommutedMI->getOperand(Idx1).setIsInternalRead(Reg2IsInternal);
270 // Avoid calling setIsRenamable for virtual registers since we assert that
271 // renamable property is only queried/set for physical registers.
272 if (Reg1.isPhysical())
273 CommutedMI->getOperand(Idx2).setIsRenamable(Reg1IsRenamable);
274 if (Reg2.isPhysical())
275 CommutedMI->getOperand(Idx1).setIsRenamable(Reg2IsRenamable);
276 return CommutedMI;
277}
278
279MachineInstr *TargetInstrInfo::commuteInstruction(MachineInstr &MI, bool NewMI,
280 unsigned OpIdx1,
281 unsigned OpIdx2) const {
282 // If OpIdx1 or OpIdx2 is not specified, then this method is free to choose
283 // any commutable operand, which is done in findCommutedOpIndices() method
284 // called below.
285 if ((OpIdx1 == CommuteAnyOperandIndex || OpIdx2 == CommuteAnyOperandIndex) &&
286 !findCommutedOpIndices(MI, OpIdx1, OpIdx2)) {
287 assert(MI.isCommutable() &&
288 "Precondition violation: MI must be commutable.");
289 return nullptr;
290 }
291 return commuteInstructionImpl(MI, NewMI, OpIdx1, OpIdx2);
292}
293
294bool TargetInstrInfo::fixCommutedOpIndices(unsigned &ResultIdx1,
295 unsigned &ResultIdx2,
296 unsigned CommutableOpIdx1,
297 unsigned CommutableOpIdx2) {
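 // For example, given commutable operands (1, 2): a query of
 // (ResultIdx1 = CommuteAnyOperandIndex, ResultIdx2 = 2) resolves
 // ResultIdx1 to 1, while a fully fixed query of (2, 1) is accepted as the
 // swapped form of the same pair.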
298 if (ResultIdx1 == CommuteAnyOperandIndex &&
299 ResultIdx2 == CommuteAnyOperandIndex) {
300 ResultIdx1 = CommutableOpIdx1;
301 ResultIdx2 = CommutableOpIdx2;
302 } else if (ResultIdx1 == CommuteAnyOperandIndex) {
303 if (ResultIdx2 == CommutableOpIdx1)
304 ResultIdx1 = CommutableOpIdx2;
305 else if (ResultIdx2 == CommutableOpIdx2)
306 ResultIdx1 = CommutableOpIdx1;
307 else
308 return false;
309 } else if (ResultIdx2 == CommuteAnyOperandIndex) {
310 if (ResultIdx1 == CommutableOpIdx1)
311 ResultIdx2 = CommutableOpIdx2;
312 else if (ResultIdx1 == CommutableOpIdx2)
313 ResultIdx2 = CommutableOpIdx1;
314 else
315 return false;
316 } else
317 // Check that the result operand indices match the given commutable
318 // operand indices.
319 return (ResultIdx1 == CommutableOpIdx1 && ResultIdx2 == CommutableOpIdx2) ||
320 (ResultIdx1 == CommutableOpIdx2 && ResultIdx2 == CommutableOpIdx1);
321
322 return true;
323}
324
325bool TargetInstrInfo::findCommutedOpIndices(const MachineInstr &MI,
326 unsigned &SrcOpIdx1,
327 unsigned &SrcOpIdx2) const {
328 assert(!MI.isBundle() &&
329 "TargetInstrInfo::findCommutedOpIndices() can't handle bundles");
330
331 const MCInstrDesc &MCID = MI.getDesc();
332 if (!MCID.isCommutable())
333 return false;
334
335 // This assumes v0 = op v1, v2 and commuting would swap v1 and v2. If this
336 // is not true, then the target must implement this.
337 unsigned CommutableOpIdx1 = MCID.getNumDefs();
338 unsigned CommutableOpIdx2 = CommutableOpIdx1 + 1;
339 if (!fixCommutedOpIndices(SrcOpIdx1, SrcOpIdx2,
340 CommutableOpIdx1, CommutableOpIdx2))
341 return false;
342
343 if (!MI.getOperand(SrcOpIdx1).isReg() || !MI.getOperand(SrcOpIdx2).isReg())
344 // No idea.
345 return false;
346 return true;
347}
348
349bool TargetInstrInfo::isUnpredicatedTerminator(const MachineInstr &MI) const {
350 if (!MI.isTerminator()) return false;
351
352 // Conditional branch is a special case.
353 if (MI.isBranch() && !MI.isBarrier())
354 return true;
355 if (!MI.isPredicable())
356 return true;
357 return !isPredicated(MI);
358}
359
360bool TargetInstrInfo::PredicateInstruction(MachineInstr &MI,
361 ArrayRef<MachineOperand> Pred) const {
362 bool MadeChange = false;
363
364 assert(!MI.isBundle() &&
365 "TargetInstrInfo::PredicateInstruction() can't handle bundles");
366
367 const MCInstrDesc &MCID = MI.getDesc();
368 if (!MI.isPredicable())
369 return false;
370
371 for (unsigned j = 0, i = 0, e = MI.getNumOperands(); i != e; ++i) {
372 if (MCID.operands()[i].isPredicate()) {
373 MachineOperand &MO = MI.getOperand(i);
374 if (MO.isReg()) {
375 MO.setReg(Pred[j].getReg());
376 MadeChange = true;
377 } else if (MO.isImm()) {
378 MO.setImm(Pred[j].getImm());
379 MadeChange = true;
380 } else if (MO.isMBB()) {
381 MO.setMBB(Pred[j].getMBB());
382 MadeChange = true;
383 }
384 ++j;
385 }
386 }
387 return MadeChange;
388}
389
390bool TargetInstrInfo::hasLoadFromStackSlot(
391 const MachineInstr &MI,
392 SmallVectorImpl<const MachineMemOperand *> &Accesses) const {
393 size_t StartSize = Accesses.size();
394 for (MachineInstr::mmo_iterator o = MI.memoperands_begin(),
395 oe = MI.memoperands_end();
396 o != oe; ++o) {
397 if ((*o)->isLoad() &&
398 isa_and_nonnull<FixedStackPseudoSourceValue>((*o)->getPseudoValue()))
399 Accesses.push_back(*o);
400 }
401 return Accesses.size() != StartSize;
402}
403
404bool TargetInstrInfo::hasStoreToStackSlot(
405 const MachineInstr &MI,
406 SmallVectorImpl<const MachineMemOperand *> &Accesses) const {
407 size_t StartSize = Accesses.size();
408 for (MachineInstr::mmo_iterator o = MI.memoperands_begin(),
409 oe = MI.memoperands_end();
410 o != oe; ++o) {
411 if ((*o)->isStore() &&
412 isa_and_nonnull<FixedStackPseudoSourceValue>((*o)->getPseudoValue()))
413 Accesses.push_back(*o);
414 }
415 return Accesses.size() != StartSize;
416}
417
418bool TargetInstrInfo::getStackSlotRange(const TargetRegisterClass *RC,
419 unsigned SubIdx, unsigned &Size,
420 unsigned &Offset,
421 const MachineFunction &MF) const {
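 // For example, a sub-register index covering bits [64, 128) of a 16-byte
 // register class yields Size = 8 with Offset = 8 on a little-endian
 // target; a big-endian target flips the offset to 0.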
422 if (!SubIdx) {
423 Size = TRI.getSpillSize(*RC);
424 Offset = 0;
425 return true;
426 }
427 unsigned BitSize = TRI.getSubRegIdxSize(SubIdx);
428 // Convert bit size to byte size.
429 if (BitSize % 8)
430 return false;
431
432 int BitOffset = TRI.getSubRegIdxOffset(SubIdx);
433 if (BitOffset < 0 || BitOffset % 8)
434 return false;
435
436 Size = BitSize / 8;
437 Offset = (unsigned)BitOffset / 8;
438
439 assert(TRI.getSpillSize(*RC) >= (Offset + Size) && "bad subregister range");
440
441 if (!MF.getDataLayout().isLittleEndian()) {
442 Offset = TRI.getSpillSize(*RC) - (Offset + Size);
443 }
444 return true;
445}
446
447void TargetInstrInfo::reMaterialize(MachineBasicBlock &MBB,
448 MachineBasicBlock::iterator I,
449 Register DestReg, unsigned SubIdx,
450 const MachineInstr &Orig) const {
451 MachineInstr *MI = MBB.getParent()->CloneMachineInstr(&Orig);
452 MI->substituteRegister(MI->getOperand(0).getReg(), DestReg, SubIdx, TRI);
453 MBB.insert(I, MI);
454}
455
456bool TargetInstrInfo::produceSameValue(const MachineInstr &MI0,
457 const MachineInstr &MI1,
458 const MachineRegisterInfo *MRI) const {
459 return MI0.isIdenticalTo(MI1, MachineInstr::IgnoreVRegDefs);
460}
461
462MachineInstr &
463TargetInstrInfo::duplicate(MachineBasicBlock &MBB,
464 MachineBasicBlock::iterator InsertBefore,
465 const MachineInstr &Orig) const {
466 MachineFunction &MF = *MBB.getParent();
467 // CFI instructions are marked as non-duplicable, because Darwin compact
468 // unwind info emission can't handle multiple prologue setups.
469 assert((!Orig.isNotDuplicable() ||
470 (MF.getTarget().getTargetTriple().isOSDarwin() &&
471 Orig.isCFIInstruction())) &&
472 "Instruction cannot be duplicated");
473
474 return MF.cloneMachineInstrBundle(MBB, InsertBefore, Orig);
475}
476
477// If the COPY instruction in MI can be folded to a stack operation, return
478// the register class to use.
479static const TargetRegisterClass *canFoldCopy(const MachineInstr &MI,
480 const TargetInstrInfo &TII,
481 unsigned FoldIdx) {
482 assert(TII.isCopyInstr(MI) && "MI must be a COPY instruction");
483 if (MI.getNumOperands() != 2)
484 return nullptr;
485 assert(FoldIdx < 2 && "FoldIdx refers to a nonexistent operand");
486
487 const MachineOperand &FoldOp = MI.getOperand(FoldIdx);
488 const MachineOperand &LiveOp = MI.getOperand(1 - FoldIdx);
489
490 if (FoldOp.getSubReg() || LiveOp.getSubReg())
491 return nullptr;
492
493 Register FoldReg = FoldOp.getReg();
494 Register LiveReg = LiveOp.getReg();
495
496 assert(FoldReg.isVirtual() && "Cannot fold physregs");
497
498 const MachineRegisterInfo &MRI = MI.getMF()->getRegInfo();
499 const TargetRegisterClass *RC = MRI.getRegClass(FoldReg);
500
501 if (LiveOp.getReg().isPhysical())
502 return RC->contains(LiveOp.getReg()) ? RC : nullptr;
503
504 if (RC->hasSubClassEq(MRI.getRegClass(LiveReg)))
505 return RC;
506
507 // FIXME: Allow folding when register classes are memory compatible.
508 return nullptr;
509}
510
511MCInst TargetInstrInfo::getNop() const { llvm_unreachable("Not implemented"); }
512
513/// Try to remove the load by folding it to a register
514/// operand at the use. We fold the load instructions if the load defines a
515/// virtual register, the virtual register is used once in the same BB, and
516/// the instructions in-between do not load or store, and have no side effects.
517MachineInstr *TargetInstrInfo::optimizeLoadInstr(MachineInstr &MI,
518 const MachineRegisterInfo *MRI,
519 Register &FoldAsLoadDefReg,
520 MachineInstr *&DefMI) const {
521 // Check whether we can move DefMI here.
522 DefMI = MRI->getVRegDef(FoldAsLoadDefReg);
523 assert(DefMI);
524 bool SawStore = false;
525 if (!DefMI->isSafeToMove(SawStore))
526 return nullptr;
527
528 // Collect information about virtual register operands of MI.
529 SmallVector<unsigned, 1> SrcOperandIds;
530 for (unsigned i = 0, e = MI.getNumOperands(); i != e; ++i) {
531 MachineOperand &MO = MI.getOperand(i);
532 if (!MO.isReg())
533 continue;
534 Register Reg = MO.getReg();
535 if (Reg != FoldAsLoadDefReg)
536 continue;
537 // Do not fold if we have a subreg use or a def.
538 if (MO.getSubReg() || MO.isDef())
539 return nullptr;
540 SrcOperandIds.push_back(i);
541 }
542 if (SrcOperandIds.empty())
543 return nullptr;
544
545 // Check whether we can fold the def into one of the SrcOperandIds.
546 if (MachineInstr *FoldMI = foldMemoryOperand(MI, SrcOperandIds, *DefMI)) {
547 FoldAsLoadDefReg = 0;
548 return FoldMI;
549 }
550
551 return nullptr;
552}
553
554std::pair<unsigned, unsigned>
555TargetInstrInfo::getPatchpointUnfoldableRange(const MachineInstr &MI) const {
556 switch (MI.getOpcode()) {
557 case TargetOpcode::STACKMAP:
558 // StackMapLiveValues are foldable
559 return std::make_pair(0, StackMapOpers(&MI).getVarIdx());
560 case TargetOpcode::PATCHPOINT:
561 // For PatchPoint, the call args are not foldable (even if reported in the
562 // stackmap e.g. via anyregcc).
563 return std::make_pair(0, PatchPointOpers(&MI).getVarIdx());
564 case TargetOpcode::STATEPOINT:
565 // For statepoints, fold deopt and gc arguments, but not call arguments.
566 return std::make_pair(MI.getNumDefs(), StatepointOpers(&MI).getVarIdx());
567 default:
568 llvm_unreachable("unexpected stackmap opcode");
569 }
570}
571
572static MachineInstr *foldPatchpoint(MachineFunction &MF, MachineInstr &MI,
573 ArrayRef<unsigned> Ops, int FrameIndex,
574 const TargetInstrInfo &TII) {
575 unsigned StartIdx = 0;
576 unsigned NumDefs = 0;
577 // getPatchpointUnfoldableRange asserts if MI is not a patchpoint/stackmap.
578 std::tie(NumDefs, StartIdx) = TII.getPatchpointUnfoldableRange(MI);
579
580 unsigned DefToFoldIdx = MI.getNumOperands();
581
582 // Return false if any operands requested for folding are not foldable (not
583 // part of the stackmap's live values).
584 for (unsigned Op : Ops) {
585 if (Op < NumDefs) {
586 assert(DefToFoldIdx == MI.getNumOperands() && "Folding multiple defs");
587 DefToFoldIdx = Op;
588 } else if (Op < StartIdx) {
589 return nullptr;
590 }
591 if (MI.getOperand(Op).isTied())
592 return nullptr;
593 }
594
595 MachineInstr *NewMI =
596 MF.CreateMachineInstr(TII.get(MI.getOpcode()), MI.getDebugLoc(), true);
597 MachineInstrBuilder MIB(MF, NewMI);
598
599 // No need to fold the return value, the metadata, or the function arguments.
600 for (unsigned i = 0; i < StartIdx; ++i)
601 if (i != DefToFoldIdx)
602 MIB.add(MI.getOperand(i));
603
604 for (unsigned i = StartIdx, e = MI.getNumOperands(); i < e; ++i) {
605 MachineOperand &MO = MI.getOperand(i);
606 unsigned TiedTo = e;
607 (void)MI.isRegTiedToDefOperand(i, &TiedTo);
608
609 if (is_contained(Ops, i)) {
610 assert(TiedTo == e && "Cannot fold tied operands");
611 unsigned SpillSize;
612 unsigned SpillOffset;
613 // Compute the spill slot size and offset.
614 const TargetRegisterClass *RC =
615 MF.getRegInfo().getRegClass(MO.getReg());
616 bool Valid =
617 TII.getStackSlotRange(RC, MO.getSubReg(), SpillSize, SpillOffset, MF);
618 if (!Valid)
619 report_fatal_error("cannot spill patchpoint subregister operand");
620 MIB.addImm(StackMaps::IndirectMemRefOp);
621 MIB.addImm(SpillSize);
622 MIB.addFrameIndex(FrameIndex);
623 MIB.addImm(SpillOffset);
624 } else {
625 MIB.add(MO);
626 if (TiedTo < e) {
627 assert(TiedTo < NumDefs && "Bad tied operand");
628 if (TiedTo > DefToFoldIdx)
629 --TiedTo;
630 NewMI->tieOperands(TiedTo, NewMI->getNumOperands() - 1);
631 }
632 }
633 }
634 return NewMI;
635}
636
637static void foldInlineAsmMemOperand(MachineInstr *MI, unsigned OpNo, int FI,
638 const TargetInstrInfo &TII) {
639 // If the machine operand is tied, untie it first.
640 if (MI->getOperand(OpNo).isTied()) {
641 unsigned TiedTo = MI->findTiedOperandIdx(OpNo);
642 MI->untieRegOperand(OpNo);
643 // Intentional recursion!
644 foldInlineAsmMemOperand(MI, TiedTo, FI, TII);
645 }
646
647 SmallVector<MachineOperand, 5> NewOps;
648 TII.getFrameIndexOperands(NewOps, FI);
649 assert(!NewOps.empty() && "getFrameIndexOperands didn't create any operands");
650 MI->removeOperand(OpNo);
651 MI->insert(MI->operands_begin() + OpNo, NewOps);
652
653 // Change the previous operand to a MemKind InlineAsm::Flag. The second param
654 // is the per-target number of operands that represent the memory operand
655 // excluding this one (MD). This includes MO.
656 InlineAsm::Flag F(InlineAsm::Kind::Mem, NewOps.size());
657 F.setMemConstraint(InlineAsm::ConstraintCode::m);
658 MachineOperand &MD = MI->getOperand(OpNo - 1);
659 MD.setImm(F);
660}
661
662// Returns nullptr if not possible to fold.
663static MachineInstr *foldInlineAsmMemOperand(MachineInstr &MI,
664 ArrayRef<unsigned> Ops, int FI,
665 const TargetInstrInfo &TII) {
666 assert(MI.isInlineAsm() && "wrong opcode");
667 if (Ops.size() > 1)
668 return nullptr;
669 unsigned Op = Ops[0];
670 assert(Op && "should never be first operand");
671 assert(MI.getOperand(Op).isReg() && "shouldn't be folding non-reg operands");
672
673 if (!MI.mayFoldInlineAsmRegOp(Op))
674 return nullptr;
675
676 MachineInstr &NewMI = TII.duplicate(*MI.getParent(), MI.getIterator(), MI);
677
678 foldInlineAsmMemOperand(&NewMI, Op, FI, TII);
679
680 // Update mayload/maystore metadata, and memoperands.
681 const VirtRegInfo &RI =
682 AnalyzeVirtRegInBundle(MI, MI.getOperand(Op).getReg());
683 MachineOperand &ExtraMO = NewMI.getOperand(InlineAsm::MIOp_ExtraInfo);
684 MachineMemOperand::Flags Flags = MachineMemOperand::MONone;
685 if (RI.Reads) {
686 ExtraMO.setImm(ExtraMO.getImm() | InlineAsm::Extra_MayLoad);
687 Flags |= MachineMemOperand::MOLoad;
688 }
689 if (RI.Writes) {
690 ExtraMO.setImm(ExtraMO.getImm() | InlineAsm::Extra_MayStore);
691 Flags |= MachineMemOperand::MOStore;
692 }
693 MachineFunction *MF = NewMI.getMF();
694 const MachineFrameInfo &MFI = MF->getFrameInfo();
695 MachineMemOperand *MMO = MF->getMachineMemOperand(
696 MachinePointerInfo::getFixedStack(*MF, FI), Flags, MFI.getObjectSize(FI),
697 MFI.getObjectAlign(FI));
698 NewMI.addMemOperand(*MF, MMO);
699
700 return &NewMI;
701}
702
703MachineInstr *TargetInstrInfo::foldMemoryOperand(MachineInstr &MI,
704 ArrayRef<unsigned> Ops, int FI,
705 LiveIntervals *LIS,
706 VirtRegMap *VRM) const {
707 auto Flags = MachineMemOperand::MONone;
708 for (unsigned OpIdx : Ops)
709 Flags |= MI.getOperand(OpIdx).isDef() ? MachineMemOperand::MOStore
710 : MachineMemOperand::MOLoad;
711
712 MachineBasicBlock *MBB = MI.getParent();
713 assert(MBB && "foldMemoryOperand needs an inserted instruction");
714 MachineFunction &MF = *MBB->getParent();
715
716 // If we're not folding a load into a subreg, the size of the load is the
717 // size of the spill slot. But if we are, we need to figure out what the
718 // actual load size is.
719 int64_t MemSize = 0;
720 const MachineFrameInfo &MFI = MF.getFrameInfo();
721
722 if (Flags & MachineMemOperand::MOStore) {
723 MemSize = MFI.getObjectSize(FI);
724 } else {
725 for (unsigned OpIdx : Ops) {
726 int64_t OpSize = MFI.getObjectSize(FI);
727
728 if (auto SubReg = MI.getOperand(OpIdx).getSubReg()) {
729 unsigned SubRegSize = TRI.getSubRegIdxSize(SubReg);
730 if (SubRegSize > 0 && !(SubRegSize % 8))
731 OpSize = SubRegSize / 8;
732 }
733
734 MemSize = std::max(MemSize, OpSize);
735 }
736 }
737
738 assert(MemSize && "Did not expect a zero-sized stack slot");
739
740 MachineInstr *NewMI = nullptr;
741
742 if (MI.getOpcode() == TargetOpcode::STACKMAP ||
743 MI.getOpcode() == TargetOpcode::PATCHPOINT ||
744 MI.getOpcode() == TargetOpcode::STATEPOINT) {
745 // Fold stackmap/patchpoint.
746 NewMI = foldPatchpoint(MF, MI, Ops, FI, *this);
747 if (NewMI)
748 MBB->insert(MI, NewMI);
749 } else if (MI.isInlineAsm()) {
750 return foldInlineAsmMemOperand(MI, Ops, FI, *this);
751 } else {
752 // Ask the target to do the actual folding.
753 NewMI = foldMemoryOperandImpl(MF, MI, Ops, MI, FI, LIS, VRM);
754 }
755
756 if (NewMI) {
757 NewMI->setMemRefs(MF, MI.memoperands());
758 // Add a memory operand, foldMemoryOperandImpl doesn't do that.
759 assert((!(Flags & MachineMemOperand::MOStore) ||
760 NewMI->mayStore()) &&
761 "Folded a def to a non-store!");
762 assert((!(Flags & MachineMemOperand::MOLoad) ||
763 NewMI->mayLoad()) &&
764 "Folded a use to a non-load!");
765 assert(MFI.getObjectOffset(FI) != -1);
766 MachineMemOperand *MMO =
767 MF.getMachineMemOperand(MachinePointerInfo::getFixedStack(MF, FI),
768 Flags, MemSize, MFI.getObjectAlign(FI));
769 NewMI->addMemOperand(MF, MMO);
770
771 // The pass "x86 speculative load hardening" always attaches symbols to
772 // call instructions. We need copy it form old instruction.
773 NewMI->cloneInstrSymbols(MF, MI);
774
775 return NewMI;
776 }
777
778 // Straight COPY may fold as load/store.
779 if (!isCopyInstr(MI) || Ops.size() != 1)
780 return nullptr;
781
782 const TargetRegisterClass *RC = canFoldCopy(MI, *this, Ops[0]);
783 if (!RC)
784 return nullptr;
785
786 const MachineOperand &MO = MI.getOperand(1 - Ops[0]);
787 MachineBasicBlock::iterator Pos = MI;
788 if (Flags == MachineMemOperand::MOStore) {
789 if (MO.isUndef()) {
790 // If this is an undef copy, we do not need to bother inserting spill
791 // code.
792 BuildMI(*MBB, Pos, MI.getDebugLoc(), get(TargetOpcode::KILL)).add(MO);
793 } else {
794 storeRegToStackSlot(*MBB, Pos, MO.getReg(), MO.isKill(), FI, RC,
795 Register());
796 }
797 } else
798 loadRegFromStackSlot(*MBB, Pos, MO.getReg(), FI, RC, Register());
799
800 return &*--Pos;
801}
802
803MachineInstr *TargetInstrInfo::foldMemoryOperand(MachineInstr &MI,
804 ArrayRef<unsigned> Ops,
805 MachineInstr &LoadMI,
806 LiveIntervals *LIS) const {
807 assert(LoadMI.canFoldAsLoad() && "LoadMI isn't foldable!");
808#ifndef NDEBUG
809 for (unsigned OpIdx : Ops)
810 assert(MI.getOperand(OpIdx).isUse() && "Folding load into def!");
811#endif
812
813 MachineBasicBlock &MBB = *MI.getParent();
814 MachineFunction &MF = *MBB.getParent();
815
816 // Ask the target to do the actual folding.
817 MachineInstr *NewMI = nullptr;
818 int FrameIndex = 0;
819
820 if ((MI.getOpcode() == TargetOpcode::STACKMAP ||
821 MI.getOpcode() == TargetOpcode::PATCHPOINT ||
822 MI.getOpcode() == TargetOpcode::STATEPOINT) &&
823 isLoadFromStackSlot(LoadMI, FrameIndex)) {
824 // Fold stackmap/patchpoint.
825 NewMI = foldPatchpoint(MF, MI, Ops, FrameIndex, *this);
826 if (NewMI)
827 NewMI = &*MBB.insert(MI, NewMI);
828 } else if (MI.isInlineAsm() && isLoadFromStackSlot(LoadMI, FrameIndex)) {
829 return foldInlineAsmMemOperand(MI, Ops, FrameIndex, *this);
830 } else {
831 // Ask the target to do the actual folding.
832 NewMI = foldMemoryOperandImpl(MF, MI, Ops, MI, LoadMI, LIS);
833 }
834
835 if (!NewMI)
836 return nullptr;
837
838 // Copy the memoperands from the load to the folded instruction.
839 if (MI.memoperands_empty()) {
840 NewMI->setMemRefs(MF, LoadMI.memoperands());
841 } else {
842 // Handle the rare case of folding multiple loads.
843 NewMI->setMemRefs(MF, MI.memoperands());
844 for (MachineInstr::mmo_iterator I = LoadMI.memoperands_begin(),
845 E = LoadMI.memoperands_end();
846 I != E; ++I) {
847 NewMI->addMemOperand(MF, *I);
848 }
849 }
850 return NewMI;
851}
852
853/// transferImplicitOperands - MI is a pseudo-instruction, and the lowered
854/// replacement instructions immediately precede it. Copy any implicit
855/// operands from MI to the replacement instruction.
856static void transferImplicitOperands(MachineInstr *MI,
857 const TargetRegisterInfo *TRI) {
858 MachineBasicBlock::iterator CopyMI = MI;
859 --CopyMI;
860
861 Register DstReg = MI->getOperand(0).getReg();
862 for (const MachineOperand &MO : MI->implicit_operands()) {
863 CopyMI->addOperand(MO);
864
865 // Be conservative about preserving kills when subregister defs are
866 // involved. If there was an implicit kill of a super-register overlapping
867 // the copy result, we would kill subregisters that previous copies defined.
868
869 if (MO.isKill() && TRI->regsOverlap(DstReg, MO.getReg()))
870 CopyMI->getOperand(CopyMI->getNumOperands() - 1).setIsKill(false);
871 }
872}
873
874void TargetInstrInfo::lowerCopy(
875 MachineInstr *MI, const TargetRegisterInfo * /*Remove me*/) const {
876 if (MI->allDefsAreDead()) {
877 MI->setDesc(get(TargetOpcode::KILL));
878 return;
879 }
880
881 MachineOperand &DstMO = MI->getOperand(0);
882 MachineOperand &SrcMO = MI->getOperand(1);
883
884 bool IdentityCopy = (SrcMO.getReg() == DstMO.getReg());
885 if (IdentityCopy || SrcMO.isUndef()) {
886 // No need to insert an identity copy instruction, but replace with a KILL
887 // if liveness is changed.
888 if (SrcMO.isUndef() || MI->getNumOperands() > 2) {
889 // We must make sure the super-register gets killed. Replace the
890 // instruction with KILL.
891 MI->setDesc(get(TargetOpcode::KILL));
892 return;
893 }
894 // Vanilla identity copy.
895 MI->eraseFromParent();
896 return;
897 }
898
899 copyPhysReg(*MI->getParent(), MI, MI->getDebugLoc(), DstMO.getReg(),
900 SrcMO.getReg(), SrcMO.isKill(),
901 DstMO.getReg().isPhysical() ? DstMO.isRenamable() : false,
902 SrcMO.getReg().isPhysical() ? SrcMO.isRenamable() : false);
903
904 if (MI->getNumOperands() > 2)
905 transferImplicitOperands(MI, &TRI);
906 MI->eraseFromParent();
907}
908
909bool TargetInstrInfo::hasReassociableOperands(
910 const MachineInstr &Inst, const MachineBasicBlock *MBB) const {
911 const MachineOperand &Op1 = Inst.getOperand(1);
912 const MachineOperand &Op2 = Inst.getOperand(2);
913 const MachineRegisterInfo &MRI = MBB->getParent()->getRegInfo();
914
915 // We need virtual register definitions for the operands that we will
916 // reassociate.
917 MachineInstr *MI1 = nullptr;
918 MachineInstr *MI2 = nullptr;
919 if (Op1.isReg() && Op1.getReg().isVirtual())
920 MI1 = MRI.getUniqueVRegDef(Op1.getReg());
921 if (Op2.isReg() && Op2.getReg().isVirtual())
922 MI2 = MRI.getUniqueVRegDef(Op2.getReg());
923
924 // And at least one operand must be defined in MBB.
925 return MI1 && MI2 && (MI1->getParent() == MBB || MI2->getParent() == MBB);
926}
927
928bool TargetInstrInfo::areOpcodesEqualOrInverse(unsigned Opcode1,
929 unsigned Opcode2) const {
930 return Opcode1 == Opcode2 || getInverseOpcode(Opcode1) == Opcode2;
931}
932
933bool TargetInstrInfo::hasReassociableSibling(const MachineInstr &Inst,
934 bool &Commuted) const {
935 const MachineBasicBlock *MBB = Inst.getParent();
936 const MachineRegisterInfo &MRI = MBB->getParent()->getRegInfo();
937 MachineInstr *MI1 = MRI.getUniqueVRegDef(Inst.getOperand(1).getReg());
938 MachineInstr *MI2 = MRI.getUniqueVRegDef(Inst.getOperand(2).getReg());
939 unsigned Opcode = Inst.getOpcode();
940
941 // If only one operand has the same or inverse opcode and it's the second
942 // source operand, the operands must be commuted.
943 Commuted = !areOpcodesEqualOrInverse(Opcode, MI1->getOpcode()) &&
944 areOpcodesEqualOrInverse(Opcode, MI2->getOpcode());
945 if (Commuted)
946 std::swap(MI1, MI2);
947
948 // 1. The previous instruction must be the same type as Inst.
949 // 2. The previous instruction must also be associative/commutative or be the
950 // inverse of such an operation (this can be different even for
951 // instructions with the same opcode if traits like fast-math-flags are
952 // included).
953 // 3. The previous instruction must have virtual register definitions for its
954 // operands in the same basic block as Inst.
955 // 4. The previous instruction's result must only be used by Inst.
956 return areOpcodesEqualOrInverse(Opcode, MI1->getOpcode()) &&
957 (isAssociativeAndCommutative(*MI1) ||
958 isAssociativeAndCommutative(*MI1, /* Invert */ true)) &&
959 hasReassociableOperands(*MI1, MBB) &&
960 MRI.hasOneNonDBGUse(MI1->getOperand(0).getReg());
961}
962
963// 1. The operation must be associative and commutative or be the inverse of
964// such an operation.
965// 2. The instruction must have virtual register definitions for its
966// operands in the same basic block.
967// 3. The instruction must have a reassociable sibling.
968bool TargetInstrInfo::isReassociationCandidate(const MachineInstr &Inst,
969 bool &Commuted) const {
970 return (isAssociativeAndCommutative(Inst) ||
971 isAssociativeAndCommutative(Inst, /* Invert */ true)) &&
972 hasReassociableOperands(Inst, Inst.getParent()) &&
973 hasReassociableSibling(Inst, Commuted);
974}
975
976// Utility routine that checks if \param MO is defined by an
977// \param CombineOpc instruction in the basic block \param MBB.
978// If \param CombineOpc is not provided, the OpCode check will
979// be skipped.
980static bool canCombine(MachineBasicBlock &MBB, MachineOperand &MO,
981 unsigned CombineOpc = 0) {
982 MachineRegisterInfo &MRI = MBB.getParent()->getRegInfo();
983 MachineInstr *MI = nullptr;
984
985 if (MO.isReg() && MO.getReg().isVirtual())
986 MI = MRI.getUniqueVRegDef(MO.getReg());
987 // And it needs to be in the trace (otherwise, it won't have a depth).
988 if (!MI || MI->getParent() != &MBB ||
989 (MI->getOpcode() != CombineOpc && CombineOpc != 0))
990 return false;
992 // It must only be used by the instruction we are combining with.
992 if (!MRI.hasOneNonDBGUse(MO.getReg()))
993 return false;
994
995 return true;
996}
997
998// A chain of accumulation instructions will be selected IFF:
999// 1. All the accumulation instructions in the chain have the same opcode,
1000// besides the first that has a slightly different opcode because it does
1001// not accumulate into a register.
1002// 2. All the instructions in the chain are combinable (have a single use
1003// which itself is part of the chain).
1004// 3. Meets the required minimum length.
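// For example, on AArch64 this shape matches chains of accumulating
// instructions such as UABA (absolute difference and accumulate), where the
// head of the chain is the non-accumulating form (UABD) of the operation.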
1005void TargetInstrInfo::getAccumulatorChain(
1006 MachineInstr *CurrentInstr, SmallVectorImpl<Register> &Chain) const {
1007 // Walk up the chain of accumulation instructions and collect them in the
1008 // vector.
1009 MachineBasicBlock &MBB = *CurrentInstr->getParent();
1010 const MachineRegisterInfo &MRI = MBB.getParent()->getRegInfo();
1011 unsigned AccumulatorOpcode = CurrentInstr->getOpcode();
1012 std::optional<unsigned> ChainStartOpCode =
1013 getAccumulationStartOpcode(AccumulatorOpcode);
1014
1015 if (!ChainStartOpCode.has_value())
1016 return;
1017
1018 // Push the first accumulator result to the start of the chain.
1019 Chain.push_back(CurrentInstr->getOperand(0).getReg());
1020
1021 // Collect the accumulator input register from all instructions in the chain.
1022 while (CurrentInstr &&
1023 canCombine(MBB, CurrentInstr->getOperand(1), AccumulatorOpcode)) {
1024 Chain.push_back(CurrentInstr->getOperand(1).getReg());
1025 CurrentInstr = MRI.getUniqueVRegDef(CurrentInstr->getOperand(1).getReg());
1026 }
1027
1028 // Add the instruction at the top of the chain.
1029 if (CurrentInstr->getOpcode() == AccumulatorOpcode &&
1030 canCombine(MBB, CurrentInstr->getOperand(1)))
1031 Chain.push_back(CurrentInstr->getOperand(1).getReg());
1032}
1033
1034/// Find chains of accumulations that can be rewritten as a tree for increased
1035/// ILP.
1036bool TargetInstrInfo::getAccumulatorReassociationPatterns(
1037 MachineInstr &Root, SmallVectorImpl<unsigned> &Patterns) const {
1038 if (!EnableAccReassociation)
1039 return false;
1040
1041 unsigned Opc = Root.getOpcode();
1042 if (!isAccumulationOpcode(Opc))
1043 return false;
1044
1045 // Verify that this is the end of the chain.
1046 MachineBasicBlock &MBB = *Root.getParent();
1047 MachineRegisterInfo &MRI = MBB.getParent()->getRegInfo();
1048 if (!MRI.hasOneNonDBGUser(Root.getOperand(0).getReg()))
1049 return false;
1050
1051 auto User = MRI.use_instr_begin(Root.getOperand(0).getReg());
1052 if (User->getOpcode() == Opc)
1053 return false;
1054
1055 // Walk up the use chain and collect the reduction chain.
1057 getAccumulatorChain(&Root, Chain);
1058
1059 // Reject chains which are too short to be worth modifying.
1060 if (Chain.size() < MinAccumulatorDepth)
1061 return false;
1062
1063 // Check if the MBB this instruction is a part of contains any other chains.
1064 // If so, don't apply it.
1065 SmallSet<Register, 32> ReductionChain(llvm::from_range, Chain);
1066 for (const auto &I : MBB) {
1067 if (I.getOpcode() == Opc &&
1068 !ReductionChain.contains(I.getOperand(0).getReg()))
1069 return false;
1070 }
1071
1072 Patterns.push_back(MachineCombinerPattern::ACC_CHAIN);
1073 return true;
1074}
1075
1076// Reduce branches of the accumulator tree by adding them together.
1077void TargetInstrInfo::reduceAccumulatorTree(
1078 SmallVectorImpl<Register> &RegistersToReduce,
1079 SmallVectorImpl<MachineInstr *> &InsInstrs, MachineFunction &MF,
1080 MachineInstr &Root, MachineRegisterInfo &MRI,
1081 DenseMap<Register, unsigned> &InstrIdxForVirtReg,
1082 Register ResultReg) const {
1083 const TargetInstrInfo *TII = MF.getSubtarget().getInstrInfo();
1084 SmallVector<Register, 8> NewRegs;
1085
1086 // Get the opcode for the reduction instruction we will need to build.
1087 // If for some reason it is not defined, early exit and don't apply this.
1088 unsigned ReduceOpCode = getReduceOpcodeForAccumulator(Root.getOpcode());
1089
1090 for (unsigned int i = 1; i <= (RegistersToReduce.size() / 2); i += 2) {
1091 auto RHS = RegistersToReduce[i - 1];
1092 auto LHS = RegistersToReduce[i];
1093 Register Dest;
1094 // If we are reducing 2 registers, reuse the original result register.
1095 if (RegistersToReduce.size() == 2)
1096 Dest = ResultReg;
1097 // Otherwise, create a new virtual register to hold the partial sum.
1098 else {
1099 auto NewVR = MRI.createVirtualRegister(
1100 MRI.getRegClass(Root.getOperand(0).getReg()));
1101 Dest = NewVR;
1102 NewRegs.push_back(Dest);
1103 InstrIdxForVirtReg.insert(std::make_pair(Dest, InsInstrs.size()));
1104 }
1105
1106 // Create the new reduction instruction.
1107 MachineInstrBuilder MIB =
1108 BuildMI(MF, MIMetadata(Root), TII->get(ReduceOpCode), Dest)
1109 .addReg(RHS, getKillRegState(true))
1110 .addReg(LHS, getKillRegState(true));
1111 // Copy any flags needed from the original instruction.
1112 MIB->setFlags(Root.getFlags());
1113 InsInstrs.push_back(MIB);
1114 }
1115
1116 // If the number of registers to reduce is odd, add the remaining register to
1117 // the vector of registers to reduce.
1118 if (RegistersToReduce.size() % 2 != 0)
1119 NewRegs.push_back(RegistersToReduce[RegistersToReduce.size() - 1]);
1120
1121 RegistersToReduce = NewRegs;
1122}
1123
1124// The concept of the reassociation pass is that these operations can benefit
1125// from this kind of transformation:
1126//
1127// A = ? op ?
1128// B = A op X (Prev)
1129// C = B op Y (Root)
1130// -->
1131// A = ? op ?
1132// B = X op Y
1133// C = A op B
1134//
1135// breaking the dependency between A and B, allowing them to be executed in
1136// parallel (or back-to-back in a pipeline) instead of depending on each other.
1137
1138// FIXME: This has the potential to be expensive (compile time) while not
1139// improving the code at all. Some ways to limit the overhead:
1140// 1. Track successful transforms; bail out if hit rate gets too low.
1141// 2. Only enable at -O3 or some other non-default optimization level.
1142// 3. Pre-screen pattern candidates here: if an operand of the previous
1143// instruction is known to not increase the critical path, then don't match
1144// that pattern.
1145bool TargetInstrInfo::getMachineCombinerPatterns(
1146 MachineInstr &Root, SmallVectorImpl<unsigned> &Patterns,
1147 bool DoRegPressureReduce) const {
1148 bool Commute;
1149 if (isReassociationCandidate(Root, Commute)) {
1150 // We found a sequence of instructions that may be suitable for a
1151 // reassociation of operands to increase ILP. Specify each commutation
1152 // possibility for the Prev instruction in the sequence and let the
1153 // machine combiner decide if changing the operands is worthwhile.
1154 if (Commute) {
1155 Patterns.push_back(MachineCombinerPattern::REASSOC_AX_YB);
1156 Patterns.push_back(MachineCombinerPattern::REASSOC_XA_BY);
1157 } else {
1158 Patterns.push_back(MachineCombinerPattern::REASSOC_AX_BY);
1159 Patterns.push_back(MachineCombinerPattern::REASSOC_XA_YB);
1160 }
1161 return true;
1162 }
1163 if (getAccumulatorReassociationPatterns(Root, Patterns))
1164 return true;
1165
1166 return false;
1167}
1168
1169/// Return true when a code sequence can improve loop throughput.
1170bool TargetInstrInfo::isThroughputPattern(unsigned Pattern) const {
1171 return false;
1172}
1173
1174CombinerObjective
1175TargetInstrInfo::getCombinerObjective(unsigned Pattern) const {
1176 switch (Pattern) {
1177 case MachineCombinerPattern::ACC_CHAIN:
1178 return CombinerObjective::MustReduceDepth;
1179 default:
1180 return CombinerObjective::Default;
1181 }
1182}
1183
1184std::pair<unsigned, unsigned>
1185TargetInstrInfo::getReassociationOpcodes(unsigned Pattern,
1186 const MachineInstr &Root,
1187 const MachineInstr &Prev) const {
1188 bool AssocCommutRoot = isAssociativeAndCommutative(Root);
1189 bool AssocCommutPrev = isAssociativeAndCommutative(Prev);
1190
1191 // Early exit if both opcodes are associative and commutative. It's a trivial
1192 // reassociation when we only change operands order. In this case opcodes are
1193 // not required to have inverse versions.
1194 if (AssocCommutRoot && AssocCommutPrev) {
1195 assert(Root.getOpcode() == Prev.getOpcode() && "Expected to be equal");
1196 return std::make_pair(Root.getOpcode(), Root.getOpcode());
1197 }
1198
1199 // At least one instruction is not associative or commutative.
1200 // Since we have matched one of the reassociation patterns, we expect that the
1201 // instructions' opcodes are equal or one of them is the inversion of the
1202 // other.
1204 "Incorrectly matched pattern");
1205 unsigned AssocCommutOpcode = Root.getOpcode();
1206 unsigned InverseOpcode = *getInverseOpcode(Root.getOpcode());
1207 if (!AssocCommutRoot)
1208 std::swap(AssocCommutOpcode, InverseOpcode);
1209
1210 // The transformation rule (`+` is any associative and commutative binary
1211 // operation, `-` is the inverse):
1212 // REASSOC_AX_BY:
1213 // (A + X) + Y => A + (X + Y)
1214 // (A + X) - Y => A + (X - Y)
1215 // (A - X) + Y => A - (X - Y)
1216 // (A - X) - Y => A - (X + Y)
1217 // REASSOC_XA_BY:
1218 // (X + A) + Y => (X + Y) + A
1219 // (X + A) - Y => (X - Y) + A
1220 // (X - A) + Y => (X + Y) - A
1221 // (X - A) - Y => (X - Y) - A
1222 // REASSOC_AX_YB:
1223 // Y + (A + X) => (Y + X) + A
1224 // Y - (A + X) => (Y - X) - A
1225 // Y + (A - X) => (Y - X) + A
1226 // Y - (A - X) => (Y + X) - A
1227 // REASSOC_XA_YB:
1228 // Y + (X + A) => (Y + X) + A
1229 // Y - (X + A) => (Y - X) - A
1230 // Y + (X - A) => (Y + X) - A
1231 // Y - (X - A) => (Y - X) + A
1232 switch (Pattern) {
1233 default:
1234 llvm_unreachable("Unexpected pattern");
1235 case MachineCombinerPattern::REASSOC_AX_BY:
1236 if (!AssocCommutRoot && AssocCommutPrev)
1237 return {AssocCommutOpcode, InverseOpcode};
1238 if (AssocCommutRoot && !AssocCommutPrev)
1239 return {InverseOpcode, InverseOpcode};
1240 if (!AssocCommutRoot && !AssocCommutPrev)
1241 return {InverseOpcode, AssocCommutOpcode};
1242 break;
1243 case MachineCombinerPattern::REASSOC_XA_BY:
1244 if (!AssocCommutRoot && AssocCommutPrev)
1245 return {AssocCommutOpcode, InverseOpcode};
1246 if (AssocCommutRoot && !AssocCommutPrev)
1247 return {InverseOpcode, AssocCommutOpcode};
1248 if (!AssocCommutRoot && !AssocCommutPrev)
1249 return {InverseOpcode, InverseOpcode};
1250 break;
1251 case MachineCombinerPattern::REASSOC_AX_YB:
1252 if (!AssocCommutRoot && AssocCommutPrev)
1253 return {InverseOpcode, InverseOpcode};
1254 if (AssocCommutRoot && !AssocCommutPrev)
1255 return {AssocCommutOpcode, InverseOpcode};
1256 if (!AssocCommutRoot && !AssocCommutPrev)
1257 return {InverseOpcode, AssocCommutOpcode};
1258 break;
1259 case MachineCombinerPattern::REASSOC_XA_YB:
1260 if (!AssocCommutRoot && AssocCommutPrev)
1261 return {InverseOpcode, InverseOpcode};
1262 if (AssocCommutRoot && !AssocCommutPrev)
1263 return {InverseOpcode, AssocCommutOpcode};
1264 if (!AssocCommutRoot && !AssocCommutPrev)
1265 return {AssocCommutOpcode, InverseOpcode};
1266 break;
1267 }
1268 llvm_unreachable("Unhandled combination");
1269}
1270
1271// Return a pair of boolean flags showing if the new root and new prev operands
1272// must be swapped. See visual example of the rule in
1273// TargetInstrInfo::getReassociationOpcodes.
1274static std::pair<bool, bool> mustSwapOperands(unsigned Pattern) {
1275 switch (Pattern) {
1276 default:
1277 llvm_unreachable("Unexpected pattern");
1278 case MachineCombinerPattern::REASSOC_AX_BY:
1279 return {false, false};
1280 case MachineCombinerPattern::REASSOC_XA_BY:
1281 return {true, false};
1282 case MachineCombinerPattern::REASSOC_AX_YB:
1283 return {true, true};
1284 case MachineCombinerPattern::REASSOC_XA_YB:
1285 return {true, true};
1286 }
1287}
1288
1289void TargetInstrInfo::getReassociateOperandIndices(
1290 const MachineInstr &Root, unsigned Pattern,
1291 std::array<unsigned, 5> &OperandIndices) const {
1292 switch (Pattern) {
1293 case MachineCombinerPattern::REASSOC_AX_BY:
1294 OperandIndices = {1, 1, 1, 2, 2};
1295 break;
1296 case MachineCombinerPattern::REASSOC_XA_BY:
1297 OperandIndices = {2, 1, 2, 2, 1};
1298 break;
1299 case MachineCombinerPattern::REASSOC_AX_YB:
1300 OperandIndices = {1, 2, 1, 1, 2};
1301 break;
1302 case MachineCombinerPattern::REASSOC_XA_YB:
1303 OperandIndices = {2, 2, 2, 1, 1};
1304 break;
1305 default:
1306 llvm_unreachable("unexpected MachineCombinerPattern");
1307 }
1308}
1309
1310/// Attempt the reassociation transformation to reduce critical path length.
1311/// See the above comments before getMachineCombinerPatterns().
1312void TargetInstrInfo::reassociateOps(
1313 MachineInstr &Root, MachineInstr &Prev, unsigned Pattern,
1314 SmallVectorImpl<MachineInstr *> &InsInstrs,
1315 SmallVectorImpl<MachineInstr *> &DelInstrs,
1316 ArrayRef<unsigned> OperandIndices,
1317 DenseMap<Register, unsigned> &InstrIdxForVirtReg) const {
1318 MachineFunction *MF = Root.getMF();
1319 MachineRegisterInfo &MRI = MF->getRegInfo();
1320 const TargetInstrInfo *TII = MF->getSubtarget().getInstrInfo();
1321 const TargetRegisterClass *RC = Root.getRegClassConstraint(0, TII, &TRI);
1322
1323 MachineOperand &OpA = Prev.getOperand(OperandIndices[1]);
1324 MachineOperand &OpB = Root.getOperand(OperandIndices[2]);
1325 MachineOperand &OpX = Prev.getOperand(OperandIndices[3]);
1326 MachineOperand &OpY = Root.getOperand(OperandIndices[4]);
1327 MachineOperand &OpC = Root.getOperand(0);
1328
1329 Register RegA = OpA.getReg();
1330 unsigned SubRegA = OpA.getSubReg();
1331 Register RegB = OpB.getReg();
1332 Register RegX = OpX.getReg();
1333 unsigned SubRegX = OpX.getSubReg();
1334 Register RegY = OpY.getReg();
1335 unsigned SubRegY = OpY.getSubReg();
1336 Register RegC = OpC.getReg();
1337
1338 if (RegA.isVirtual())
1339 MRI.constrainRegClass(RegA, RC);
1340 if (RegB.isVirtual())
1341 MRI.constrainRegClass(RegB, RC);
1342 if (RegX.isVirtual())
1343 MRI.constrainRegClass(RegX, RC);
1344 if (RegY.isVirtual())
1345 MRI.constrainRegClass(RegY, RC);
1346 if (RegC.isVirtual())
1347 MRI.constrainRegClass(RegC, RC);
1348
1349 // Create a new virtual register for the result of (X op Y) instead of
1350 // recycling RegB because the MachineCombiner's computation of the critical
1351 // path requires a new register definition rather than an existing one.
1352 Register NewVR = MRI.createVirtualRegister(RC);
1353 unsigned SubRegNewVR = 0;
1354 InstrIdxForVirtReg.insert(std::make_pair(NewVR, 0));
1355
1356 auto [NewRootOpc, NewPrevOpc] = getReassociationOpcodes(Pattern, Root, Prev);
1357 bool KillA = OpA.isKill();
1358 bool KillX = OpX.isKill();
1359 bool KillY = OpY.isKill();
1360 bool KillNewVR = true;
1361
1362 auto [SwapRootOperands, SwapPrevOperands] = mustSwapOperands(Pattern);
1363
1364 if (SwapPrevOperands) {
1365 std::swap(RegX, RegY);
1366 std::swap(SubRegX, SubRegY);
1367 std::swap(KillX, KillY);
1368 }
1369
1370 unsigned PrevFirstOpIdx, PrevSecondOpIdx;
1371 unsigned RootFirstOpIdx, RootSecondOpIdx;
1372 switch (Pattern) {
1374 PrevFirstOpIdx = OperandIndices[1];
1375 PrevSecondOpIdx = OperandIndices[3];
1376 RootFirstOpIdx = OperandIndices[2];
1377 RootSecondOpIdx = OperandIndices[4];
1378 break;
1380 PrevFirstOpIdx = OperandIndices[1];
1381 PrevSecondOpIdx = OperandIndices[3];
1382 RootFirstOpIdx = OperandIndices[4];
1383 RootSecondOpIdx = OperandIndices[2];
1384 break;
1386 PrevFirstOpIdx = OperandIndices[3];
1387 PrevSecondOpIdx = OperandIndices[1];
1388 RootFirstOpIdx = OperandIndices[2];
1389 RootSecondOpIdx = OperandIndices[4];
1390 break;
1392 PrevFirstOpIdx = OperandIndices[3];
1393 PrevSecondOpIdx = OperandIndices[1];
1394 RootFirstOpIdx = OperandIndices[4];
1395 RootSecondOpIdx = OperandIndices[2];
1396 break;
1397 default:
1398 llvm_unreachable("unexpected MachineCombinerPattern");
1399 }
1400
1401 // Basically BuildMI but doesn't add implicit operands by default.
1402 auto buildMINoImplicit = [](MachineFunction &MF, const MIMetadata &MIMD,
1403 const MCInstrDesc &MCID, Register DestReg) {
1404 return MachineInstrBuilder(
1405 MF, MF.CreateMachineInstr(MCID, MIMD.getDL(), /*NoImpl=*/true))
1406 .copyMIMetadata(MIMD)
1407 .addReg(DestReg, RegState::Define);
1408 };
1409
1410 // Create new instructions for insertion.
1411 MachineInstrBuilder MIB1 =
1412 buildMINoImplicit(*MF, MIMetadata(Prev), TII->get(NewPrevOpc), NewVR);
1413 for (const auto &MO : Prev.explicit_operands()) {
1414 unsigned Idx = MO.getOperandNo();
1415 // Skip the result operand we'd already added.
1416 if (Idx == 0)
1417 continue;
1418 if (Idx == PrevFirstOpIdx)
1419 MIB1.addReg(RegX, getKillRegState(KillX), SubRegX);
1420 else if (Idx == PrevSecondOpIdx)
1421 MIB1.addReg(RegY, getKillRegState(KillY), SubRegY);
1422 else
1423 MIB1.add(MO);
1424 }
1425 MIB1.copyImplicitOps(Prev);
1426
1427 if (SwapRootOperands) {
1428 std::swap(RegA, NewVR);
1429 std::swap(SubRegA, SubRegNewVR);
1430 std::swap(KillA, KillNewVR);
1431 }
1432
1433 MachineInstrBuilder MIB2 =
1434 buildMINoImplicit(*MF, MIMetadata(Root), TII->get(NewRootOpc), RegC);
1435 for (const auto &MO : Root.explicit_operands()) {
1436 unsigned Idx = MO.getOperandNo();
1437 // Skip the result operand.
1438 if (Idx == 0)
1439 continue;
1440 if (Idx == RootFirstOpIdx)
1441 MIB2 = MIB2.addReg(RegA, getKillRegState(KillA), SubRegA);
1442 else if (Idx == RootSecondOpIdx)
1443 MIB2 = MIB2.addReg(NewVR, getKillRegState(KillNewVR), SubRegNewVR);
1444 else
1445 MIB2 = MIB2.add(MO);
1446 }
1447 MIB2.copyImplicitOps(Root);
1448
1449 // Propagate FP flags from the original instructions.
1450 // But clear poison-generating flags because those may not be valid now.
1451 // TODO: There should be a helper function for copying only fast-math-flags.
1452 uint32_t IntersectedFlags = Root.getFlags() & Prev.getFlags();
1453 MIB1->setFlags(IntersectedFlags);
1454 MIB1->clearFlag(MachineInstr::MIFlag::NoSWrap);
1455 MIB1->clearFlag(MachineInstr::MIFlag::NoUWrap);
1456 MIB1->clearFlag(MachineInstr::MIFlag::IsExact);
1458
1459 MIB2->setFlags(IntersectedFlags);
1460 MIB2->clearFlag(MachineInstr::MIFlag::NoSWrap);
1461 MIB2->clearFlag(MachineInstr::MIFlag::NoUWrap);
1462 MIB2->clearFlag(MachineInstr::MIFlag::IsExact);
1464
1465 setSpecialOperandAttr(Root, Prev, *MIB1, *MIB2);
1466
1467 // Record new instructions for insertion and old instructions for deletion.
1468 InsInstrs.push_back(MIB1);
1469 InsInstrs.push_back(MIB2);
1470 DelInstrs.push_back(&Prev);
1471 DelInstrs.push_back(&Root);
1472
1473 // We transformed:
1474 // B = A op X (Prev)
1475 // C = B op Y (Root)
1476 // Into:
1477 // B = X op Y (MIB1)
1478 // C = A op B (MIB2)
1479 // C has the same value as before, B doesn't; as such, keep the debug number
1480 // of C but not of B.
1481 if (unsigned OldRootNum = Root.peekDebugInstrNum())
1482 MIB2.getInstr()->setDebugInstrNum(OldRootNum);
1483}
1484
1485void TargetInstrInfo::genAlternativeCodeSequence(
1486 MachineInstr &Root, unsigned Pattern,
1487 SmallVectorImpl<MachineInstr *> &InsInstrs,
1488 SmallVectorImpl<MachineInstr *> &DelInstrs,
1489 DenseMap<Register, unsigned> &InstIdxForVirtReg) const {
1491 MachineBasicBlock &MBB = *Root.getParent();
1492 MachineFunction &MF = *MBB.getParent();
1493 MachineRegisterInfo &MRI = MF.getRegInfo();
1494
1495 switch (Pattern) {
1496 case MachineCombinerPattern::REASSOC_AX_BY:
1497 case MachineCombinerPattern::REASSOC_AX_YB:
1498 case MachineCombinerPattern::REASSOC_XA_BY:
1499 case MachineCombinerPattern::REASSOC_XA_YB: {
1500 // Select the previous instruction in the sequence based on the input
1501 // pattern.
1502 std::array<unsigned, 5> OperandIndices;
1503 getReassociateOperandIndices(Root, Pattern, OperandIndices);
1504 MachineInstr *Prev =
1505 MRI.getUniqueVRegDef(Root.getOperand(OperandIndices[0]).getReg());
1506
1507 // Don't reassociate if Prev and Root are in different blocks.
1508 if (Prev->getParent() != Root.getParent())
1509 return;
1510
1511 reassociateOps(Root, *Prev, Pattern, InsInstrs, DelInstrs, OperandIndices,
1512 InstIdxForVirtReg);
1513 break;
1514 }
1515 case MachineCombinerPattern::ACC_CHAIN: {
1516 SmallVector<Register, 32> ChainRegs;
1517 getAccumulatorChain(&Root, ChainRegs);
1518 unsigned int Depth = ChainRegs.size();
1520 "Max accumulator width set to illegal value");
1521 unsigned int MaxWidth = Log2_32(Depth) < MaxAccumulatorWidth
1522 ? Log2_32(Depth)
1523 : MaxAccumulatorWidth;
1524
1525 // Walk down the chain and rewrite it as a tree.
1526 for (auto IndexedReg : llvm::enumerate(llvm::reverse(ChainRegs))) {
1527 // No need to rewrite the first node, it is already perfect as it is.
1528 if (IndexedReg.index() == 0)
1529 continue;
1530
1531 // FIXME: Losing subregisters
1532 MachineInstr *Instr = MRI.getUniqueVRegDef(IndexedReg.value());
1533 MachineInstrBuilder MIB;
1534 Register AccReg;
1535 if (IndexedReg.index() < MaxWidth) {
1536 // Now we need to create new instructions for the first row.
1537 AccReg = Instr->getOperand(0).getReg();
1538 unsigned OpCode = getAccumulationStartOpcode(Root.getOpcode());
1539
1540 MIB = BuildMI(MF, MIMetadata(*Instr), TII->get(OpCode), AccReg)
1541 .addReg(Instr->getOperand(2).getReg(),
1542 getKillRegState(Instr->getOperand(2).isKill()))
1543 .addReg(Instr->getOperand(3).getReg(),
1544 getKillRegState(Instr->getOperand(3).isKill()));
1545 } else {
1546 // For the remaining cases, we need to use an output register of one of
1547 // the newly inserted instructions as operand 1.
1548 AccReg = Instr->getOperand(0).getReg() == Root.getOperand(0).getReg()
1549 ? MRI.createVirtualRegister(
1550 MRI.getRegClass(Root.getOperand(0).getReg()))
1551 : Instr->getOperand(0).getReg();
1552 assert(IndexedReg.index() >= MaxWidth);
1553 auto AccumulatorInput =
1554 ChainRegs[Depth - (IndexedReg.index() - MaxWidth) - 1];
1555 MIB = BuildMI(MF, MIMetadata(*Instr), TII->get(Instr->getOpcode()),
1556 AccReg)
1557 .addReg(AccumulatorInput, getKillRegState(true))
1558 .addReg(Instr->getOperand(2).getReg(),
1559 getKillRegState(Instr->getOperand(2).isKill()))
1560 .addReg(Instr->getOperand(3).getReg(),
1561 getKillRegState(Instr->getOperand(3).isKill()));
1562 }
1563
1564 MIB->setFlags(Instr->getFlags());
1565 InstIdxForVirtReg.insert(std::make_pair(AccReg, InsInstrs.size()));
1566 InsInstrs.push_back(MIB);
1567 DelInstrs.push_back(Instr);
1568 }
1569
1570 SmallVector<Register, 8> RegistersToReduce;
1571 for (unsigned i = (InsInstrs.size() - MaxWidth); i < InsInstrs.size();
1572 ++i) {
1573 auto Reg = InsInstrs[i]->getOperand(0).getReg();
1574 RegistersToReduce.push_back(Reg);
1575 }
1576
1577 while (RegistersToReduce.size() > 1)
1578 reduceAccumulatorTree(RegistersToReduce, InsInstrs, MF, Root, MRI,
1579 InstIdxForVirtReg, Root.getOperand(0).getReg());
1580
1581 break;
1582 }
1583 }
1584}
1585
1586MachineTraceStrategy TargetInstrInfo::getMachineCombinerTraceStrategy() const {
1587 return MachineTraceStrategy::TS_MinInstrCount;
1588}
1589
1590bool TargetInstrInfo::isReallyTriviallyReMaterializable(
1591 const MachineInstr &MI) const {
1592 const MachineFunction &MF = *MI.getMF();
1593 const MachineRegisterInfo &MRI = MF.getRegInfo();
1594
1595 // Remat clients assume operand 0 is the defined register.
1596 if (!MI.getNumOperands() || !MI.getOperand(0).isReg())
1597 return false;
1598 Register DefReg = MI.getOperand(0).getReg();
1599
1600 // A sub-register definition can only be rematerialized if the instruction
1601 // doesn't read the other parts of the register. Otherwise it is really a
1602 // read-modify-write operation on the full virtual register which cannot be
1603 // moved safely.
1604 if (DefReg.isVirtual() && MI.getOperand(0).getSubReg() &&
1605 MI.readsVirtualRegister(DefReg))
1606 return false;
1607
1608 // A load from a fixed stack slot can be rematerialized. This may be
1609 // redundant with subsequent checks, but it's target-independent,
1610 // simple, and a common case.
1611 int FrameIdx = 0;
1612 if (isLoadFromStackSlot(MI, FrameIdx) &&
1613 MF.getFrameInfo().isImmutableObjectIndex(FrameIdx))
1614 return true;
1615
1616 // Avoid instructions obviously unsafe for remat.
1617 if (MI.isNotDuplicable() || MI.mayStore() || MI.mayRaiseFPException() ||
1618 MI.hasUnmodeledSideEffects())
1619 return false;
1620
1621 // Don't remat inline asm. We have no idea how expensive it is
1622 // even if it's side effect free.
1623 if (MI.isInlineAsm())
1624 return false;
1625
1626 // Avoid instructions which load from potentially varying memory.
1627 if (MI.mayLoad() && !MI.isDereferenceableInvariantLoad())
1628 return false;
1629
1630 // If any of the registers accessed are non-constant, conservatively assume
1631 // the instruction is not rematerializable.
1632 for (const MachineOperand &MO : MI.operands()) {
1633 if (!MO.isReg()) continue;
1634 Register Reg = MO.getReg();
1635 if (Reg == 0)
1636 continue;
1637
1638 // Check for a well-behaved physical register.
1639 if (Reg.isPhysical()) {
1640 if (MO.isUse()) {
1641 // If the physreg has no defs anywhere, it's just an ambient register
1642 // and we can freely move its uses. Alternatively, if it's allocatable,
1643 // it could get allocated to something with a def during allocation.
1644 if (!MRI.isConstantPhysReg(Reg))
1645 return false;
1646 } else {
1647 // A physreg def. We can't remat it.
1648 return false;
1649 }
1650 continue;
1651 }
1652
1653 // Only allow one virtual-register def. There may be multiple defs of the
1654 // same virtual register, though.
1655 if (MO.isDef() && Reg != DefReg)
1656 return false;
1657 }
1658
1659 // Everything checked out.
1660 return true;
1661}
1662
1663int TargetInstrInfo::getSPAdjust(const MachineInstr &MI) const {
1664 const MachineFunction *MF = MI.getMF();
1665 const TargetFrameLowering *TFI = MF->getSubtarget().getFrameLowering();
1666 bool StackGrowsDown =
1667 TFI->getStackGrowthDirection() == TargetFrameLowering::StackGrowsDown;
1668
1669 unsigned FrameSetupOpcode = getCallFrameSetupOpcode();
1670 unsigned FrameDestroyOpcode = getCallFrameDestroyOpcode();
1671
1672 if (!isFrameInstr(MI))
1673 return 0;
1674
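 // As an illustration: on a typical stack-grows-down target, a call frame
 // setup of 16 bytes yields SPAdj = 16 here, and the matching call frame
 // destroy yields -16 (both after alignment by the frame lowering).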
1675 int SPAdj = TFI->alignSPAdjust(getFrameSize(MI));
1676
1677 if ((!StackGrowsDown && MI.getOpcode() == FrameSetupOpcode) ||
1678 (StackGrowsDown && MI.getOpcode() == FrameDestroyOpcode))
1679 SPAdj = -SPAdj;
1680
1681 return SPAdj;
1682}
1683
1684/// isSchedulingBoundary - Test if the given instruction should be
1685/// considered a scheduling boundary. This primarily includes labels
1686/// and terminators.
1687bool TargetInstrInfo::isSchedulingBoundary(const MachineInstr &MI,
1688 const MachineBasicBlock *MBB,
1689 const MachineFunction &MF) const {
1690 // Terminators and labels can't be scheduled around.
1691 if (MI.isTerminator() || MI.isPosition())
1692 return true;
1693
1694 // INLINEASM_BR can jump to another block
1695 if (MI.getOpcode() == TargetOpcode::INLINEASM_BR)
1696 return true;
1697
1698 // Don't attempt to schedule around any instruction that defines
1699 // a stack-oriented pointer, as it's unlikely to be profitable. This
1700 // saves compile time, because it doesn't require every single
1701 // stack slot reference to depend on the instruction that does the
1702 // modification.
1703 const TargetLowering &TLI = *MF.getSubtarget().getTargetLowering();
1704 return MI.modifiesRegister(TLI.getStackPointerRegisterToSaveRestore(), &TRI);
1705}
1706
1707// Provide a global flag for disabling the PreRA hazard recognizer that targets
1708// may choose to honor.
1709bool TargetInstrInfo::usePreRAHazardRecognizer() const {
1710 return !DisableHazardRecognizer;
1711}
1712
1713// Default implementation of CreateTargetRAHazardRecognizer.
1714ScheduleHazardRecognizer *TargetInstrInfo::CreateTargetHazardRecognizer(
1715 const TargetSubtargetInfo *STI,
1716 const ScheduleDAG *DAG) const {
1717 // Dummy hazard recognizer allows all instructions to issue.
1718 return new ScheduleHazardRecognizer();
1719}
1720
1721// Default implementation of CreateTargetMIHazardRecognizer.
1722ScheduleHazardRecognizer *TargetInstrInfo::CreateTargetMIHazardRecognizer(
1723 const InstrItineraryData *II, const ScheduleDAGMI *DAG) const {
1724 return new ScoreboardHazardRecognizer(II, DAG, "machine-scheduler");
1725}
1726
1727// Default implementation of CreateTargetPostRAHazardRecognizer.
1728ScheduleHazardRecognizer *TargetInstrInfo::
1729CreateTargetPostRAHazardRecognizer(const InstrItineraryData *II,
1730 const ScheduleDAG *DAG) const {
1731 return new ScoreboardHazardRecognizer(II, DAG, "post-RA-sched");
1732}
1733
1734// Default implementation of getMemOperandWithOffset.
1735bool TargetInstrInfo::getMemOperandWithOffset(
1736 const MachineInstr &MI, const MachineOperand *&BaseOp, int64_t &Offset,
1737 bool &OffsetIsScalable, const TargetRegisterInfo * /*RemoveMe*/) const {
1738 SmallVector<const MachineOperand *, 4> BaseOps;
1739 LocationSize Width = LocationSize::precise(0);
1740 if (!getMemOperandsWithOffsetWidth(MI, BaseOps, Offset, OffsetIsScalable,
1741 Width, &TRI) ||
1742 BaseOps.size() != 1)
1743 return false;
1744 BaseOp = BaseOps.front();
1745 return true;
1746}
1747
1748//===----------------------------------------------------------------------===//
1749// SelectionDAG latency interface.
1750//===----------------------------------------------------------------------===//
1751
1752std::optional<unsigned>
1753TargetInstrInfo::getOperandLatency(const InstrItineraryData *ItinData,
1754 SDNode *DefNode, unsigned DefIdx,
1755 SDNode *UseNode, unsigned UseIdx) const {
1756 if (!ItinData || ItinData->isEmpty())
1757 return std::nullopt;
1758
1759 if (!DefNode->isMachineOpcode())
1760 return std::nullopt;
1761
1762 unsigned DefClass = get(DefNode->getMachineOpcode()).getSchedClass();
1763 if (!UseNode->isMachineOpcode())
1764 return ItinData->getOperandCycle(DefClass, DefIdx);
1765 unsigned UseClass = get(UseNode->getMachineOpcode()).getSchedClass();
1766 return ItinData->getOperandLatency(DefClass, DefIdx, UseClass, UseIdx);
1767}
1768
1769unsigned TargetInstrInfo::getInstrLatency(const InstrItineraryData *ItinData,
1770 SDNode *N) const {
1771 if (!ItinData || ItinData->isEmpty())
1772 return 1;
1773
1774 if (!N->isMachineOpcode())
1775 return 1;
1776
1777 return ItinData->getStageLatency(get(N->getMachineOpcode()).getSchedClass());
1778}
1779
1780//===----------------------------------------------------------------------===//
1781// MachineInstr latency interface.
1782//===----------------------------------------------------------------------===//
1783
1784unsigned TargetInstrInfo::getNumMicroOps(const InstrItineraryData *ItinData,
1785 const MachineInstr &MI) const {
1786 if (!ItinData || ItinData->isEmpty())
1787 return 1;
1788
1789 unsigned Class = MI.getDesc().getSchedClass();
1790 int UOps = ItinData->Itineraries[Class].NumMicroOps;
1791 if (UOps >= 0)
1792 return UOps;
1793
1794 // The # of u-ops is dynamically determined. The specific target should
1795 // override this function to return the right number.
1796 return 1;
1797}
1798
1799/// Return the default expected latency for a def based on its opcode.
1800unsigned TargetInstrInfo::defaultDefLatency(const MCSchedModel &SchedModel,
1801 const MachineInstr &DefMI) const {
1802 if (DefMI.isTransient())
1803 return 0;
1804 if (DefMI.mayLoad())
1805 return SchedModel.LoadLatency;
1806 if (isHighLatencyDef(DefMI.getOpcode()))
1807 return SchedModel.HighLatency;
1808 return 1;
1809}
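// Example (assumed scheduling-model values): with SchedModel.LoadLatency == 4
// and SchedModel.HighLatency == 10, a COPY (transient) reports 0 cycles, a
// load reports 4, a def the target flags as high-latency (e.g. a divide)
// reports 10, and anything else defaults to 1.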
1810
1811unsigned TargetInstrInfo::getPredicationCost(const MachineInstr &) const {
1812 return 0;
1813}
1814
1815unsigned TargetInstrInfo::getInstrLatency(const InstrItineraryData *ItinData,
1816 const MachineInstr &MI,
1817 unsigned *PredCost) const {
1818 // Default to one cycle for no itinerary. However, an "empty" itinerary may
1819 // still have a MinLatency property, which getStageLatency checks.
1820 if (!ItinData)
1821 return MI.mayLoad() ? 2 : 1;
1822
1823 return ItinData->getStageLatency(MI.getDesc().getSchedClass());
1824}
1825
1826bool TargetInstrInfo::hasLowDefLatency(const TargetSchedModel &SchedModel,
1827 const MachineInstr &DefMI,
1828 unsigned DefIdx) const {
1829 const InstrItineraryData *ItinData = SchedModel.getInstrItineraries();
1830 if (!ItinData || ItinData->isEmpty())
1831 return false;
1832
1833 unsigned DefClass = DefMI.getDesc().getSchedClass();
1834 std::optional<unsigned> DefCycle =
1835 ItinData->getOperandCycle(DefClass, DefIdx);
1836 return DefCycle && DefCycle <= 1U;
1837}
1838
1839bool TargetInstrInfo::isFunctionSafeToSplit(const MachineFunction &MF) const {
1840 // TODO: We don't split functions where a section attribute has been set
1841 // since the split part may not be placed in a contiguous region. It may also
1842 // be more beneficial to augment the linker to ensure contiguous layout of
1843 // split functions within the same section as specified by the attribute.
1844 if (MF.getFunction().hasSection())
1845 return false;
1846
1847 // We don't want to proceed further for cold functions
1848 // or functions of unknown hotness. Lukewarm functions have no prefix.
1849 std::optional<StringRef> SectionPrefix = MF.getFunction().getSectionPrefix();
1850 if (SectionPrefix &&
1851 (*SectionPrefix == "unlikely" || *SectionPrefix == "unknown")) {
1852 return false;
1853 }
1854
1855 return true;
1856}
1857
1858std::optional<ParamLoadedValue>
1859TargetInstrInfo::describeLoadedValue(const MachineInstr &MI,
1860 Register Reg) const {
1861 const MachineFunction *MF = MI.getMF();
1862 DIExpression *Expr = DIExpression::get(MF->getFunction().getContext(), {});
1863 int64_t Offset;
1864 bool OffsetIsScalable;
1865
1866 // To simplify the sub-register handling, verify that we only need to
1867 // consider physical registers.
1868 assert(MF->getProperties().hasNoVRegs());
1869
1870 if (auto DestSrc = isCopyInstr(MI)) {
1871 Register DestReg = DestSrc->Destination->getReg();
1872
1873 // If the copy destination is the forwarding reg, describe the forwarding
1874 // reg using the copy source as the backup location. Example:
1875 //
1876 // x0 = MOV x7
1877 // call callee(x0) ; x0 described as x7
1878 if (Reg == DestReg)
1879 return ParamLoadedValue(*DestSrc->Source, Expr);
1880
1881 // If the target's hook couldn't describe this copy, give up.
1882 return std::nullopt;
1883 } else if (auto RegImm = isAddImmediate(MI, Reg)) {
1884 Register SrcReg = RegImm->Reg;
1885 Offset = RegImm->Imm;
1886 Expr = DIExpression::prepend(Expr, DIExpression::ApplyOffset, Offset);
1887 return ParamLoadedValue(MachineOperand::CreateReg(SrcReg, false), Expr);
1888 } else if (MI.hasOneMemOperand()) {
1889 // Only describe memory which provably does not escape the function. As
1890 // described in llvm.org/PR43343, escaped memory may be clobbered by the
1891 // callee (or by another thread).
1892 const MachineFrameInfo &MFI = MF->getFrameInfo();
1893 const MachineMemOperand *MMO = MI.memoperands()[0];
1894 const PseudoSourceValue *PSV = MMO->getPseudoValue();
1895
1896 // If the address points to "special" memory (e.g. a spill slot), it's
1897 // sufficient to check that it isn't aliased by any high-level IR value.
1898 if (!PSV || PSV->mayAlias(&MFI))
1899 return std::nullopt;
1900
1901 const MachineOperand *BaseOp;
1902 if (!getMemOperandWithOffset(MI, BaseOp, Offset, OffsetIsScalable, &TRI))
1903 return std::nullopt;
1904
1905 // FIXME: Scalable offsets are not yet handled in the offset code below.
1906 if (OffsetIsScalable)
1907 return std::nullopt;
1908
1909 // TODO: Can currently only handle mem instructions with a single define.
1910 // An example from the x86 target:
1911 // ...
1912 // DIV64m $rsp, 1, $noreg, 24, $noreg, implicit-def dead $rax, implicit-def $rdx
1913 // ...
1914 //
1915 if (MI.getNumExplicitDefs() != 1)
1916 return std::nullopt;
1917
1918 // TODO: In what way do we need to take Reg into consideration here?
1919
1920 SmallVector<uint64_t, 8> Ops;
1921 DIExpression::appendOffset(Ops, Offset);
1922 Ops.push_back(dwarf::DW_OP_deref_size);
1923 Ops.push_back(MMO->getSize().hasValue() ? MMO->getSize().getValue()
1924 : ~UINT64_C(0));
1925 Expr = DIExpression::prependOpcodes(Expr, Ops);
1926 return ParamLoadedValue(*BaseOp, Expr);
1927 }
1928
1929 return std::nullopt;
1930}
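// Examples (AArch64-flavored, for illustration only):
//   $x0 = ADDXri $x1, 16, 0   ; add-immediate case: $x0 is described as
//                             ; ($x1, DW_OP_plus_uconst 16)
//   $w0 = LDRWui $sp, 1 :: (load (s32) from %stack.0)
//                             ; memory case: a 4-byte DW_OP_deref_size of the
//                             ; spill slot's address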
1931
1932// Get the call frame size just before MI.
1933unsigned TargetInstrInfo::getCallFrameSizeAt(MachineInstr &MI) const {
1934 // Search backwards from MI for the most recent call frame instruction.
1935 MachineBasicBlock *MBB = MI.getParent();
1936 for (auto &AdjI : reverse(make_range(MBB->instr_begin(), MI.getIterator()))) {
1937 if (AdjI.getOpcode() == getCallFrameSetupOpcode())
1938 return getFrameTotalSize(AdjI);
1939 if (AdjI.getOpcode() == getCallFrameDestroyOpcode())
1940 return 0;
1941 }
1942
1943 // If none was found, use the call frame size from the start of the basic
1944 // block.
1945 return MBB->getCallFrameSize();
1946}
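// Example (hypothetical pseudo-opcodes): in
//   ADJCALLSTACKDOWN 32, ...
//   <MI>
//   ADJCALLSTACKUP 32, ...
// the backward search from <MI> hits the setup first, so the size is 32; just
// below the destroy it is 0, and above the setup it falls back to
// MBB->getCallFrameSize().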
1947
1948/// Both DefMI and UseMI must be valid. By default, call directly to the
1949/// itinerary. This may be overridden by the target.
1950std::optional<unsigned> TargetInstrInfo::getOperandLatency(
1951 const InstrItineraryData *ItinData, const MachineInstr &DefMI,
1952 unsigned DefIdx, const MachineInstr &UseMI, unsigned UseIdx) const {
1953 unsigned DefClass = DefMI.getDesc().getSchedClass();
1954 unsigned UseClass = UseMI.getDesc().getSchedClass();
1955 return ItinData->getOperandLatency(DefClass, DefIdx, UseClass, UseIdx);
1956}
1957
1958bool TargetInstrInfo::getRegSequenceInputs(
1959 const MachineInstr &MI, unsigned DefIdx,
1960 SmallVectorImpl<RegSubRegPairAndIdx> &InputRegs) const {
1961 assert((MI.isRegSequence() ||
1962 MI.isRegSequenceLike()) && "Instruction does not have the proper type");
1963
1964 if (!MI.isRegSequence())
1965 return getRegSequenceLikeInputs(MI, DefIdx, InputRegs);
1966
1967 // We are looking at:
1968 // Def = REG_SEQUENCE v0, sub0, v1, sub1, ...
1969 assert(DefIdx == 0 && "REG_SEQUENCE only has one def");
1970 for (unsigned OpIdx = 1, EndOpIdx = MI.getNumOperands(); OpIdx != EndOpIdx;
1971 OpIdx += 2) {
1972 const MachineOperand &MOReg = MI.getOperand(OpIdx);
1973 if (MOReg.isUndef())
1974 continue;
1975 const MachineOperand &MOSubIdx = MI.getOperand(OpIdx + 1);
1976 assert(MOSubIdx.isImm() &&
1977 "One of the subindices of the reg_sequence is not an immediate");
1978 // Record Reg:SubReg, SubIdx.
1979 InputRegs.push_back(RegSubRegPairAndIdx(MOReg.getReg(), MOReg.getSubReg(),
1980 (unsigned)MOSubIdx.getImm()));
1981 }
1982 return true;
1983}
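// Example: for
//   %d = REG_SEQUENCE %a, %subreg.sub0, %b, %subreg.sub1
// InputRegs receives (%a, no-subreg, sub0) and (%b, no-subreg, sub1): each
// source register, its own subregister index (none here), and the lane of %d
// it defines.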
1984
1985bool TargetInstrInfo::getExtractSubregInputs(
1986 const MachineInstr &MI, unsigned DefIdx,
1987 RegSubRegPairAndIdx &InputReg) const {
1988 assert((MI.isExtractSubreg() ||
1989 MI.isExtractSubregLike()) && "Instruction does not have the proper type");
1990
1991 if (!MI.isExtractSubreg())
1992 return getExtractSubregLikeInputs(MI, DefIdx, InputReg);
1993
1994 // We are looking at:
1995 // Def = EXTRACT_SUBREG v0.sub1, sub0.
1996 assert(DefIdx == 0 && "EXTRACT_SUBREG only has one def");
1997 const MachineOperand &MOReg = MI.getOperand(1);
1998 if (MOReg.isUndef())
1999 return false;
2000 const MachineOperand &MOSubIdx = MI.getOperand(2);
2001 assert(MOSubIdx.isImm() &&
2002 "The subindex of the extract_subreg is not an immediate");
2003
2004 InputReg.Reg = MOReg.getReg();
2005 InputReg.SubReg = MOReg.getSubReg();
2006 InputReg.SubIdx = (unsigned)MOSubIdx.getImm();
2007 return true;
2008}
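// Example: for
//   %lo = EXTRACT_SUBREG %pair, %subreg.sub0
// InputReg is (%pair, no-subreg, sub0), i.e. read lane sub0 of %pair.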
2009
2010bool TargetInstrInfo::getInsertSubregInputs(
2011 const MachineInstr &MI, unsigned DefIdx,
2012 RegSubRegPair &BaseReg, RegSubRegPairAndIdx &InsertedReg) const {
2013 assert((MI.isInsertSubreg() ||
2014 MI.isInsertSubregLike()) && "Instruction does not have the proper type");
2015
2016 if (!MI.isInsertSubreg())
2017 return getInsertSubregLikeInputs(MI, DefIdx, BaseReg, InsertedReg);
2018
2019 // We are looking at:
2020 // Def = INSERT_SUBREG v0, v1, sub0.
2021 assert(DefIdx == 0 && "INSERT_SUBREG only has one def");
2022 const MachineOperand &MOBaseReg = MI.getOperand(1);
2023 const MachineOperand &MOInsertedReg = MI.getOperand(2);
2024 if (MOInsertedReg.isUndef())
2025 return false;
2026 const MachineOperand &MOSubIdx = MI.getOperand(3);
2027 assert(MOSubIdx.isImm() &&
2028 "The subindex of the insert_subreg is not an immediate");
2029 BaseReg.Reg = MOBaseReg.getReg();
2030 BaseReg.SubReg = MOBaseReg.getSubReg();
2031
2032 InsertedReg.Reg = MOInsertedReg.getReg();
2033 InsertedReg.SubReg = MOInsertedReg.getSubReg();
2034 InsertedReg.SubIdx = (unsigned)MOSubIdx.getImm();
2035 return true;
2036}
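// Example: for
//   %d = INSERT_SUBREG %pair, %lo, %subreg.sub0
// BaseReg is (%pair, no-subreg) and InsertedReg is (%lo, no-subreg, sub0):
// %d is %pair with its sub0 lane replaced by %lo.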
2037
2038// Returns a MIRPrinter comment for this machine operand.
2039std::string TargetInstrInfo::createMIROperandComment(
2040 const MachineInstr &MI, const MachineOperand &Op, unsigned OpIdx,
2041 const TargetRegisterInfo * /*RemoveMe*/) const {
2042
2043 if (!MI.isInlineAsm())
2044 return "";
2045
2046 std::string Flags;
2047 raw_string_ostream OS(Flags);
2048
2049 if (OpIdx == InlineAsm::MIOp_ExtraInfo) {
2050 // Print HasSideEffects, MayLoad, MayStore, IsAlignStack
2051 unsigned ExtraInfo = Op.getImm();
2052 OS << interleaved(InlineAsm::getExtraInfoNames(ExtraInfo), " ");
2053 return Flags;
2054 }
2055
2056 int FlagIdx = MI.findInlineAsmFlagIdx(OpIdx);
2057 if (FlagIdx < 0 || (unsigned)FlagIdx != OpIdx)
2058 return "";
2059
2060 assert(Op.isImm() && "Expected flag operand to be an immediate");
2061 // Pretty print the inline asm operand descriptor.
2062 unsigned Flag = Op.getImm();
2063 const InlineAsm::Flag F(Flag);
2064 OS << F.getKindName();
2065
2066 unsigned RCID;
2067 if (!F.isImmKind() && !F.isMemKind() && F.hasRegClassConstraint(RCID))
2068 OS << ':' << TRI.getRegClassName(TRI.getRegClass(RCID));
2069
2070 if (F.isMemKind()) {
2071 InlineAsm::ConstraintCode MCID = F.getMemoryConstraintID();
2072 OS << ":" << InlineAsm::getMemConstraintName(MCID);
2073 }
2074
2075 unsigned TiedTo;
2076 if (F.isUseOperandTiedToDef(TiedTo))
2077 OS << " tiedto:$" << TiedTo;
2078
2079 if ((F.isRegDefKind() || F.isRegDefEarlyClobberKind() || F.isRegUseKind()) &&
2080 F.getRegMayBeFolded())
2081 OS << " foldable";
2082
2083 return Flags;
2084}
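// Example (register class name is target-dependent): for an INLINEASM flag
// operand encoding a register use in class GPR32 tied to def 0, the printed
// comment reads roughly "reguse:GPR32 tiedto:$0".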
2085
2086TargetInstrInfo::PipelinerLoopInfo::~PipelinerLoopInfo() = default;
2087
2088void TargetInstrInfo::mergeOutliningCandidateAttributes(
2089 Function &F, std::vector<outliner::Candidate> &Candidates) const {
2090 // Include target features from an arbitrary candidate for the outlined
2091 // function. This makes sure the outlined function knows what kinds of
2092 // instructions are going into it. This is fine, since all parent functions
2093 // must necessarily support the instructions that are in the outlined region.
2094 outliner::Candidate &FirstCand = Candidates.front();
2095 const Function &ParentFn = FirstCand.getMF()->getFunction();
2096 if (ParentFn.hasFnAttribute("target-features"))
2097 F.addFnAttr(ParentFn.getFnAttribute("target-features"));
2098 if (ParentFn.hasFnAttribute("target-cpu"))
2099 F.addFnAttr(ParentFn.getFnAttribute("target-cpu"));
2100
2101 // Set nounwind, so we don't generate eh_frame.
2102 if (llvm::all_of(Candidates, [](const outliner::Candidate &C) {
2103 return C.getMF()->getFunction().hasFnAttribute(Attribute::NoUnwind);
2104 }))
2105 F.addFnAttr(Attribute::NoUnwind);
2106}
2107
2108outliner::InstrType
2109TargetInstrInfo::getOutliningType(const MachineModuleInfo &MMI,
2110 MachineBasicBlock::iterator &MIT,
2111 unsigned Flags) const {
2112 MachineInstr &MI = *MIT;
2113
2114 // NOTE: MI.isMetaInstruction() will match CFI_INSTRUCTION, but some targets
2115 // have support for outlining those. Special-case that here.
2116 if (MI.isCFIInstruction())
2117 // Just go right to the target implementation.
2118 return getOutliningTypeImpl(MMI, MIT, Flags);
2119
2120 // Be conservative about inline assembly.
2121 if (MI.isInlineAsm())
2122 return outliner::InstrType::Illegal;
2123
2124 // Labels generally can't safely be outlined.
2125 if (MI.isLabel())
2126 return outliner::InstrType::Illegal;
2127
2128 // Don't let debug instructions impact analysis.
2129 if (MI.isDebugInstr())
2130 return outliner::InstrType::Invisible;
2131
2132 // Some other special cases.
2133 switch (MI.getOpcode()) {
2134 case TargetOpcode::IMPLICIT_DEF:
2135 case TargetOpcode::KILL:
2136 case TargetOpcode::LIFETIME_START:
2137 case TargetOpcode::LIFETIME_END:
2138 return outliner::InstrType::Invisible;
2139 default:
2140 break;
2141 }
2142
2143 // Is this a terminator for a basic block?
2144 if (MI.isTerminator()) {
2145 // If this is a branch to another block, we can't outline it.
2146 if (!MI.getParent()->succ_empty())
2147 return outliner::InstrType::Illegal;
2148
2149 // Don't outline if the branch is not unconditional.
2150 if (isPredicated(MI))
2151 return outliner::InstrType::Illegal;
2152 }
2153
2154 // Make sure none of the operands of this instruction do anything that
2155 // might break if they're moved outside their current function.
2156 // This includes MachineBasicBlock references, BlockAddresses,
2157 // constant pool indices and jump table indices.
2158 //
2159 // A quick note on MO_TargetIndex:
2160 // This doesn't seem to be used in any of the architectures that the
2161 // MachineOutliner supports, but it was still filtered out in all of them.
2162 // There was one exception (RISC-V), but MO_TargetIndex also isn't used there.
2163 // As such, this check is removed both here and in the target-specific
2164 // implementations. Instead, we assert to make sure this doesn't
2165 // catch anyone off-guard somewhere down the line.
2166 for (const MachineOperand &MOP : MI.operands()) {
2167 // If you hit this assertion, please remove it and adjust
2168 // `getOutliningTypeImpl` for your target appropriately if necessary.
2169 // Adding the assertion back to other supported architectures
2170 // would be nice too :)
2171 assert(!MOP.isTargetIndex() && "This isn't used quite yet!");
2172
2173 // CFI instructions should already have been filtered out at this point.
2174 assert(!MOP.isCFIIndex() && "CFI instructions handled elsewhere!");
2175
2176 // PrologEpilogInserter should've already run at this point.
2177 assert(!MOP.isFI() && "FrameIndex instructions should be gone by now!");
2178
2179 if (MOP.isMBB() || MOP.isBlockAddress() || MOP.isCPI() || MOP.isJTI())
2180 return outliner::InstrType::Illegal;
2181 }
2182
2183 // If we don't know, delegate to the target-specific hook.
2184 return getOutliningTypeImpl(MMI, MIT, Flags);
2185}
2186
2187bool TargetInstrInfo::isMBBSafeToOutlineFrom(MachineBasicBlock &MBB,
2188 unsigned &Flags) const {
2189 // Some instrumentations create special TargetOpcodes at the start which
2190 // expand to special code sequences that must be present.
2191 auto First = MBB.getFirstNonDebugInstr();
2192 if (First == MBB.end())
2193 return true;
2194
2195 if (First->getOpcode() == TargetOpcode::FENTRY_CALL ||
2196 First->getOpcode() == TargetOpcode::PATCHABLE_FUNCTION_ENTER)
2197 return false;
2198
2199 // Some instrumentations create special pseudo-instructions at or just before
2200 // the end that must be present.
2201 auto Last = MBB.getLastNonDebugInstr();
2202 if (Last->getOpcode() == TargetOpcode::PATCHABLE_RET ||
2203 Last->getOpcode() == TargetOpcode::PATCHABLE_TAIL_CALL)
2204 return false;
2205
2206 if (Last != First && Last->isReturn()) {
2207 --Last;
2208 if (Last->getOpcode() == TargetOpcode::PATCHABLE_FUNCTION_EXIT ||
2209 Last->getOpcode() == TargetOpcode::PATCHABLE_TAIL_CALL)
2210 return false;
2211 }
2212 return true;
2213}
2214
2215bool TargetInstrInfo::isGlobalMemoryObject(const MachineInstr *MI) const {
2216 return MI->isCall() || MI->hasUnmodeledSideEffects() ||
2217 (MI->hasOrderedMemoryRef() && !MI->isDereferenceableInvariantLoad());
2218}
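// Note: under this definition a call, an atomic or volatile access (ordered
// memory ref), or an instruction with unmodeled side effects is "global"; a
// dereferenceable invariant load (e.g. from the constant pool) is not.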