// TargetInstrInfo.cpp — source listing captured from the LLVM 23.0.0git
// doxygen page ("Go to the documentation of this file"). Embedded numbers
// on each line are the original file's line numbers from that listing.
1//===-- TargetInstrInfo.cpp - Target Instruction Information --------------===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8//
9// This file implements the TargetInstrInfo class.
10//
11//===----------------------------------------------------------------------===//
12
14#include "llvm/ADT/SmallSet.h"
31#include "llvm/IR/DataLayout.h"
33#include "llvm/MC/MCAsmInfo.h"
40
41using namespace llvm;
42
44 "disable-sched-hazard", cl::Hidden, cl::init(false),
45 cl::desc("Disable hazard detection during preRA scheduling"));
46
48 "acc-reassoc", cl::Hidden, cl::init(true),
49 cl::desc("Enable reassociation of accumulation chains"));
50
53 cl::desc("Minimum length of accumulator chains "
54 "required for the optimization to kick in"));
55
57 "acc-max-width", cl::Hidden, cl::init(3),
58 cl::desc("Maximum number of branches in the accumulator tree"));
59
61
63 unsigned OpNum) const {
64 if (OpNum >= MCID.getNumOperands())
65 return nullptr;
66
67 const MCOperandInfo &OpInfo = MCID.operands()[OpNum];
68 int16_t RegClass = getOpRegClassID(OpInfo);
69
70 // Instructions like INSERT_SUBREG do not have fixed register classes.
71 if (RegClass < 0)
72 return nullptr;
73
74 // Otherwise just look it up normally.
75 return TRI.getRegClass(RegClass);
76}
77
78/// insertNoop - Insert a noop into the instruction stream at the specified
79/// point.
82 llvm_unreachable("Target didn't implement insertNoop!");
83}
84
85/// insertNoops - Insert noops into the instruction stream at the specified
86/// point.
89 unsigned Quantity) const {
90 for (unsigned i = 0; i < Quantity; ++i)
92}
93
94static bool isAsmComment(const char *Str, const MCAsmInfo &MAI) {
95 return strncmp(Str, MAI.getCommentString().data(),
96 MAI.getCommentString().size()) == 0;
97}
98
99/// Measure the specified inline asm to determine an approximation of its
100/// length.
101/// Comments (which run till the next SeparatorString or newline) do not
102/// count as an instruction.
103/// Any other non-whitespace text is considered an instruction, with
104/// multiple instructions separated by SeparatorString or newlines.
105/// Variable-length instructions are not handled here; this function
106/// may be overloaded in the target code to do that.
107/// We implement a special case of the .space directive which takes only a
108/// single integer argument in base 10 that is the size in bytes. This is a
109/// restricted form of the GAS directive in that we only interpret
110/// simple--i.e. not a logical or arithmetic expression--size values without
111/// the optional fill value. This is primarily used for creating arbitrary
112/// sized inline asm blocks for testing purposes.
114 const char *Str,
115 const MCAsmInfo &MAI, const TargetSubtargetInfo *STI) const {
116 // Count the number of instructions in the asm.
117 bool AtInsnStart = true;
118 unsigned Length = 0;
119 const unsigned MaxInstLength = MAI.getMaxInstLength(STI);
120 for (; *Str; ++Str) {
121 if (*Str == '\n' || strncmp(Str, MAI.getSeparatorString(),
122 strlen(MAI.getSeparatorString())) == 0) {
123 AtInsnStart = true;
124 } else if (isAsmComment(Str, MAI)) {
125 // Stop counting as an instruction after a comment until the next
126 // separator.
127 AtInsnStart = false;
128 }
129
130 if (AtInsnStart && !isSpace(static_cast<unsigned char>(*Str))) {
131 unsigned AddLength = MaxInstLength;
132 if (strncmp(Str, ".space", 6) == 0) {
133 char *EStr;
134 int SpaceSize;
135 SpaceSize = strtol(Str + 6, &EStr, 10);
136 SpaceSize = SpaceSize < 0 ? 0 : SpaceSize;
137 while (*EStr != '\n' && isSpace(static_cast<unsigned char>(*EStr)))
138 ++EStr;
139 if (*EStr == '\0' || *EStr == '\n' ||
140 isAsmComment(EStr, MAI)) // Successfully parsed .space argument
141 AddLength = SpaceSize;
142 }
143 Length += AddLength;
144 AtInsnStart = false;
145 }
146 }
147
148 return Length;
149}
150
151/// ReplaceTailWithBranchTo - Delete the instruction OldInst and everything
152/// after it, replacing it with an unconditional branch to NewDest.
153void
155 MachineBasicBlock *NewDest) const {
156 MachineBasicBlock *MBB = Tail->getParent();
157
158 // Remove all the old successors of MBB from the CFG.
159 while (!MBB->succ_empty())
160 MBB->removeSuccessor(MBB->succ_begin());
161
162 // Save off the debug loc before erasing the instruction.
163 DebugLoc DL = Tail->getDebugLoc();
164
165 // Update call info and remove all the dead instructions
166 // from the end of MBB.
167 while (Tail != MBB->end()) {
168 auto MI = Tail++;
169 if (MI->shouldUpdateAdditionalCallInfo())
170 MBB->getParent()->eraseAdditionalCallInfo(&*MI);
171 MBB->erase(MI);
172 }
173
174 // If MBB isn't immediately before MBB, insert a branch to it.
176 insertBranch(*MBB, NewDest, nullptr, SmallVector<MachineOperand, 0>(), DL);
177 MBB->addSuccessor(NewDest);
178}
179
181 bool NewMI, unsigned Idx1,
182 unsigned Idx2) const {
183 const MCInstrDesc &MCID = MI.getDesc();
184 bool HasDef = MCID.getNumDefs();
185 if (HasDef && !MI.getOperand(0).isReg())
186 // No idea how to commute this instruction. Target should implement its own.
187 return nullptr;
188
189 unsigned CommutableOpIdx1 = Idx1; (void)CommutableOpIdx1;
190 unsigned CommutableOpIdx2 = Idx2; (void)CommutableOpIdx2;
191 assert(findCommutedOpIndices(MI, CommutableOpIdx1, CommutableOpIdx2) &&
192 CommutableOpIdx1 == Idx1 && CommutableOpIdx2 == Idx2 &&
193 "TargetInstrInfo::CommuteInstructionImpl(): not commutable operands.");
194 assert(MI.getOperand(Idx1).isReg() && MI.getOperand(Idx2).isReg() &&
195 "This only knows how to commute register operands so far");
196
197 Register Reg0 = HasDef ? MI.getOperand(0).getReg() : Register();
198 Register Reg1 = MI.getOperand(Idx1).getReg();
199 Register Reg2 = MI.getOperand(Idx2).getReg();
200 unsigned SubReg0 = HasDef ? MI.getOperand(0).getSubReg() : 0;
201 unsigned SubReg1 = MI.getOperand(Idx1).getSubReg();
202 unsigned SubReg2 = MI.getOperand(Idx2).getSubReg();
203 bool Reg1IsKill = MI.getOperand(Idx1).isKill();
204 bool Reg2IsKill = MI.getOperand(Idx2).isKill();
205 bool Reg1IsUndef = MI.getOperand(Idx1).isUndef();
206 bool Reg2IsUndef = MI.getOperand(Idx2).isUndef();
207 bool Reg1IsInternal = MI.getOperand(Idx1).isInternalRead();
208 bool Reg2IsInternal = MI.getOperand(Idx2).isInternalRead();
209 // Avoid calling isRenamable for virtual registers since we assert that
210 // renamable property is only queried/set for physical registers.
211 bool Reg1IsRenamable =
212 Reg1.isPhysical() ? MI.getOperand(Idx1).isRenamable() : false;
213 bool Reg2IsRenamable =
214 Reg2.isPhysical() ? MI.getOperand(Idx2).isRenamable() : false;
215
216 // For a case like this:
217 // %0.sub = INST %0.sub(tied), %1.sub, implicit-def %0
218 // we need to update the implicit-def after commuting to result in:
219 // %1.sub = INST %1.sub(tied), %0.sub, implicit-def %1
220 SmallVector<unsigned> UpdateImplicitDefIdx;
221 if (HasDef && MI.hasImplicitDef()) {
222 for (auto [OpNo, MO] : llvm::enumerate(MI.implicit_operands())) {
223 Register ImplReg = MO.getReg();
224 if ((ImplReg.isVirtual() && ImplReg == Reg0) ||
225 (ImplReg.isPhysical() && Reg0.isPhysical() &&
226 TRI.isSubRegisterEq(ImplReg, Reg0)))
227 UpdateImplicitDefIdx.push_back(OpNo + MI.getNumExplicitOperands());
228 }
229 }
230
231 // If destination is tied to either of the commuted source register, then
232 // it must be updated.
233 if (HasDef && Reg0 == Reg1 &&
234 MI.getDesc().getOperandConstraint(Idx1, MCOI::TIED_TO) == 0) {
235 Reg2IsKill = false;
236 Reg0 = Reg2;
237 SubReg0 = SubReg2;
238 } else if (HasDef && Reg0 == Reg2 &&
239 MI.getDesc().getOperandConstraint(Idx2, MCOI::TIED_TO) == 0) {
240 Reg1IsKill = false;
241 Reg0 = Reg1;
242 SubReg0 = SubReg1;
243 }
244
245 MachineInstr *CommutedMI = nullptr;
246 if (NewMI) {
247 // Create a new instruction.
248 MachineFunction &MF = *MI.getMF();
249 CommutedMI = MF.CloneMachineInstr(&MI);
250 } else {
251 CommutedMI = &MI;
252 }
253
254 if (HasDef) {
255 CommutedMI->getOperand(0).setReg(Reg0);
256 CommutedMI->getOperand(0).setSubReg(SubReg0);
257 for (unsigned Idx : UpdateImplicitDefIdx)
258 CommutedMI->getOperand(Idx).setReg(Reg0);
259 }
260 CommutedMI->getOperand(Idx2).setReg(Reg1);
261 CommutedMI->getOperand(Idx1).setReg(Reg2);
262 CommutedMI->getOperand(Idx2).setSubReg(SubReg1);
263 CommutedMI->getOperand(Idx1).setSubReg(SubReg2);
264 CommutedMI->getOperand(Idx2).setIsKill(Reg1IsKill);
265 CommutedMI->getOperand(Idx1).setIsKill(Reg2IsKill);
266 CommutedMI->getOperand(Idx2).setIsUndef(Reg1IsUndef);
267 CommutedMI->getOperand(Idx1).setIsUndef(Reg2IsUndef);
268 CommutedMI->getOperand(Idx2).setIsInternalRead(Reg1IsInternal);
269 CommutedMI->getOperand(Idx1).setIsInternalRead(Reg2IsInternal);
270 // Avoid calling setIsRenamable for virtual registers since we assert that
271 // renamable property is only queried/set for physical registers.
272 if (Reg1.isPhysical())
273 CommutedMI->getOperand(Idx2).setIsRenamable(Reg1IsRenamable);
274 if (Reg2.isPhysical())
275 CommutedMI->getOperand(Idx1).setIsRenamable(Reg2IsRenamable);
276 return CommutedMI;
277}
278
280 unsigned OpIdx1,
281 unsigned OpIdx2) const {
282 // If OpIdx1 or OpIdx2 is not specified, then this method is free to choose
283 // any commutable operand, which is done in findCommutedOpIndices() method
284 // called below.
285 if ((OpIdx1 == CommuteAnyOperandIndex || OpIdx2 == CommuteAnyOperandIndex) &&
286 !findCommutedOpIndices(MI, OpIdx1, OpIdx2)) {
287 assert(MI.isCommutable() &&
288 "Precondition violation: MI must be commutable.");
289 return nullptr;
290 }
291 return commuteInstructionImpl(MI, NewMI, OpIdx1, OpIdx2);
292}
293
295 unsigned &ResultIdx2,
296 unsigned CommutableOpIdx1,
297 unsigned CommutableOpIdx2) {
// Neither result index was specified by the caller: adopt the instruction's
// commutable operand pair directly.
298 if (ResultIdx1 == CommuteAnyOperandIndex &&
299 ResultIdx2 == CommuteAnyOperandIndex) {
300 ResultIdx1 = CommutableOpIdx1;
301 ResultIdx2 = CommutableOpIdx2;
// Only ResultIdx1 is free: it must become the partner of whichever of the
// two commutable operands ResultIdx2 already names; otherwise fail.
302 } else if (ResultIdx1 == CommuteAnyOperandIndex) {
303 if (ResultIdx2 == CommutableOpIdx1)
304 ResultIdx1 = CommutableOpIdx2;
305 else if (ResultIdx2 == CommutableOpIdx2)
306 ResultIdx1 = CommutableOpIdx1;
307 else
308 return false;
// Symmetric case: only ResultIdx2 is free.
309 } else if (ResultIdx2 == CommuteAnyOperandIndex) {
310 if (ResultIdx1 == CommutableOpIdx1)
311 ResultIdx2 = CommutableOpIdx2;
312 else if (ResultIdx1 == CommutableOpIdx2)
313 ResultIdx2 = CommutableOpIdx1;
314 else
315 return false;
316 } else
317 // Check that the result operand indices match the given commutable
318 // operand indices.
319 return (ResultIdx1 == CommutableOpIdx1 && ResultIdx2 == CommutableOpIdx2) ||
320 (ResultIdx1 == CommutableOpIdx2 && ResultIdx2 == CommutableOpIdx1);
321
322 return true;
323}
324
326 unsigned &SrcOpIdx1,
327 unsigned &SrcOpIdx2) const {
328 assert(!MI.isBundle() &&
329 "TargetInstrInfo::findCommutedOpIndices() can't handle bundles");
330
331 const MCInstrDesc &MCID = MI.getDesc();
332 if (!MCID.isCommutable())
333 return false;
334
335 // This assumes v0 = op v1, v2 and commuting would swap v1 and v2. If this
336 // is not true, then the target must implement this.
337 unsigned CommutableOpIdx1 = MCID.getNumDefs();
338 unsigned CommutableOpIdx2 = CommutableOpIdx1 + 1;
339 if (!fixCommutedOpIndices(SrcOpIdx1, SrcOpIdx2,
340 CommutableOpIdx1, CommutableOpIdx2))
341 return false;
342
343 if (!MI.getOperand(SrcOpIdx1).isReg() || !MI.getOperand(SrcOpIdx2).isReg())
344 // No idea.
345 return false;
346 return true;
347}
348
// Returns true for a terminator that is not currently predicated, i.e. one
// that will unconditionally run when reached. Non-terminators return false.
350 if (!MI.isTerminator()) return false;
351
352 // Conditional branch is a special case.
353 if (MI.isBranch() && !MI.isBarrier())
354 return true;
// A terminator that cannot be predicated at all is trivially unpredicated.
355 if (!MI.isPredicable())
356 return true;
// Otherwise defer to the target hook to see if a predicate is attached.
357 return !isPredicated(MI);
358}
359
362 bool MadeChange = false;
363
364 assert(!MI.isBundle() &&
365 "TargetInstrInfo::PredicateInstruction() can't handle bundles");
366
367 const MCInstrDesc &MCID = MI.getDesc();
// Leave instructions that cannot be predicated untouched.
368 if (!MI.isPredicable())
369 return false;
370
// Walk all operands: 'i' indexes MI's operands while 'j' indexes the
// supplied Pred array, advancing only when a predicate slot is visited.
371 for (unsigned j = 0, i = 0, e = MI.getNumOperands(); i != e; ++i) {
372 if (MCID.operands()[i].isPredicate()) {
373 MachineOperand &MO = MI.getOperand(i);
// Copy the matching kind of value — register, immediate, or basic
// block — from the corresponding predicate operand.
374 if (MO.isReg()) {
375 MO.setReg(Pred[j].getReg());
376 MadeChange = true;
377 } else if (MO.isImm()) {
378 MO.setImm(Pred[j].getImm());
379 MadeChange = true;
380 } else if (MO.isMBB()) {
381 MO.setMBB(Pred[j].getMBB());
382 MadeChange = true;
383 }
384 ++j;
385 }
386 }
// True iff at least one predicate operand was actually rewritten.
387 return MadeChange;
388}
389
391 const MachineInstr &MI,
393 size_t StartSize = Accesses.size();
394 for (MachineInstr::mmo_iterator o = MI.memoperands_begin(),
395 oe = MI.memoperands_end();
396 o != oe; ++o) {
397 if ((*o)->isLoad() &&
398 isa_and_nonnull<FixedStackPseudoSourceValue>((*o)->getPseudoValue()))
399 Accesses.push_back(*o);
400 }
401 return Accesses.size() != StartSize;
402}
403
405 const MachineInstr &MI,
407 size_t StartSize = Accesses.size();
408 for (MachineInstr::mmo_iterator o = MI.memoperands_begin(),
409 oe = MI.memoperands_end();
410 o != oe; ++o) {
411 if ((*o)->isStore() &&
412 isa_and_nonnull<FixedStackPseudoSourceValue>((*o)->getPseudoValue()))
413 Accesses.push_back(*o);
414 }
415 return Accesses.size() != StartSize;
416}
417
419 unsigned SubIdx, unsigned &Size,
420 unsigned &Offset,
421 const MachineFunction &MF) const {
422 if (!SubIdx) {
423 Size = TRI.getSpillSize(*RC);
424 Offset = 0;
425 return true;
426 }
427 unsigned BitSize = TRI.getSubRegIdxSize(SubIdx);
428 // Convert bit size to byte size.
429 if (BitSize % 8)
430 return false;
431
432 int BitOffset = TRI.getSubRegIdxOffset(SubIdx);
433 if (BitOffset < 0 || BitOffset % 8)
434 return false;
435
436 Size = BitSize / 8;
437 Offset = (unsigned)BitOffset / 8;
438
439 assert(TRI.getSpillSize(*RC) >= (Offset + Size) && "bad subregister range");
440
441 if (!MF.getDataLayout().isLittleEndian()) {
442 Offset = TRI.getSpillSize(*RC) - (Offset + Size);
443 }
444 return true;
445}
446
449 Register DestReg, unsigned SubIdx,
450 const MachineInstr &Orig,
451 LaneBitmask UsedLanes) const {
452 MachineInstr *MI = MBB.getParent()->CloneMachineInstr(&Orig);
453 MI->substituteRegister(MI->getOperand(0).getReg(), DestReg, SubIdx, TRI);
454 MBB.insert(I, MI);
455}
456
458 const MachineInstr &MI1,
459 const MachineRegisterInfo *MRI) const {
461}
462
465 MachineBasicBlock::iterator InsertBefore,
466 const MachineInstr &Orig) const {
467 MachineFunction &MF = *MBB.getParent();
468 // CFI instructions are marked as non-duplicable, because Darwin compact
469 // unwind info emission can't handle multiple prologue setups.
470 assert((!Orig.isNotDuplicable() ||
472 Orig.isCFIInstruction())) &&
473 "Instruction cannot be duplicated");
474
475 return MF.cloneMachineInstrBundle(MBB, InsertBefore, Orig);
476}
477
478// If the COPY instruction in MI can be folded to a stack operation, return
479// the register class to use.
481 const TargetInstrInfo &TII,
482 unsigned FoldIdx) {
483 assert(TII.isCopyInstr(MI) && "MI must be a COPY instruction");
// Only a plain two-operand copy (no extra implicit operands) is foldable.
484 if (MI.getNumOperands() != 2)
485 return nullptr;
// NOTE(review): assert message has a typo — should read "refers to a
// nonexistent operand".
486 assert(FoldIdx<2 && "FoldIdx refers no nonexistent operand");
487
488 const MachineOperand &FoldOp = MI.getOperand(FoldIdx);
489 const MachineOperand &LiveOp = MI.getOperand(1 - FoldIdx);
490
// Subregister copies would require a narrower stack access; bail out.
491 if (FoldOp.getSubReg() || LiveOp.getSubReg())
492 return nullptr;
493
494 Register FoldReg = FoldOp.getReg();
495 Register LiveReg = LiveOp.getReg();
496
497 assert(FoldReg.isVirtual() && "Cannot fold physregs");
498
499 const MachineRegisterInfo &MRI = MI.getMF()->getRegInfo();
500 const TargetRegisterClass *RC = MRI.getRegClass(FoldReg);
501
// A physical live register is acceptable only if it is a member of the
// folded register's class.
502 if (LiveOp.getReg().isPhysical())
503 return RC->contains(LiveOp.getReg()) ? RC : nullptr;
504
// For a virtual live register, its class must equal or refine RC.
505 if (RC->hasSubClassEq(MRI.getRegClass(LiveReg)))
506 return RC;
507
508 // FIXME: Allow folding when register classes are memory compatible.
509 return nullptr;
510}
511
512MCInst TargetInstrInfo::getNop() const { llvm_unreachable("Not implemented"); }
513
514/// Try to remove the load by folding it to a register
515/// operand at the use. We fold the load instructions if load defines a virtual
516/// register, the virtual register is used once in the same BB, and the
517/// instructions in-between do not load or store, and have no side effects.
519 const MachineRegisterInfo *MRI,
520 Register &FoldAsLoadDefReg,
522 MachineInstr *&CopyMI) const {
523 // Check whether we can move DefMI here.
524 DefMI = MRI->getVRegDef(FoldAsLoadDefReg);
525 assert(DefMI);
526 bool SawStore = false;
527 if (!DefMI->isSafeToMove(SawStore))
528 return nullptr;
529
530 // Collect information about virtual register operands of MI.
531 SmallVector<unsigned, 1> SrcOperandIds;
532 for (unsigned i = 0, e = MI.getNumOperands(); i != e; ++i) {
533 MachineOperand &MO = MI.getOperand(i);
534 if (!MO.isReg())
535 continue;
536 Register Reg = MO.getReg();
537 if (Reg != FoldAsLoadDefReg)
538 continue;
539 // Do not fold if we have a subreg use or a def.
540 if (MO.getSubReg() || MO.isDef())
541 return nullptr;
542 SrcOperandIds.push_back(i);
543 }
544 if (SrcOperandIds.empty())
545 return nullptr;
546
547 // Check whether we can fold the def into SrcOperandId.
548 if (MachineInstr *FoldMI =
549 foldMemoryOperand(MI, SrcOperandIds, *DefMI, CopyMI)) {
550 FoldAsLoadDefReg = 0;
551 return FoldMI;
552 }
553
554 return nullptr;
555}
556
557std::pair<unsigned, unsigned>
559 switch (MI.getOpcode()) {
560 case TargetOpcode::STACKMAP:
561 // StackMapLiveValues are foldable
562 return std::make_pair(0, StackMapOpers(&MI).getVarIdx());
563 case TargetOpcode::PATCHPOINT:
564 // For PatchPoint, the call args are not foldable (even if reported in the
565 // stackmap e.g. via anyregcc).
566 return std::make_pair(0, PatchPointOpers(&MI).getVarIdx());
567 case TargetOpcode::STATEPOINT:
568 // For statepoints, fold deopt and gc arguments, but not call arguments.
569 return std::make_pair(MI.getNumDefs(), StatepointOpers(&MI).getVarIdx());
570 default:
571 llvm_unreachable("unexpected stackmap opcode");
572 }
573}
574
576 ArrayRef<unsigned> Ops, int FrameIndex,
577 const TargetInstrInfo &TII) {
578 unsigned StartIdx = 0;
579 unsigned NumDefs = 0;
580 // getPatchpointUnfoldableRange throws guarantee if MI is not a patchpoint.
581 std::tie(NumDefs, StartIdx) = TII.getPatchpointUnfoldableRange(MI);
582
583 unsigned DefToFoldIdx = MI.getNumOperands();
584
585 // Return false if any operands requested for folding are not foldable (not
586 // part of the stackmap's live values).
587 for (unsigned Op : Ops) {
588 if (Op < NumDefs) {
589 assert(DefToFoldIdx == MI.getNumOperands() && "Folding multiple defs");
590 DefToFoldIdx = Op;
591 } else if (Op < StartIdx) {
592 return nullptr;
593 }
594 if (MI.getOperand(Op).isTied())
595 return nullptr;
596 }
597
598 MachineInstr *NewMI =
599 MF.CreateMachineInstr(TII.get(MI.getOpcode()), MI.getDebugLoc(), true);
600 MachineInstrBuilder MIB(MF, NewMI);
601
602 // No need to fold return, the meta data, and function arguments
603 for (unsigned i = 0; i < StartIdx; ++i)
604 if (i != DefToFoldIdx)
605 MIB.add(MI.getOperand(i));
606
607 for (unsigned i = StartIdx, e = MI.getNumOperands(); i < e; ++i) {
608 MachineOperand &MO = MI.getOperand(i);
609 unsigned TiedTo = e;
610 (void)MI.isRegTiedToDefOperand(i, &TiedTo);
611
612 if (is_contained(Ops, i)) {
613 assert(TiedTo == e && "Cannot fold tied operands");
614 unsigned SpillSize;
615 unsigned SpillOffset;
616 // Compute the spill slot size and offset.
617 const TargetRegisterClass *RC =
618 MF.getRegInfo().getRegClass(MO.getReg());
619 bool Valid =
620 TII.getStackSlotRange(RC, MO.getSubReg(), SpillSize, SpillOffset, MF);
621 if (!Valid)
622 report_fatal_error("cannot spill patchpoint subregister operand");
623 MIB.addImm(StackMaps::IndirectMemRefOp);
624 MIB.addImm(SpillSize);
625 MIB.addFrameIndex(FrameIndex);
626 MIB.addImm(SpillOffset);
627 } else {
628 MIB.add(MO);
629 if (TiedTo < e) {
630 assert(TiedTo < NumDefs && "Bad tied operand");
631 if (TiedTo > DefToFoldIdx)
632 --TiedTo;
633 NewMI->tieOperands(TiedTo, NewMI->getNumOperands() - 1);
634 }
635 }
636 }
637 return NewMI;
638}
639
640static void foldInlineAsmMemOperand(MachineInstr *MI, unsigned OpNo, int FI,
641 const TargetInstrInfo &TII) {
642 // If the machine operand is tied, untie it first.
643 if (MI->getOperand(OpNo).isTied()) {
644 unsigned TiedTo = MI->findTiedOperandIdx(OpNo);
645 MI->untieRegOperand(OpNo);
646 // Intentional recursion!
647 foldInlineAsmMemOperand(MI, TiedTo, FI, TII);
648 }
649
651 TII.getFrameIndexOperands(NewOps, FI);
652 assert(!NewOps.empty() && "getFrameIndexOperands didn't create any operands");
653 MI->removeOperand(OpNo);
654 MI->insert(MI->operands_begin() + OpNo, NewOps);
655
656 // Change the previous operand to a MemKind InlineAsm::Flag. The second param
657 // is the per-target number of operands that represent the memory operand
658 // excluding this one (MD). This includes MO.
660 F.setMemConstraint(InlineAsm::ConstraintCode::m);
661 MachineOperand &MD = MI->getOperand(OpNo - 1);
662 MD.setImm(F);
663}
664
665// Returns nullptr if not possible to fold.
667 ArrayRef<unsigned> Ops, int FI,
668 const TargetInstrInfo &TII) {
669 assert(MI.isInlineAsm() && "wrong opcode");
670 if (Ops.size() > 1)
671 return nullptr;
672 unsigned Op = Ops[0];
673 assert(Op && "should never be first operand");
674 assert(MI.getOperand(Op).isReg() && "shouldn't be folding non-reg operands");
675
676 if (!MI.mayFoldInlineAsmRegOp(Op))
677 return nullptr;
678
679 MachineInstr &NewMI = TII.duplicate(*MI.getParent(), MI.getIterator(), MI);
680
681 foldInlineAsmMemOperand(&NewMI, Op, FI, TII);
682
683 // Update mayload/maystore metadata, and memoperands.
684 const VirtRegInfo &RI =
685 AnalyzeVirtRegInBundle(MI, MI.getOperand(Op).getReg());
688 if (RI.Reads) {
689 ExtraMO.setImm(ExtraMO.getImm() | InlineAsm::Extra_MayLoad);
691 }
692 if (RI.Writes) {
693 ExtraMO.setImm(ExtraMO.getImm() | InlineAsm::Extra_MayStore);
695 }
696 MachineFunction *MF = NewMI.getMF();
697 const MachineFrameInfo &MFI = MF->getFrameInfo();
699 MachinePointerInfo::getFixedStack(*MF, FI), Flags, MFI.getObjectSize(FI),
700 MFI.getObjectAlign(FI));
701 NewMI.addMemOperand(*MF, MMO);
702
703 return &NewMI;
704}
705
707 ArrayRef<unsigned> Ops, int FI,
708 MachineInstr *&CopyMI,
709 LiveIntervals *LIS,
710 VirtRegMap *VRM) const {
711 auto Flags = MachineMemOperand::MONone;
712 for (unsigned OpIdx : Ops)
713 Flags |= MI.getOperand(OpIdx).isDef() ? MachineMemOperand::MOStore
715
716 MachineBasicBlock *MBB = MI.getParent();
717 assert(MBB && "foldMemoryOperand needs an inserted instruction");
718 MachineFunction &MF = *MBB->getParent();
719
720 // If we're not folding a load into a subreg, the size of the load is the
721 // size of the spill slot. But if we are, we need to figure out what the
722 // actual load size is.
723 int64_t MemSize = 0;
724 const MachineFrameInfo &MFI = MF.getFrameInfo();
725
726 if (Flags & MachineMemOperand::MOStore) {
727 MemSize = MFI.getObjectSize(FI);
728 } else {
729 for (unsigned OpIdx : Ops) {
730 int64_t OpSize = MFI.getObjectSize(FI);
731
732 if (auto SubReg = MI.getOperand(OpIdx).getSubReg()) {
733 unsigned SubRegSize = TRI.getSubRegIdxSize(SubReg);
734 if (SubRegSize > 0 && !(SubRegSize % 8))
735 OpSize = SubRegSize / 8;
736 }
737
738 MemSize = std::max(MemSize, OpSize);
739 }
740 }
741
742 assert(MemSize && "Did not expect a zero-sized stack slot");
743
744 MachineInstr *NewMI = nullptr;
745
746 if (MI.getOpcode() == TargetOpcode::STACKMAP ||
747 MI.getOpcode() == TargetOpcode::PATCHPOINT ||
748 MI.getOpcode() == TargetOpcode::STATEPOINT) {
749 // Fold stackmap/patchpoint.
750 NewMI = foldPatchpoint(MF, MI, Ops, FI, *this);
751 if (NewMI)
752 MBB->insert(MI, NewMI);
753 } else if (MI.isInlineAsm()) {
754 return foldInlineAsmMemOperand(MI, Ops, FI, *this);
755 } else {
756 // Ask the target to do the actual folding.
757 NewMI = foldMemoryOperandImpl(MF, MI, Ops, MI, FI, CopyMI, LIS, VRM);
758 }
759
760 if (NewMI) {
761 NewMI->setMemRefs(MF, MI.memoperands());
762 // Add a memory operand, foldMemoryOperandImpl doesn't do that.
764 NewMI->mayStore()) &&
765 "Folded a def to a non-store!");
766 assert((!(Flags & MachineMemOperand::MOLoad) ||
767 NewMI->mayLoad()) &&
768 "Folded a use to a non-load!");
769 assert(MFI.getObjectOffset(FI) != -1);
770 MachineMemOperand *MMO =
772 Flags, MemSize, MFI.getObjectAlign(FI));
773 NewMI->addMemOperand(MF, MMO);
774
775 // The pass "x86 speculative load hardening" always attaches symbols to
776 // call instructions. We need copy it form old instruction.
777 NewMI->cloneInstrSymbols(MF, MI);
778
779 return NewMI;
780 }
781
782 // Straight COPY may fold as load/store.
783 if (!isCopyInstr(MI) || Ops.size() != 1)
784 return nullptr;
785
786 const TargetRegisterClass *RC = canFoldCopy(MI, *this, Ops[0]);
787 if (!RC)
788 return nullptr;
789
790 const MachineOperand &MO = MI.getOperand(1 - Ops[0]);
792 if (Flags == MachineMemOperand::MOStore) {
793 if (MO.isUndef()) {
794 // If this is an undef copy, we do not need to bother we inserting spill
795 // code.
796 BuildMI(*MBB, Pos, MI.getDebugLoc(), get(TargetOpcode::KILL)).add(MO);
797 } else {
798 storeRegToStackSlot(*MBB, Pos, MO.getReg(), MO.isKill(), FI, RC,
799 Register());
800 }
801 } else
802 loadRegFromStackSlot(*MBB, Pos, MO.getReg(), FI, RC, Register());
803
804 return &*--Pos;
805}
806
809 MachineInstr &LoadMI,
810 MachineInstr *&CopyMI,
811 LiveIntervals *LIS) const {
812 assert(LoadMI.canFoldAsLoad() && "LoadMI isn't foldable!");
813#ifndef NDEBUG
814 for (unsigned OpIdx : Ops)
815 assert(MI.getOperand(OpIdx).isUse() && "Folding load into def!");
816#endif
817
818 MachineBasicBlock &MBB = *MI.getParent();
819 MachineFunction &MF = *MBB.getParent();
820
821 // Ask the target to do the actual folding.
822 MachineInstr *NewMI = nullptr;
823 int FrameIndex = 0;
824
825 if ((MI.getOpcode() == TargetOpcode::STACKMAP ||
826 MI.getOpcode() == TargetOpcode::PATCHPOINT ||
827 MI.getOpcode() == TargetOpcode::STATEPOINT) &&
828 isLoadFromStackSlot(LoadMI, FrameIndex)) {
829 // Fold stackmap/patchpoint.
830 NewMI = foldPatchpoint(MF, MI, Ops, FrameIndex, *this);
831 if (NewMI)
832 NewMI = &*MBB.insert(MI, NewMI);
833 } else if (MI.isInlineAsm() && isLoadFromStackSlot(LoadMI, FrameIndex)) {
834 return foldInlineAsmMemOperand(MI, Ops, FrameIndex, *this);
835 } else {
836 // Ask the target to do the actual folding.
837 NewMI = foldMemoryOperandImpl(MF, MI, Ops, MI, LoadMI, CopyMI, LIS);
838 }
839
840 if (!NewMI)
841 return nullptr;
842
843 // Copy the memoperands from the load to the folded instruction.
844 if (MI.memoperands_empty()) {
845 NewMI->setMemRefs(MF, LoadMI.memoperands());
846 } else {
847 // Handle the rare case of folding multiple loads.
848 NewMI->setMemRefs(MF, MI.memoperands());
850 E = LoadMI.memoperands_end();
851 I != E; ++I) {
852 NewMI->addMemOperand(MF, *I);
853 }
854 }
855 return NewMI;
856}
857
858/// transferImplicitOperands - MI is a pseudo-instruction, and the lowered
859/// replacement instructions immediately precede it. Copy any implicit
860/// operands from MI to the replacement instruction.
862 const TargetRegisterInfo *TRI) {
864 --CopyMI;
865
866 Register DstReg = MI->getOperand(0).getReg();
867 for (const MachineOperand &MO : MI->implicit_operands()) {
868 CopyMI->addOperand(MO);
869
870 // Be conservative about preserving kills when subregister defs are
871 // involved. If there was implicit kill of a super-register overlapping the
872 // copy result, we would kill the subregisters previous copies defined.
873
874 if (MO.isKill() && TRI->regsOverlap(DstReg, MO.getReg()))
875 CopyMI->getOperand(CopyMI->getNumOperands() - 1).setIsKill(false);
876 }
877}
878
880 MachineInstr *MI, const TargetRegisterInfo * /*Remove me*/) const {
881 if (MI->allDefsAreDead()) {
882 MI->setDesc(get(TargetOpcode::KILL));
883 return;
884 }
885
886 MachineOperand &DstMO = MI->getOperand(0);
887 MachineOperand &SrcMO = MI->getOperand(1);
888
889 bool IdentityCopy = (SrcMO.getReg() == DstMO.getReg());
890 if (IdentityCopy || SrcMO.isUndef()) {
891 // No need to insert an identity copy instruction, but replace with a KILL
892 // if liveness is changed.
893 if (SrcMO.isUndef() || MI->getNumOperands() > 2) {
894 // We must make sure the super-register gets killed. Replace the
895 // instruction with KILL.
896 MI->setDesc(get(TargetOpcode::KILL));
897 return;
898 }
899 // Vanilla identity copy.
900 MI->eraseFromParent();
901 return;
902 }
903
904 copyPhysReg(*MI->getParent(), MI, MI->getDebugLoc(), DstMO.getReg(),
905 SrcMO.getReg(), SrcMO.isKill(),
906 DstMO.getReg().isPhysical() ? DstMO.isRenamable() : false,
907 SrcMO.getReg().isPhysical() ? SrcMO.isRenamable() : false);
908
909 if (MI->getNumOperands() > 2)
911 MI->eraseFromParent();
912}
913
915 const MachineInstr &Inst, const MachineBasicBlock *MBB) const {
916 const MachineOperand &Op1 = Inst.getOperand(1);
917 const MachineOperand &Op2 = Inst.getOperand(2);
918 const MachineRegisterInfo &MRI = MBB->getParent()->getRegInfo();
919
920 // We need virtual register definitions for the operands that we will
921 // reassociate.
922 MachineInstr *MI1 = nullptr;
923 MachineInstr *MI2 = nullptr;
// getUniqueVRegDef also rejects registers lacking a single unique def.
924 if (Op1.isReg() && Op1.getReg().isVirtual())
925 MI1 = MRI.getUniqueVRegDef(Op1.getReg());
926 if (Op2.isReg() && Op2.getReg().isVirtual())
927 MI2 = MRI.getUniqueVRegDef(Op2.getReg());
928
929 // And at least one operand must be defined in MBB.
930 return MI1 && MI2 && (MI1->getParent() == MBB || MI2->getParent() == MBB);
931}
932
934 unsigned Opcode2) const {
935 return Opcode1 == Opcode2 || getInverseOpcode(Opcode1) == Opcode2;
936}
937
939 bool &Commuted) const {
940 const MachineBasicBlock *MBB = Inst.getParent();
941 const MachineRegisterInfo &MRI = MBB->getParent()->getRegInfo();
942 MachineInstr *MI1 = MRI.getUniqueVRegDef(Inst.getOperand(1).getReg());
943 MachineInstr *MI2 = MRI.getUniqueVRegDef(Inst.getOperand(2).getReg());
944 unsigned Opcode = Inst.getOpcode();
945
946 // If only one operand has the same or inverse opcode and it's the second
947 // source operand, the operands must be commuted.
948 Commuted = !areOpcodesEqualOrInverse(Opcode, MI1->getOpcode()) &&
949 areOpcodesEqualOrInverse(Opcode, MI2->getOpcode());
950 if (Commuted)
951 std::swap(MI1, MI2);
952
953 // 1. The previous instruction must be the same type as Inst.
954 // 2. The previous instruction must also be associative/commutative or be the
955 // inverse of such an operation (this can be different even for
956 // instructions with the same opcode if traits like fast-math-flags are
957 // included).
958 // 3. The previous instruction must have virtual register definitions for its
959 // operands in the same basic block as Inst.
960 // 4. The previous instruction's result must only be used by Inst.
961 return areOpcodesEqualOrInverse(Opcode, MI1->getOpcode()) &&
963 isAssociativeAndCommutative(*MI1, /* Invert */ true)) &&
965 MRI.hasOneNonDBGUse(MI1->getOperand(0).getReg());
966}
967
968// 1. The operation must be associative and commutative or be the inverse of
969// such an operation.
970// 2. The instruction must have virtual register definitions for its
971// operands in the same basic block.
972// 3. The instruction must have a reassociable sibling.
974 bool &Commuted) const {
975 return (isAssociativeAndCommutative(Inst) ||
976 isAssociativeAndCommutative(Inst, /* Invert */ true)) &&
977 hasReassociableOperands(Inst, Inst.getParent()) &&
978 hasReassociableSibling(Inst, Commuted);
979}
980
981// Utility routine that checks if \param MO is defined by an
982// \param CombineOpc instruction in the basic block \param MBB.
983// If \param CombineOpc is not provided, the OpCode check will
984// be skipped.
// NOTE(review): source line 985 — the first half of this static helper's
// signature (taking the MBB and the MachineOperand MO) — was dropped by the
// extraction; restore it from upstream before compiling.
986                       unsigned CombineOpc = 0) {
987  MachineRegisterInfo &MRI = MBB.getParent()->getRegInfo();
988  MachineInstr *MI = nullptr;
989
// Only virtual registers have a unique def we can walk to.
990  if (MO.isReg() && MO.getReg().isVirtual())
991    MI = MRI.getUniqueVRegDef(MO.getReg());
992  // And it needs to be in the trace (otherwise, it won't have a depth).
993  if (!MI || MI->getParent() != &MBB ||
994      (MI->getOpcode() != CombineOpc && CombineOpc != 0))
995    return false;
996  // The def must only be used by the single (non-debug) user we combine with.
997  if (!MRI.hasOneNonDBGUse(MO.getReg()))
998    return false;
999
1000  return true;
1001}
1002
1003// A chain of accumulation instructions will be selected IFF:
1004// 1. All the accumulation instructions in the chain have the same opcode,
1005//    besides the first that has a slightly different opcode because it does
1006//    not accumulate into a register.
1007// 2. All the instructions in the chain are combinable (have a single use
1008//    which itself is part of the chain).
1009// 3. Meets the required minimum length.
// NOTE(review): source line 1010 — the start of the signature (presumably
// `void TargetInstrInfo::getAccumulatorChain(`) — was dropped by the
// extraction; restore it from upstream before compiling.
1011    MachineInstr *CurrentInstr, SmallVectorImpl<Register> &Chain) const {
1012  // Walk up the chain of accumulation instructions and collect them in the
1013  // vector.
1014  MachineBasicBlock &MBB = *CurrentInstr->getParent();
1015  const MachineRegisterInfo &MRI = MBB.getParent()->getRegInfo();
1016  unsigned AccumulatorOpcode = CurrentInstr->getOpcode();
1017  std::optional<unsigned> ChainStartOpCode =
1018      getAccumulationStartOpcode(AccumulatorOpcode);
1019
// Bail out (leaving Chain empty) if the target defines no chain-start opcode
// for this accumulator opcode.
1020  if (!ChainStartOpCode.has_value())
1021    return;
1022
1023  // Push the first accumulator result to the start of the chain.
1024  Chain.push_back(CurrentInstr->getOperand(0).getReg());
1025
1026  // Collect the accumulator input register from all instructions in the chain.
// canCombine() only succeeds when operand 1 has a unique in-block def, so the
// getUniqueVRegDef below cannot return null while the loop keeps iterating,
// and CurrentInstr is still non-null after the loop exits.
1027  while (CurrentInstr &&
1028         canCombine(MBB, CurrentInstr->getOperand(1), AccumulatorOpcode)) {
1029    Chain.push_back(CurrentInstr->getOperand(1).getReg());
1030    CurrentInstr = MRI.getUniqueVRegDef(CurrentInstr->getOperand(1).getReg());
1031  }
1032
1033  // Add the instruction at the top of the chain.
1034  if (CurrentInstr->getOpcode() == AccumulatorOpcode &&
1035      canCombine(MBB, CurrentInstr->getOperand(1)))
1036    Chain.push_back(CurrentInstr->getOperand(1).getReg());
1037}
1038
1039/// Find chains of accumulations that can be rewritten as a tree for increased
1040/// ILP.
// NOTE(review): several source lines were dropped by the extraction and must
// be restored from upstream: 1041 (start of the signature), 1043 (the guard
// condition whose `return false` is on line 1044 — presumably the
// EnableAccReassociation cl::opt check), 1047 (the condition guarding line
// 1048), 1061 (the declaration of the local `Chain` vector used on 1062), and
// 1077 (presumably the Patterns.push_back(...) preceding `return true`).
1042    MachineInstr &Root, SmallVectorImpl<unsigned> &Patterns) const {
1044    return false;
1045
1046  unsigned Opc = Root.getOpcode();
1048    return false;
1049
1050  // Verify that this is the end of the chain.
1051  MachineBasicBlock &MBB = *Root.getParent();
1052  MachineRegisterInfo &MRI = MBB.getParent()->getRegInfo();
1053  if (!MRI.hasOneNonDBGUser(Root.getOperand(0).getReg()))
1054    return false;
1055
// The single user must not itself accumulate with the same opcode, otherwise
// Root is in the middle of a chain rather than at its end.
1056  auto User = MRI.use_instr_begin(Root.getOperand(0).getReg());
1057  if (User->getOpcode() == Opc)
1058    return false;
1059
1060  // Walk up the use chain and collect the reduction chain.
1062  getAccumulatorChain(&Root, Chain);
1063
1064  // Reject chains which are too short to be worth modifying.
1065  if (Chain.size() < MinAccumulatorDepth)
1066    return false;
1067
1068  // Check if the MBB this instruction is a part of contains any other chains.
1069  // If so, don't apply it.
1070  SmallSet<Register, 32> ReductionChain(llvm::from_range, Chain);
1071  for (const auto &I : MBB) {
1072    if (I.getOpcode() == Opc &&
1073        !ReductionChain.contains(I.getOperand(0).getReg()))
1074      return false;
1075  }
1076
1078  return true;
1079}
1080
1081// Reduce branches of the accumulator tree by pairwise-adding the partial
1082// sums in RegistersToReduce; each call halves the list (rounding up), and the
1083// caller loops until a single register remains.
// NOTE(review): dropped by the extraction and needed from upstream: line 1082
// (start of the signature), 1084-1085 (remaining parameters: the InsInstrs
// vector, MF, Root and MRI references used below), and 1088-1089 (locals,
// including the `NewRegs` vector that collects this round's results).
1083    SmallVectorImpl<Register> &RegistersToReduce,
1086    DenseMap<Register, unsigned> &InstrIdxForVirtReg,
1087    Register ResultReg) const {
1090
1091  // Get the opcode for the reduction instruction we will need to build.
1092  // If for some reason it is not defined, early exit and don't apply this.
1093  unsigned ReduceOpCode = getReduceOpcodeForAccumulator(Root.getOpcode());
1094
// Combine adjacent pairs [i-1], [i]; stepping i by 2 leaves an odd trailing
// register untouched (handled after the loop).
1095  for (unsigned int i = 1; i <= (RegistersToReduce.size() / 2); i += 2) {
1096    auto RHS = RegistersToReduce[i - 1];
1097    auto LHS = RegistersToReduce[i];
1098    Register Dest;
1099    // If we are reducing 2 registers, reuse the original result register.
1100    if (RegistersToReduce.size() == 2)
1101      Dest = ResultReg;
1102    // Otherwise, create a new virtual register to hold the partial sum.
1103    else {
1104      auto NewVR = MRI.createVirtualRegister(
1105          MRI.getRegClass(Root.getOperand(0).getReg()));
1106      Dest = NewVR;
1107      NewRegs.push_back(Dest);
1108      InstrIdxForVirtReg.insert(std::make_pair(Dest, InsInstrs.size()));
1109    }
1110
1111    // Create the new reduction instruction.
1113        BuildMI(MF, MIMetadata(Root), TII->get(ReduceOpCode), Dest)
1114            .addReg(RHS, getKillRegState(true))
1115            .addReg(LHS, getKillRegState(true));
1116    // Copy any flags needed from the original instruction.
1117    MIB->setFlags(Root.getFlags());
1118    InsInstrs.push_back(MIB);
1119  }
1120
1121  // If the number of registers to reduce is odd, add the remaining register to
1122  // the vector of registers to reduce.
1123  if (RegistersToReduce.size() % 2 != 0)
1124    NewRegs.push_back(RegistersToReduce[RegistersToReduce.size() - 1]);
1125
// Hand this round's partial-sum registers back to the caller for the next
// round of reduction.
1126  RegistersToReduce = std::move(NewRegs);
1127}
1128
1129// The concept of the reassociation pass is that these operations can benefit
1130// from this kind of transformation:
1131//
1132// A = ? op ?
1133// B = A op X (Prev)
1134// C = B op Y (Root)
1135// -->
1136// A = ? op ?
1137// B = X op Y
1138// C = A op B
1139//
1140// breaking the dependency between A and B, allowing them to be executed in
1141// parallel (or back-to-back in a pipeline) instead of depending on each other.
1142
1143// FIXME: This has the potential to be expensive (compile time) while not
1144// improving the code at all. Some ways to limit the overhead:
1145// 1. Track successful transforms; bail out if hit rate gets too low.
1146// 2. Only enable at -O3 or some other non-default optimization level.
1147// 3. Pre-screen pattern candidates here: if an operand of the previous
1148// instruction is known to not increase the critical path, then don't match
1149// that pattern.
// NOTE(review): dropped by the extraction: line 1150 (start of the signature,
// presumably `bool TargetInstrInfo::getMachineCombinerPatterns(`) and lines
// 1160-1161 / 1163-1164 (the Patterns.push_back(...) calls inside the two
// branches of `if (Commute)` below) — restore from upstream.
1151    MachineInstr &Root, SmallVectorImpl<unsigned> &Patterns,
1152    bool DoRegPressureReduce) const {
1153  bool Commute;
1154  if (isReassociationCandidate(Root, Commute)) {
1155    // We found a sequence of instructions that may be suitable for a
1156    // reassociation of operands to increase ILP. Specify each commutation
1157    // possibility for the Prev instruction in the sequence and let the
1158    // machine combiner decide if changing the operands is worthwhile.
1159    if (Commute) {
1162    } else {
1165    }
1166    return true;
1167  }
// Independently of reassociation, also offer accumulator-chain patterns.
1168  if (getAccumulatorReassociationPatterns(Root, Patterns))
1169    return true;
1170
1171  return false;
1172}
1173
1174/// Return true when a code sequence can improve loop throughput.
// NOTE(review): the signature line (source line 1175) was dropped by the
// extraction. Base implementation is conservative: no pattern is considered
// a throughput pattern unless a target overrides this hook.
1176  return false;
1177}
1178
// NOTE(review): this function was heavily damaged by the extraction — lines
// 1179-1180 (return type and signature) and 1182-1183 / 1185 (the non-default
// case and both return statements) are missing. Only the switch skeleton over
// `Pattern` remains visible; reconstruct the body from upstream.
1181  switch (Pattern) {
1184  default:
1186  }
1187}
1188
1189std::pair<unsigned, unsigned>
// NOTE(review): dropped by the extraction: line 1190 (the qualified function
// name and first parameter, presumably
// `TargetInstrInfo::getReassociationOpcodes(unsigned Pattern,`), line 1208
// (first half of the assert whose message is on 1209), and the four
// `case MachineCombinerPattern::REASSOC_*:` labels at lines 1240, 1248, 1256
// and 1264 — per the rule table below they are AX_BY, XA_BY, AX_YB, XA_YB in
// that order. Restore from upstream before compiling.
1191                                         const MachineInstr &Root,
1192                                         const MachineInstr &Prev) const {
1193  bool AssocCommutRoot = isAssociativeAndCommutative(Root);
1194  bool AssocCommutPrev = isAssociativeAndCommutative(Prev);
1195
1196  // Early exit if both opcodes are associative and commutative. It's a trivial
1197  // reassociation when we only change operands order. In this case opcodes are
1198  // not required to have inverse versions.
1199  if (AssocCommutRoot && AssocCommutPrev) {
1200    assert(Root.getOpcode() == Prev.getOpcode() && "Expected to be equal");
1201    return std::make_pair(Root.getOpcode(), Root.getOpcode());
1202  }
1203
1204  // At least one instruction is not associative or commutative.
1205  // Since we have matched one of the reassociation patterns, we expect that the
1206  // instructions' opcodes are equal or one of them is the inversion of the
1207  // other.
1209         "Incorrectly matched pattern");
// Normalize so AssocCommutOpcode always names the commutative form and
// InverseOpcode its inverse, regardless of which one Root carries.
1210  unsigned AssocCommutOpcode = Root.getOpcode();
1211  unsigned InverseOpcode = *getInverseOpcode(Root.getOpcode());
1212  if (!AssocCommutRoot)
1213    std::swap(AssocCommutOpcode, InverseOpcode);
1214
1215  // The transformation rule (`+` is any associative and commutative binary
1216  // operation, `-` is the inverse):
1217  // REASSOC_AX_BY:
1218  //   (A + X) + Y => A + (X + Y)
1219  //   (A + X) - Y => A + (X - Y)
1220  //   (A - X) + Y => A - (X - Y)
1221  //   (A - X) - Y => A - (X + Y)
1222  // REASSOC_XA_BY:
1223  //   (X + A) + Y => (X + Y) + A
1224  //   (X + A) - Y => (X - Y) + A
1225  //   (X - A) + Y => (X + Y) - A
1226  //   (X - A) - Y => (X - Y) - A
1227  // REASSOC_AX_YB:
1228  //   Y + (A + X) => (Y + X) + A
1229  //   Y - (A + X) => (Y - X) - A
1230  //   Y + (A - X) => (Y - X) + A
1231  //   Y - (A - X) => (Y + X) - A
1232  // REASSOC_XA_YB:
1233  //   Y + (X + A) => (Y + X) + A
1234  //   Y - (X + A) => (Y - X) - A
1235  //   Y + (X - A) => (Y + X) - A
1236  //   Y - (X - A) => (Y - X) + A
1237  switch (Pattern) {
1238  default:
1239    llvm_unreachable("Unexpected pattern");
1241    if (!AssocCommutRoot && AssocCommutPrev)
1242      return {AssocCommutOpcode, InverseOpcode};
1243    if (AssocCommutRoot && !AssocCommutPrev)
1244      return {InverseOpcode, InverseOpcode};
1245    if (!AssocCommutRoot && !AssocCommutPrev)
1246      return {InverseOpcode, AssocCommutOpcode};
1247    break;
1249    if (!AssocCommutRoot && AssocCommutPrev)
1250      return {AssocCommutOpcode, InverseOpcode};
1251    if (AssocCommutRoot && !AssocCommutPrev)
1252      return {InverseOpcode, AssocCommutOpcode};
1253    if (!AssocCommutRoot && !AssocCommutPrev)
1254      return {InverseOpcode, InverseOpcode};
1255    break;
1257    if (!AssocCommutRoot && AssocCommutPrev)
1258      return {InverseOpcode, InverseOpcode};
1259    if (AssocCommutRoot && !AssocCommutPrev)
1260      return {AssocCommutOpcode, InverseOpcode};
1261    if (!AssocCommutRoot && !AssocCommutPrev)
1262      return {InverseOpcode, AssocCommutOpcode};
1263    break;
1265    if (!AssocCommutRoot && AssocCommutPrev)
1266      return {InverseOpcode, InverseOpcode};
1267    if (AssocCommutRoot && !AssocCommutPrev)
1268      return {InverseOpcode, AssocCommutOpcode};
1269    if (!AssocCommutRoot && !AssocCommutPrev)
1270      return {AssocCommutOpcode, InverseOpcode};
1271    break;
1272  }
// Every case falls through only when both opcodes are commutative, which the
// early-exit above already handled — reaching here is a logic error.
1273  llvm_unreachable("Unhandled combination");
1274}
1275
1276// Return a pair of boolean flags showing if the new root and new prev operands
1277// must be swapped. See visual example of the rule in
1278// TargetInstrInfo::getReassociationOpcodes.
// NOTE(review): the four `case MachineCombinerPattern::REASSOC_*:` labels
// (source lines 1283, 1285, 1287, 1289) were dropped by the extraction; only
// the four return statements remain. Restore the labels from upstream.
1279static std::pair<bool, bool> mustSwapOperands(unsigned Pattern) {
1280  switch (Pattern) {
1281  default:
1282    llvm_unreachable("Unexpected pattern");
1284    return {false, false};
1286    return {true, false};
1288    return {true, true};
1290    return {true, true};
1291  }
1292}
1293
// NOTE(review): dropped by the extraction: line 1294 (start of the signature,
// presumably `void TargetInstrInfo::getReassociateOperandIndices(`) and the
// four `case MachineCombinerPattern::REASSOC_*:` labels at lines 1298, 1301,
// 1304 and 1307. The five indices name, in order, the operand of Root that
// holds Prev's result followed by the operand positions used for A/X/B/Y —
// TODO confirm the exact convention against upstream once the labels are
// restored.
1295    const MachineInstr &Root, unsigned Pattern,
1296    std::array<unsigned, 5> &OperandIndices) const {
1297  switch (Pattern) {
1299    OperandIndices = {1, 1, 1, 2, 2};
1300    break;
1302    OperandIndices = {2, 1, 2, 2, 1};
1303    break;
1305    OperandIndices = {1, 2, 1, 1, 2};
1306    break;
1308    OperandIndices = {2, 2, 2, 1, 1};
1309    break;
1310  default:
1311    llvm_unreachable("unexpected MachineCombinerPattern");
1312  }
1313}
1314
1315/// Attempt the reassociation transformation to reduce critical path length.
1316/// See the above comments before getMachineCombinerPatterns().
// NOTE(review): dropped by the extraction and required from upstream: line
// 1317 (start of the signature), 1319-1321 (remaining parameters, including
// InsInstrs/DelInstrs and the OperandIndices array used below), 1325 (locals
// TII/TRI used on line 1326), 1328-1331 (the OpA/OpB/OpX/OpY operand
// references whose registers are read starting at line 1334), and the four
// `case MachineCombinerPattern::REASSOC_*:` labels at 1378, 1384, 1390, 1396.
1318    MachineInstr &Root, MachineInstr &Prev, unsigned Pattern,
1322    DenseMap<Register, unsigned> &InstrIdxForVirtReg) const {
1323  MachineFunction *MF = Root.getMF();
1324  MachineRegisterInfo &MRI = MF->getRegInfo();
1326  const TargetRegisterClass *RC = Root.getRegClassConstraint(0, TII, &TRI);
1327
1332  MachineOperand &OpC = Root.getOperand(0);
1333
1334  Register RegA = OpA.getReg();
1335  unsigned SubRegA = OpA.getSubReg();
1336  Register RegB = OpB.getReg();
1337  Register RegX = OpX.getReg();
1338  unsigned SubRegX = OpX.getSubReg();
1339  Register RegY = OpY.getReg();
1340  unsigned SubRegY = OpY.getSubReg();
1341  Register RegC = OpC.getReg();
1342
// Constrain every virtual register involved to Root's register class so the
// rebuilt instructions remain legal.
1343  if (RegA.isVirtual())
1344    MRI.constrainRegClass(RegA, RC);
1345  if (RegB.isVirtual())
1346    MRI.constrainRegClass(RegB, RC);
1347  if (RegX.isVirtual())
1348    MRI.constrainRegClass(RegX, RC);
1349  if (RegY.isVirtual())
1350    MRI.constrainRegClass(RegY, RC);
1351  if (RegC.isVirtual())
1352    MRI.constrainRegClass(RegC, RC);
1353
1354  // Create a new virtual register for the result of (X op Y) instead of
1355  // recycling RegB because the MachineCombiner's computation of the critical
1356  // path requires a new register definition rather than an existing one.
1357  Register NewVR = MRI.createVirtualRegister(RC);
1358  unsigned SubRegNewVR = 0;
1359  InstrIdxForVirtReg.insert(std::make_pair(NewVR, 0));
1360
1361  auto [NewRootOpc, NewPrevOpc] = getReassociationOpcodes(Pattern, Root, Prev);
1362  bool KillA = OpA.isKill();
1363  bool KillX = OpX.isKill();
1364  bool KillY = OpY.isKill();
1365  bool KillNewVR = true;
1366
1367  auto [SwapRootOperands, SwapPrevOperands] = mustSwapOperands(Pattern);
1368
1369  if (SwapPrevOperands) {
1370    std::swap(RegX, RegY);
1371    std::swap(SubRegX, SubRegY);
1372    std::swap(KillX, KillY);
1373  }
1374
// Map the pattern onto concrete operand slots of Prev and Root (the case
// labels for this switch were lost to the extraction — see NOTE above).
1375  unsigned PrevFirstOpIdx, PrevSecondOpIdx;
1376  unsigned RootFirstOpIdx, RootSecondOpIdx;
1377  switch (Pattern) {
1379    PrevFirstOpIdx = OperandIndices[1];
1380    PrevSecondOpIdx = OperandIndices[3];
1381    RootFirstOpIdx = OperandIndices[2];
1382    RootSecondOpIdx = OperandIndices[4];
1383    break;
1385    PrevFirstOpIdx = OperandIndices[1];
1386    PrevSecondOpIdx = OperandIndices[3];
1387    RootFirstOpIdx = OperandIndices[4];
1388    RootSecondOpIdx = OperandIndices[2];
1389    break;
1391    PrevFirstOpIdx = OperandIndices[3];
1392    PrevSecondOpIdx = OperandIndices[1];
1393    RootFirstOpIdx = OperandIndices[2];
1394    RootSecondOpIdx = OperandIndices[4];
1395    break;
1397    PrevFirstOpIdx = OperandIndices[3];
1398    PrevSecondOpIdx = OperandIndices[1];
1399    RootFirstOpIdx = OperandIndices[4];
1400    RootSecondOpIdx = OperandIndices[2];
1401    break;
1402  default:
1403    llvm_unreachable("unexpected MachineCombinerPattern");
1404  }
1405
1406  // Basically BuildMI but doesn't add implicit operands by default.
1407  auto buildMINoImplicit = [](MachineFunction &MF, const MIMetadata &MIMD,
1408                              const MCInstrDesc &MCID, Register DestReg) {
1409    return MachineInstrBuilder(
1410               MF, MF.CreateMachineInstr(MCID, MIMD.getDL(), /*NoImpl=*/true))
1411        .copyMIMetadata(MIMD)
1412        .addReg(DestReg, RegState::Define);
1413  };
1414
1415  // Create new instructions for insertion.
1416  MachineInstrBuilder MIB1 =
1417      buildMINoImplicit(*MF, MIMetadata(Prev), TII->get(NewPrevOpc), NewVR);
// Rebuild Prev as NewVR = X op Y, substituting the reassociated registers in
// the two selected slots and copying every other explicit operand verbatim.
1418  for (const auto &MO : Prev.explicit_operands()) {
1419    unsigned Idx = MO.getOperandNo();
1420    // Skip the result operand we'd already added.
1421    if (Idx == 0)
1422      continue;
1423    if (Idx == PrevFirstOpIdx)
1424      MIB1.addReg(RegX, getKillRegState(KillX), SubRegX);
1425    else if (Idx == PrevSecondOpIdx)
1426      MIB1.addReg(RegY, getKillRegState(KillY), SubRegY);
1427    else
1428      MIB1.add(MO);
1429  }
1430  MIB1.copyImplicitOps(Prev);
1431
1432  if (SwapRootOperands) {
1433    std::swap(RegA, NewVR);
1434    std::swap(SubRegA, SubRegNewVR);
1435    std::swap(KillA, KillNewVR);
1436  }
1437
// Rebuild Root as RegC = A op NewVR in the same fashion.
1438  MachineInstrBuilder MIB2 =
1439      buildMINoImplicit(*MF, MIMetadata(Root), TII->get(NewRootOpc), RegC);
1440  for (const auto &MO : Root.explicit_operands()) {
1441    unsigned Idx = MO.getOperandNo();
1442    // Skip the result operand.
1443    if (Idx == 0)
1444      continue;
1445    if (Idx == RootFirstOpIdx)
1446      MIB2 = MIB2.addReg(RegA, getKillRegState(KillA), SubRegA);
1447    else if (Idx == RootSecondOpIdx)
1448      MIB2 = MIB2.addReg(NewVR, getKillRegState(KillNewVR), SubRegNewVR);
1449    else
1450      MIB2 = MIB2.add(MO);
1451  }
1452  MIB2.copyImplicitOps(Root);
1453
1454  // Propagate FP flags from the original instructions.
1455  // But clear poison-generating flags because those may not be valid now.
1456  // TODO: There should be a helper function for copying only fast-math-flags.
// NOTE(review): the statements clearing poison-generating flags on MIB1/MIB2
// (source lines 1459-1462 and 1465-1468) were dropped by the extraction.
1457  uint32_t IntersectedFlags = Root.getFlags() & Prev.getFlags();
1458  MIB1->setFlags(IntersectedFlags);
1463
1464  MIB2->setFlags(IntersectedFlags);
1469
1470  setSpecialOperandAttr(Root, Prev, *MIB1, *MIB2);
1471
1472  // Record new instructions for insertion and old instructions for deletion.
1473  InsInstrs.push_back(MIB1);
1474  InsInstrs.push_back(MIB2);
1475  DelInstrs.push_back(&Prev);
1476  DelInstrs.push_back(&Root);
1477
1478  // We transformed:
1479  // B = A op X (Prev)
1480  // C = B op Y (Root)
1481  // Into:
1482  // B = X op Y (MIB1)
1483  // C = A op B (MIB2)
1484  // C has the same value as before, B doesn't; as such, keep the debug number
1485  // of C but not of B.
1486  if (unsigned OldRootNum = Root.peekDebugInstrNum())
1487    MIB2.getInstr()->setDebugInstrNum(OldRootNum);
1488}
1489
// NOTE(review): dropped by the extraction and required from upstream: line
// 1490 (start of the signature), 1492-1493 (the InsInstrs/DelInstrs
// parameters), 1498 (the `TII` local used below), 1501-1504 (the four
// REASSOC_* case labels opening the first switch arm), 1508
// (the getReassociateOperandIndices call), 1510 (the getUniqueVRegDef
// initializer of `Prev`), 1520 (the ACC_CHAIN case label), 1524 (first half
// of the assert ending on 1525), 1528 (the `: MaxAccumulatorWidth` arm of the
// conditional on 1526-1527), 1538 (the `MIB` local), and 1554 (the
// createVirtualRegister arm of the conditional on 1553-1556).
1491    MachineInstr &Root, unsigned Pattern,
1494    DenseMap<Register, unsigned> &InstIdxForVirtReg) const {
1495  MachineRegisterInfo &MRI = Root.getMF()->getRegInfo();
1496  MachineBasicBlock &MBB = *Root.getParent();
1497  MachineFunction &MF = *MBB.getParent();
1499
1500  switch (Pattern) {
1505    // Select the previous instruction in the sequence based on the input
1506    // pattern.
1507    std::array<unsigned, 5> OperandIndices;
1509    MachineInstr *Prev =
1511
1512    // Don't reassociate if Prev and Root are in different blocks.
1513    if (Prev->getParent() != Root.getParent())
1514      return;
1515
1516    reassociateOps(Root, *Prev, Pattern, InsInstrs, DelInstrs, OperandIndices,
1517                   InstIdxForVirtReg);
1518    break;
1519  }
// Accumulator-chain rewrite: re-gather the chain found during pattern
// matching and rebuild it as a balanced tree of partial sums.
1521    SmallVector<Register, 32> ChainRegs;
1522    getAccumulatorChain(&Root, ChainRegs);
1523    unsigned int Depth = ChainRegs.size();
1525           "Max accumulator width set to illegal value");
1526    unsigned int MaxWidth = Log2_32(Depth) < MaxAccumulatorWidth
1527                                ? Log2_32(Depth)
1529
1530    // Walk down the chain and rewrite it as a tree.
1531    for (auto IndexedReg : llvm::enumerate(llvm::reverse(ChainRegs))) {
1532      // No need to rewrite the first node, it is already perfect as it is.
1533      if (IndexedReg.index() == 0)
1534        continue;
1535
1536      // FIXME: Losing subregisters
1537      MachineInstr *Instr = MRI.getUniqueVRegDef(IndexedReg.value());
1539      Register AccReg;
1540      if (IndexedReg.index() < MaxWidth) {
1541        // Now we need to create new instructions for the first row.
1542        AccReg = Instr->getOperand(0).getReg();
1543        unsigned OpCode = getAccumulationStartOpcode(Root.getOpcode());
1544
1545        MIB = BuildMI(MF, MIMetadata(*Instr), TII->get(OpCode), AccReg)
1546                  .addReg(Instr->getOperand(2).getReg(),
1547                          getKillRegState(Instr->getOperand(2).isKill()))
1548                  .addReg(Instr->getOperand(3).getReg(),
1549                          getKillRegState(Instr->getOperand(3).isKill()));
1550      } else {
1551        // For the remaining cases, we need to use an output register of one of
1552        // the newly inserted instuctions as operand 1
1553        AccReg = Instr->getOperand(0).getReg() == Root.getOperand(0).getReg()
1555                         MRI.getRegClass(Root.getOperand(0).getReg()))
1556                     : Instr->getOperand(0).getReg();
1557        assert(IndexedReg.index() >= MaxWidth);
1558        auto AccumulatorInput =
1559            ChainRegs[Depth - (IndexedReg.index() - MaxWidth) - 1];
1560        MIB = BuildMI(MF, MIMetadata(*Instr), TII->get(Instr->getOpcode()),
1561                      AccReg)
1562                  .addReg(AccumulatorInput, getKillRegState(true))
1563                  .addReg(Instr->getOperand(2).getReg(),
1564                          getKillRegState(Instr->getOperand(2).isKill()))
1565                  .addReg(Instr->getOperand(3).getReg(),
1566                          getKillRegState(Instr->getOperand(3).isKill()));
1567      }
1568
1569      MIB->setFlags(Instr->getFlags());
1570      InstIdxForVirtReg.insert(std::make_pair(AccReg, InsInstrs.size()));
1571      InsInstrs.push_back(MIB);
1572      DelInstrs.push_back(Instr);
1573    }
1574
// The last MaxWidth inserted instructions define the tree's leaves; collect
// their results and pairwise-reduce until a single register remains.
1575    SmallVector<Register, 8> RegistersToReduce;
1576    for (unsigned i = (InsInstrs.size() - MaxWidth); i < InsInstrs.size();
1577         ++i) {
1578      auto Reg = InsInstrs[i]->getOperand(0).getReg();
1579      RegistersToReduce.push_back(Reg);
1580    }
1581
1582    while (RegistersToReduce.size() > 1)
1583      reduceAccumulatorTree(RegistersToReduce, InsInstrs, MF, Root, MRI,
1584                            InstIdxForVirtReg, Root.getOperand(0).getReg());
1585
1586    break;
1587  }
1588  }
1589}
1590
1594
// NOTE(review): source line 1595 — the start of the signature (presumably
// `bool TargetInstrInfo::isReallyTriviallyReMaterializable(`) — was dropped
// by the extraction. Conservative target-independent remat legality check:
// returns true only when re-executing MI at another point provably yields the
// same value with no side effects.
1596                                             const MachineInstr &MI) const {
1597  const MachineFunction &MF = *MI.getMF();
1598  const MachineRegisterInfo &MRI = MF.getRegInfo();
1599
1600  // Remat clients assume operand 0 is the defined register.
1601  if (!MI.getNumOperands() || !MI.getOperand(0).isReg())
1602    return false;
1603  Register DefReg = MI.getOperand(0).getReg();
1604
1605  // A sub-register definition can only be rematerialized if the instruction
1606  // doesn't read the other parts of the register.  Otherwise it is really a
1607  // read-modify-write operation on the full virtual register which cannot be
1608  // moved safely.
1609  if (DefReg.isVirtual() && MI.getOperand(0).getSubReg() &&
1610      MI.readsVirtualRegister(DefReg))
1611    return false;
1612
1613  // A load from a fixed stack slot can be rematerialized. This may be
1614  // redundant with subsequent checks, but it's target-independent,
1615  // simple, and a common case.
1616  int FrameIdx = 0;
1617  if (isLoadFromStackSlot(MI, FrameIdx) &&
1618      MF.getFrameInfo().isImmutableObjectIndex(FrameIdx))
1619    return true;
1620
1621  // Avoid instructions obviously unsafe for remat.
1622  if (MI.isNotDuplicable() || MI.mayStore() || MI.mayRaiseFPException() ||
1623      MI.hasUnmodeledSideEffects())
1624    return false;
1625
1626  // Don't remat inline asm. We have no idea how expensive it is
1627  // even if it's side effect free.
1628  if (MI.isInlineAsm())
1629    return false;
1630
1631  // Avoid instructions which load from potentially varying memory.
1632  if (MI.mayLoad() && !MI.isDereferenceableInvariantLoad())
1633    return false;
1634
1635  // If any of the registers accessed are non-constant, conservatively assume
1636  // the instruction is not rematerializable.
1637  for (const MachineOperand &MO : MI.operands()) {
1638    if (!MO.isReg()) continue;
1639    Register Reg = MO.getReg();
1640    if (Reg == 0)
1641      continue;
1642
1643    // Check for a well-behaved physical register.
1644    if (Reg.isPhysical()) {
1645      if (MO.isUse()) {
1646        // If the physreg has no defs anywhere, it's just an ambient register
1647        // and we can freely move its uses. Alternatively, if it's allocatable,
1648        // it could get allocated to something with a def during allocation.
1649        if (!MRI.isConstantPhysReg(Reg))
1650          return false;
1651      } else {
1652        // A physreg def. We can't remat it.
1653        return false;
1654      }
1655      continue;
1656    }
1657
1658    // Only allow one virtual-register def.  There may be multiple defs of the
1659    // same virtual register, though.
1660    if (MO.isDef() && Reg != DefReg)
1661      return false;
1662  }
1663
1664  // Everything checked out.
1665  return true;
1666}
1667
// NOTE(review): dropped by the extraction: line 1668 (the signature,
// presumably `int TargetInstrInfo::getSPAdjust(const MachineInstr &MI)`),
// 1670 (the `TFI` frame-lowering local used below), and 1672 (the second half
// of the StackGrowsDown initializer). Computes the SP delta caused by a call
// frame setup/destroy pseudo, signed by stack growth direction.
1669  const MachineFunction *MF = MI.getMF();
1671  bool StackGrowsDown =
1673
1674  unsigned FrameSetupOpcode = getCallFrameSetupOpcode();
1675  unsigned FrameDestroyOpcode = getCallFrameDestroyOpcode();
1676
// Non-frame instructions never adjust SP.
1677  if (!isFrameInstr(MI))
1678    return 0;
1679
1680  int SPAdj = TFI->alignSPAdjust(getFrameSize(MI));
1681
// On a downward-growing stack, setup decreases SP; flip the sign for the
// combinations where the adjustment goes the other way.
1682  if ((!StackGrowsDown && MI.getOpcode() == FrameSetupOpcode) ||
1683      (StackGrowsDown && MI.getOpcode() == FrameDestroyOpcode))
1684    SPAdj = -SPAdj;
1685
1686  return SPAdj;
1687}
1688
1689/// isSchedulingBoundary - Test if the given instruction should be
1690/// considered a scheduling boundary. This primarily includes labels
1691/// and terminators.
// NOTE(review): source line 1692 — the start of the signature (presumably
// `bool TargetInstrInfo::isSchedulingBoundary(const MachineInstr &MI,`) —
// was dropped by the extraction.
1693                                           const MachineBasicBlock *MBB,
1694                                           const MachineFunction &MF) const {
1695  // Terminators and labels can't be scheduled around.
1696  if (MI.isTerminator() || MI.isPosition())
1697    return true;
1698
1699  // INLINEASM_BR can jump to another block
1700  if (MI.getOpcode() == TargetOpcode::INLINEASM_BR)
1701    return true;
1702
1703  // Don't attempt to schedule around any instruction that defines
1704  // a stack-oriented pointer, as it's unlikely to be profitable. This
1705  // saves compile time, because it doesn't require every single
1706  // stack slot reference to depend on the instruction that does the
1707  // modification.
1708  const TargetLowering &TLI = *MF.getSubtarget().getTargetLowering();
1709  return MI.modifiesRegister(TLI.getStackPointerRegisterToSaveRestore(), &TRI);
1710}
1711
1712// Provide a global flag for disabling the PreRA hazard recognizer that targets
1713// may choose to honor.
1717
1718// Default implementation of CreateTargetRAHazardRecognizer.
// NOTE(review): the return type and signature lines (source lines 1719-1720)
// were dropped by the extraction; restore them from upstream.
1721                                       const ScheduleDAG *DAG) const {
1722  // Dummy hazard recognizer allows all instructions to issue.
1723  return new ScheduleHazardRecognizer();
1724}
1725
1726// Default implementation of CreateTargetMIHazardRecognizer.
// NOTE(review): the return type and qualified name (source line 1727) were
// dropped by the extraction. Returns a scoreboard recognizer driven by the
// itinerary, tagged for the machine scheduler.
1728    const InstrItineraryData *II, const ScheduleDAGMI *DAG) const {
1729  return new ScoreboardHazardRecognizer(II, DAG, "machine-scheduler");
1730}
1731
1732// Default implementation of CreateTargetPostRAHazardRecognizer.
1738
1739// Default implementation of getMemOperandWithOffset.
// NOTE(review): dropped by the extraction: line 1740 (signature start) and
// lines 1743-1744 (the locals `BaseOps` and `Width` used below). Adapter
// over the multi-operand getMemOperandsWithOffsetWidth: succeeds only when
// exactly one base operand is reported.
1741    const MachineInstr &MI, const MachineOperand *&BaseOp, int64_t &Offset,
1742    bool &OffsetIsScalable, const TargetRegisterInfo * /*RemoveMe*/) const {
1745  if (!getMemOperandsWithOffsetWidth(MI, BaseOps, Offset, OffsetIsScalable,
1746                                     Width, &TRI) ||
1747      BaseOps.size() != 1)
1748    return false;
1749  BaseOp = BaseOps.front();
1750  return true;
1751}
1752
1753//===----------------------------------------------------------------------===//
1754// SelectionDAG latency interface.
1755//===----------------------------------------------------------------------===//
1756
1757std::optional<unsigned>
// NOTE(review): the qualified name and first parameter (source line 1758,
// presumably `TargetInstrInfo::getOperandLatency(const InstrItineraryData
// *ItinData,`) were dropped by the extraction. Returns nullopt with no/empty
// itinerary data or a non-machine def node.
1759                                  SDNode *DefNode, unsigned DefIdx,
1760                                  SDNode *UseNode, unsigned UseIdx) const {
1761  if (!ItinData || ItinData->isEmpty())
1762    return std::nullopt;
1763
1764  if (!DefNode->isMachineOpcode())
1765    return std::nullopt;
1766
1767  unsigned DefClass = get(DefNode->getMachineOpcode()).getSchedClass();
// Without a machine use node only the def cycle can be consulted.
1768  if (!UseNode->isMachineOpcode())
1769    return ItinData->getOperandCycle(DefClass, DefIdx);
1770  unsigned UseClass = get(UseNode->getMachineOpcode()).getSchedClass();
1771  return ItinData->getOperandLatency(DefClass, DefIdx, UseClass, UseIdx);
1772}
1773
// NOTE(review): the first half of the signature (source line 1774, presumably
// `int TargetInstrInfo::getInstrLatency(const InstrItineraryData *ItinData,`)
// was dropped by the extraction. Falls back to a latency of 1 when there is
// no usable itinerary or the node is not a machine opcode.
1775                                       SDNode *N) const {
1776  if (!ItinData || ItinData->isEmpty())
1777    return 1;
1778
1779  if (!N->isMachineOpcode())
1780    return 1;
1781
1782  return ItinData->getStageLatency(get(N->getMachineOpcode()).getSchedClass());
1783}
1784
1785//===----------------------------------------------------------------------===//
1786// MachineInstr latency interface.
1787//===----------------------------------------------------------------------===//
1788
// NOTE(review): the first half of the signature (source line 1789, presumably
// `unsigned TargetInstrInfo::getNumMicroOps(const InstrItineraryData
// *ItinData,`) was dropped by the extraction. Reads the micro-op count from
// the itinerary for MI's scheduling class, defaulting to 1.
1790                                          const MachineInstr &MI) const {
1791  if (!ItinData || ItinData->isEmpty())
1792    return 1;
1793
1794  unsigned Class = MI.getDesc().getSchedClass();
1795  int UOps = ItinData->Itineraries[Class].NumMicroOps;
1796  if (UOps >= 0)
1797    return UOps;
1798
1799  // The # of u-ops is dynamically determined. The specific target should
1800  // override this function to return the right number.
1801  return 1;
1802}
1803
1804/// Return the default expected latency for a def based on its opcode.
// NOTE(review): the first half of the signature (source line 1805) was
// dropped by the extraction. Transient (no-machine-code) instructions cost 0;
// loads and target-flagged high-latency defs use the scheduling model's
// LoadLatency / HighLatency; everything else defaults to 1 cycle.
1806                                            const MachineInstr &DefMI) const {
1807  if (DefMI.isTransient())
1808    return 0;
1809  if (DefMI.mayLoad())
1810    return SchedModel.LoadLatency;
1811  if (isHighLatencyDef(DefMI.getOpcode()))
1812    return SchedModel.HighLatency;
1813  return 1;
1814}
1815
1817 return 0;
1818}
1819
// NOTE(review): the first half of the signature (source line 1820, presumably
// `unsigned TargetInstrInfo::getInstrLatency(const InstrItineraryData
// *ItinData,`) was dropped by the extraction.
1821                                          const MachineInstr &MI,
1822                                          unsigned *PredCost) const {
1823  // Default to one cycle for no itinerary. However, an "empty" itinerary may
1824  // still have a MinLatency property, which getStageLatency checks.
1825  if (!ItinData)
1826    return MI.mayLoad() ? 2 : 1;
1827
1828  return ItinData->getStageLatency(MI.getDesc().getSchedClass());
1829}
1830
// NOTE(review): the first half of the signature (source line 1831, presumably
// `bool TargetInstrInfo::hasLowDefLatency(const TargetSchedModel &SchedModel,`)
// was dropped by the extraction. True when the itinerary reports the def
// cycle for this operand as at most one cycle.
1832                                       const MachineInstr &DefMI,
1833                                       unsigned DefIdx) const {
1834  const InstrItineraryData *ItinData = SchedModel.getInstrItineraries();
1835  if (!ItinData || ItinData->isEmpty())
1836    return false;
1837
1838  unsigned DefClass = DefMI.getDesc().getSchedClass();
1839  std::optional<unsigned> DefCycle =
1840      ItinData->getOperandCycle(DefClass, DefIdx);
1841  return DefCycle && DefCycle <= 1U;
1842}
1843
// NOTE(review): the signature (source line 1843, presumably
// `bool TargetInstrInfo::isFunctionSafeToSplit(const MachineFunction &MF)`)
// was dropped by the extraction. Gates machine function splitting: refuses
// functions with an explicit section or with a cold/unknown section prefix.
1844  // TODO: We don't split functions where a section attribute has been set
1845  // since the split part may not be placed in a contiguous region. It may also
1846  // be more beneficial to augment the linker to ensure contiguous layout of
1847  // split functions within the same section as specified by the attribute.
1848  if (MF.getFunction().hasSection())
1849    return false;
1850
1851  // We don't want to proceed further for cold functions
1852  // or functions of unknown hotness. Lukewarm functions have no prefix.
1853  std::optional<StringRef> SectionPrefix = MF.getFunction().getSectionPrefix();
1854  if (SectionPrefix &&
1855      (*SectionPrefix == "unlikely" || *SectionPrefix == "unknown")) {
1856    return false;
1857  }
1858
1859  return true;
1860}
1862
1863std::optional<ParamLoadedValue>
// NOTE(review): dropped by the extraction and needed from upstream: line 1864
// (the qualified name and first parameter), 1867 (the initializer of the
// `Expr` DIExpression used throughout), 1891 (the Expr update preceding the
// add-immediate return), and 1925-1926 (the `Ops` vector declaration and its
// offset-append preceding line 1927). Describes, for debug entry values, how
// Reg's value at a call can be reconstructed (copy source, reg+imm, or a
// non-escaping memory location).
1865                                      Register Reg) const {
1866  const MachineFunction *MF = MI.getMF();
1868  int64_t Offset;
1869  bool OffsetIsScalable;
1870
1871  // To simplify the sub-register handling, verify that we only need to
1872  // consider physical registers.
1873  assert(MF->getProperties().hasNoVRegs());
1874
1875  if (auto DestSrc = isCopyInstr(MI)) {
1876    Register DestReg = DestSrc->Destination->getReg();
1877
1878    // If the copy destination is the forwarding reg, describe the forwarding
1879    // reg using the copy source as the backup location. Example:
1880    //
1881    //   x0 = MOV x7
1882    //   call callee(x0)      ; x0 described as x7
1883    if (Reg == DestReg)
1884      return ParamLoadedValue(*DestSrc->Source, Expr);
1885
1886    // If the target's hook couldn't describe this copy, give up.
1887    return std::nullopt;
1888  } else if (auto RegImm = isAddImmediate(MI, Reg)) {
1889    Register SrcReg = RegImm->Reg;
1890    Offset = RegImm->Imm;
1892    return ParamLoadedValue(MachineOperand::CreateReg(SrcReg, false), Expr);
1893  } else if (MI.hasOneMemOperand()) {
1894    // Only describe memory which provably does not escape the function. As
1895    // described in llvm.org/PR43343, escaped memory may be clobbered by the
1896    // callee (or by another thread).
1897    const MachineFrameInfo &MFI = MF->getFrameInfo();
1898    const MachineMemOperand *MMO = MI.memoperands()[0];
1899    const PseudoSourceValue *PSV = MMO->getPseudoValue();
1900
1901    // If the address points to "special" memory (e.g. a spill slot), it's
1902    // sufficient to check that it isn't aliased by any high-level IR value.
1903    if (!PSV || PSV->mayAlias(&MFI))
1904      return std::nullopt;
1905
1906    const MachineOperand *BaseOp;
1907    if (!getMemOperandWithOffset(MI, BaseOp, Offset, OffsetIsScalable, &TRI))
1908      return std::nullopt;
1909
1910    // FIXME: Scalable offsets are not yet handled in the offset code below.
1911    if (OffsetIsScalable)
1912      return std::nullopt;
1913
1914    // TODO: Can currently only handle mem instructions with a single define.
1915    // An example from the x86 target:
1916    //   ...
1917    //   DIV64m $rsp, 1, $noreg, 24, $noreg, implicit-def dead $rax, implicit-def $rdx
1918    //   ...
1919    //
1920    if (MI.getNumExplicitDefs() != 1)
1921      return std::nullopt;
1922
1923    // TODO: In what way do we need to take Reg into consideration here?
1924
// Emit a sized dereference; an unknown MMO size becomes the all-ones
// sentinel (~0).
1927    Ops.push_back(dwarf::DW_OP_deref_size);
1928    Ops.push_back(MMO->getSize().hasValue() ? MMO->getSize().getValue()
1929                                            : ~UINT64_C(0));
1930    Expr = DIExpression::prependOpcodes(Expr, Ops);
1931    return ParamLoadedValue(*BaseOp, Expr);
1932  }
1933
1934  return std::nullopt;
1935}
1936
1937// Get the call frame size just before MI.
// NOTE(review): the signature (source line 1938, presumably
// `unsigned TargetInstrInfo::getCallFrameSizeAt(MachineInstr &MI)`) was
// dropped by the extraction.
1939  // Search backwards from MI for the most recent call frame instruction.
1940  MachineBasicBlock *MBB = MI.getParent();
1941  for (auto &AdjI : reverse(make_range(MBB->instr_begin(), MI.getIterator()))) {
// A setup opens a call frame of its total size; a destroy closes it (size 0).
1942    if (AdjI.getOpcode() == getCallFrameSetupOpcode())
1943      return getFrameTotalSize(AdjI);
1944    if (AdjI.getOpcode() == getCallFrameDestroyOpcode())
1945      return 0;
1946  }
1947
1948  // If none was found, use the call frame size from the start of the basic
1949  // block.
1950  return MBB->getCallFrameSize();
1951}
1952
1953/// Both DefMI and UseMI must be valid.  By default, call directly to the
1954/// itinerary. This may be overridden by the target.
// NOTE(review): the return type and qualified name (source line 1955) were
// dropped by the extraction. Note this overload dereferences ItinData
// unconditionally — callers must pass a valid itinerary.
1956    const InstrItineraryData *ItinData, const MachineInstr &DefMI,
1957    unsigned DefIdx, const MachineInstr &UseMI, unsigned UseIdx) const {
1958  unsigned DefClass = DefMI.getDesc().getSchedClass();
1959  unsigned UseClass = UseMI.getDesc().getSchedClass();
1960  return ItinData->getOperandLatency(DefClass, DefIdx, UseClass, UseIdx);
1961}
1962
// NOTE(review): the return type and qualified name (source line 1963,
// presumably `bool TargetInstrInfo::getRegSequenceInputs(`) were dropped by
// the extraction. Decomposes a REG_SEQUENCE (or defers REG_SEQUENCE-like
// instructions to the target hook) into its (Reg:SubReg, SubIdx) inputs.
1964    const MachineInstr &MI, unsigned DefIdx,
1965    SmallVectorImpl<RegSubRegPairAndIdx> &InputRegs) const {
1966  assert((MI.isRegSequence() ||
1967          MI.isRegSequenceLike()) && "Instruction do not have the proper type");
1968
1969  if (!MI.isRegSequence())
1970    return getRegSequenceLikeInputs(MI, DefIdx, InputRegs);
1971
1972  // We are looking at:
1973  // Def = REG_SEQUENCE v0, sub0, v1, sub1, ...
1974  assert(DefIdx == 0 && "REG_SEQUENCE only has one def");
// Operands come in (register, subindex-immediate) pairs, hence the stride-2
// walk starting after the def.
1975  for (unsigned OpIdx = 1, EndOpIdx = MI.getNumOperands(); OpIdx != EndOpIdx;
1976       OpIdx += 2) {
1977    const MachineOperand &MOReg = MI.getOperand(OpIdx);
1978    if (MOReg.isUndef())
1979      continue;
1980    const MachineOperand &MOSubIdx = MI.getOperand(OpIdx + 1);
1981    assert(MOSubIdx.isImm() &&
1982           "One of the subindex of the reg_sequence is not an immediate");
1983    // Record Reg:SubReg, SubIdx.
1984    InputRegs.push_back(RegSubRegPairAndIdx(MOReg.getReg(), MOReg.getSubReg(),
1985                                            (unsigned)MOSubIdx.getImm()));
1986  }
1987  return true;
1988}
1989
// NOTE(review): the opening signature line (original line 1990) is missing
// from this extraction; this is the EXTRACT_SUBREG input-decoding routine
// (see the getExtractSubregLikeInputs delegation below).
1991 const MachineInstr &MI, unsigned DefIdx,
1992 RegSubRegPairAndIdx &InputReg) const {
1993 assert((MI.isExtractSubreg() ||
1994 MI.isExtractSubregLike()) && "Instruction do not have the proper type");
1995
// Target-specific EXTRACT_SUBREG-like instructions are decoded by the hook.
1996 if (!MI.isExtractSubreg())
1997 return getExtractSubregLikeInputs(MI, DefIdx, InputReg);
1998
1999 // We are looking at:
2000 // Def = EXTRACT_SUBREG v0.sub1, sub0.
2001 assert(DefIdx == 0 && "EXTRACT_SUBREG only has one def");
2002 const MachineOperand &MOReg = MI.getOperand(1);
// An undef source register has no extractable value; report failure.
2003 if (MOReg.isUndef())
2004 return false;
2005 const MachineOperand &MOSubIdx = MI.getOperand(2);
2006 assert(MOSubIdx.isImm() &&
2007 "The subindex of the extract_subreg is not an immediate");
2008
2009 InputReg.Reg = MOReg.getReg();
2010 InputReg.SubReg = MOReg.getSubReg();
2011 InputReg.SubIdx = (unsigned)MOSubIdx.getImm();
2012 return true;
2013 }
2014
// NOTE(review): the opening signature line (original line 2015) is missing
// from this extraction; this is the INSERT_SUBREG input-decoding routine
// (see the getInsertSubregLikeInputs delegation below).
2016 const MachineInstr &MI, unsigned DefIdx,
2017 RegSubRegPair &BaseReg, RegSubRegPairAndIdx &InsertedReg) const {
2018 assert((MI.isInsertSubreg() ||
2019 MI.isInsertSubregLike()) && "Instruction do not have the proper type");
2020
// Target-specific INSERT_SUBREG-like instructions are decoded by the hook.
2021 if (!MI.isInsertSubreg())
2022 return getInsertSubregLikeInputs(MI, DefIdx, BaseReg, InsertedReg);
2023
2024 // We are looking at:
2025 // Def = INSERT_SUBREG v0, v1, sub0.
2026 assert(DefIdx == 0 && "INSERT_SUBREG only has one def");
2027 const MachineOperand &MOBaseReg = MI.getOperand(1);
2028 const MachineOperand &MOInsertedReg = MI.getOperand(2);
// An undef inserted value makes the decomposition meaningless; report failure.
2029 if (MOInsertedReg.isUndef())
2030 return false;
2031 const MachineOperand &MOSubIdx = MI.getOperand(3);
2032 assert(MOSubIdx.isImm() &&
2033 "One of the subindex of the reg_sequence is not an immediate");
2034 BaseReg.Reg = MOBaseReg.getReg();
2035 BaseReg.SubReg = MOBaseReg.getSubReg();
2036
2037 InsertedReg.Reg = MOInsertedReg.getReg();
2038 InsertedReg.SubReg = MOInsertedReg.getSubReg();
2039 InsertedReg.SubIdx = (unsigned)MOSubIdx.getImm();
2040 return true;
2041 }
2042
2043 // Returns a MIRPrinter comment for this machine operand.
// NOTE(review): this extraction is missing the signature line (original 2044),
// the guard at original 2054 (presumably
// `if (OpIdx == InlineAsm::MIOp_ExtraInfo) {`, which opens the block closed at
// 2059), and original 2077 (presumably the statement streaming the memory
// constraint name). Confirm against the upstream source file.
2045 const MachineInstr &MI, const MachineOperand &Op, unsigned OpIdx,
2046 const TargetRegisterInfo * /*RemoveMe*/) const {
2047
// Only inline-asm operands get a decoded comment; everything else is empty.
2048 if (!MI.isInlineAsm())
2049 return "";
2050
2051 std::string Flags;
2052 raw_string_ostream OS(Flags);
2053
2055 // Print HasSideEffects, MayLoad, MayStore, IsAlignStack
2056 unsigned ExtraInfo = Op.getImm();
2057 OS << interleaved(InlineAsm::getExtraInfoNames(ExtraInfo), " ");
2058 return Flags;
2059 }
2060
// Only operand positions that carry an inline-asm flag word are decoded;
// other positions produce no comment.
2061 int FlagIdx = MI.findInlineAsmFlagIdx(OpIdx);
2062 if (FlagIdx < 0 || (unsigned)FlagIdx != OpIdx)
2063 return "";
2064
2065 assert(Op.isImm() && "Expected flag operand to be an immediate");
2066 // Pretty print the inline asm operand descriptor.
2067 unsigned Flag = Op.getImm();
2068 const InlineAsm::Flag F(Flag);
2069 OS << F.getKindName();
2070
// Append the register-class constraint name, if the flag carries one.
2071 unsigned RCID;
2072 if (!F.isImmKind() && !F.isMemKind() && F.hasRegClassConstraint(RCID))
2073 OS << ':' << TRI.getRegClassName(TRI.getRegClass(RCID));
2074
2075 if (F.isMemKind()) {
2076 InlineAsm::ConstraintCode MCID = F.getMemoryConstraintID();
2078 }
2079
// Note when this use operand is tied to an earlier def operand.
2080 unsigned TiedTo;
2081 if (F.isUseOperandTiedToDef(TiedTo))
2082 OS << " tiedto:$" << TiedTo;
2083
2084 if ((F.isRegDefKind() || F.isRegDefEarlyClobberKind() || F.isRegUseKind()) &&
2085 F.getRegMayBeFolded())
2086 OS << " foldable";
2087
2088 return Flags;
2089 }
2090
2092
// NOTE(review): the opening signature line (original line 2093,
// TargetInstrInfo::mergeOutliningCandidateAttributes) is missing from this
// extraction. Confirm against the upstream source file.
2094 Function &F, std::vector<outliner::Candidate> &Candidates) const {
2095 // Include target features from an arbitrary candidate for the outlined
2096 // function. This makes sure the outlined function knows what kinds of
2097 // instructions are going into it. This is fine, since all parent functions
2098 // must necessarily support the instructions that are in the outlined region.
2099 outliner::Candidate &FirstCand = Candidates.front();
2100 const Function &ParentFn = FirstCand.getMF()->getFunction();
2101 if (ParentFn.hasFnAttribute("target-features"))
2102 F.addFnAttr(ParentFn.getFnAttribute("target-features"));
2103 if (ParentFn.hasFnAttribute("target-cpu"))
2104 F.addFnAttr(ParentFn.getFnAttribute("target-cpu"));
2105
2106 // Set nounwind, so we don't generate eh_frame.
// Only safe when every candidate's parent function already guarantees that
// it cannot unwind.
2107 if (llvm::all_of(Candidates, [](const outliner::Candidate &C) {
2108 return C.getMF()->getFunction().hasFnAttribute(Attribute::NoUnwind);
2109 }))
2110 F.addFnAttr(Attribute::NoUnwind);
2111 }
2112
2116 unsigned Flags) const {
// NOTE(review): this extraction is missing several original lines: the
// function signature (2113-2115, TargetInstrInfo::getOutliningType) and the
// single-statement bodies at 2127, 2131, 2135, 2143, 2152-2153, 2156 and 2185
// (presumably `return outliner::InstrType::...` statements for the guards
// they follow). Confirm against the upstream source file.
2117 MachineInstr &MI = *MIT;
2118
2119 // NOTE: MI.isMetaInstruction() will match CFI_INSTRUCTION, but some targets
2120 // have support for outlining those. Special-case that here.
2121 if (MI.isCFIInstruction())
2122 // Just go right to the target implementation.
2123 return getOutliningTypeImpl(MMI, MIT, Flags);
2124
2125 // Be conservative about inline assembly.
2126 if (MI.isInlineAsm())
2128
2129 // Labels generally can't safely be outlined.
2130 if (MI.isLabel())
2132
2133 // Don't let debug instructions impact analysis.
2134 if (MI.isDebugInstr())
2136
2137 // Some other special cases.
2138 switch (MI.getOpcode()) {
2139 case TargetOpcode::IMPLICIT_DEF:
2140 case TargetOpcode::KILL:
2141 case TargetOpcode::LIFETIME_START:
2142 case TargetOpcode::LIFETIME_END:
2144 default:
2145 break;
2146 }
2147
2148 // Is this a terminator for a basic block?
2149 if (MI.isTerminator()) {
2150 // If this is a branch to another block, we can't outline it.
2151 if (!MI.getParent()->succ_empty())
2153
2154 // Don't outline if the branch is not unconditional.
2155 if (isPredicated(MI))
2157 }
2158
2159 // Make sure none of the operands of this instruction do anything that
2160 // might break if they're moved outside their current function.
2161 // This includes MachineBasicBlock references, BlockAddresses,
2162 // Constant pool indices and jump table indices.
2163 //
2164 // A quick note on MO_TargetIndex:
2165 // This doesn't seem to be used in any of the architectures that the
2166 // MachineOutliner supports, but it was still filtered out in all of them.
2167 // There was one exception (RISC-V), but MO_TargetIndex also isn't used there.
2168 // As such, this check is removed both here and in the target-specific
2169 // implementations. Instead, we assert to make sure this doesn't
2170 // catch anyone off-guard somewhere down the line.
2171 for (const MachineOperand &MOP : MI.operands()) {
2172 // If you hit this assertion, please remove it and adjust
2173 // `getOutliningTypeImpl` for your target appropriately if necessary.
2174 // Adding the assertion back to other supported architectures
2175 // would be nice too :)
2176 assert(!MOP.isTargetIndex() && "This isn't used quite yet!");
2177
2178 // CFI instructions should already have been filtered out at this point.
2179 assert(!MOP.isCFIIndex() && "CFI instructions handled elsewhere!");
2180
2181 // PrologEpilogInserter should've already run at this point.
2182 assert(!MOP.isFI() && "FrameIndex instructions should be gone by now!");
2183
2184 if (MOP.isMBB() || MOP.isBlockAddress() || MOP.isCPI() || MOP.isJTI())
2186 }
2187
2188 // If we don't know, delegate to the target-specific hook.
2189 return getOutliningTypeImpl(MMI, MIT, Flags);
2190 }
2191
2193 unsigned &Flags) const {
// NOTE(review): the opening signature line (original line 2192,
// TargetInstrInfo::isMBBSafeToOutlineFrom) is missing from this extraction.
// Confirm against the upstream source file.
2194 // Some instrumentations create special TargetOpcode at the start which
2195 // expands to special code sequences which must be present.
2196 auto First = MBB.getFirstNonDebugInstr();
// An empty (or all-debug) block contains nothing that conflicts with
// outlining.
2197 if (First == MBB.end())
2198 return true;
2199
2200 if (First->getOpcode() == TargetOpcode::FENTRY_CALL ||
2201 First->getOpcode() == TargetOpcode::PATCHABLE_FUNCTION_ENTER)
2202 return false;
2203
2204 // Some instrumentations create special pseudo-instructions at or just before
2205 // the end that must be present.
2206 auto Last = MBB.getLastNonDebugInstr();
2207 if (Last->getOpcode() == TargetOpcode::PATCHABLE_RET ||
2208 Last->getOpcode() == TargetOpcode::PATCHABLE_TAIL_CALL)
2209 return false;
2210
// A patchable exit pseudo may sit immediately before a terminating return,
// so check the instruction preceding the return as well.
2211 if (Last != First && Last->isReturn()) {
2212 --Last;
2213 if (Last->getOpcode() == TargetOpcode::PATCHABLE_FUNCTION_EXIT ||
2214 Last->getOpcode() == TargetOpcode::PATCHABLE_TAIL_CALL)
2215 return false;
2216 }
2217 return true;
2218 }
2219
// NOTE(review): the opening signature line (original line 2220) is missing
// from this extraction, so the function's name and parameters cannot be
// confirmed from here — verify against the upstream source file.
// The visible predicate is true for calls, instructions with unmodeled side
// effects, and ordered memory references that are not dereferenceable
// invariant loads.
2221 return MI->isCall() || MI->hasUnmodeledSideEffects() ||
2222 (MI->hasOrderedMemoryRef() && !MI->isDereferenceableInvariantLoad());
2223 }
MachineInstrBuilder & UseMI
MachineInstrBuilder MachineInstrBuilder & DefMI
assert(UImm &&(UImm !=~static_cast< T >(0)) &&"Invalid immediate!")
SmallVector< int16_t, MAX_SRC_OPERANDS_NUM > OperandIndices
MachineBasicBlock & MBB
MachineBasicBlock MachineBasicBlock::iterator DebugLoc DL
DXIL Forward Handle Accesses
This file contains constants used for implementing Dwarf debug support.
const HexagonInstrInfo * TII
IRTranslator LLVM IR MI
const AbstractManglingParser< Derived, Alloc >::OperatorInfo AbstractManglingParser< Derived, Alloc >::Ops[]
#define F(x, y, z)
Definition MD5.cpp:54
#define I(x, y, z)
Definition MD5.cpp:57
Register const TargetRegisterInfo * TRI
Promote Memory to Register
Definition Mem2Reg.cpp:110
static MCRegister getReg(const MCDisassembler *D, unsigned RC, unsigned RegNo)
MachineInstr unsigned OpIdx
uint64_t IntrinsicInst * II
TargetInstrInfo::RegSubRegPairAndIdx RegSubRegPairAndIdx
This file defines the SmallSet class.
This file contains some functions that are useful when dealing with strings.
static bool isAsmComment(const char *Str, const MCAsmInfo &MAI)
static void transferImplicitOperands(MachineInstr *MI, const TargetRegisterInfo *TRI)
transferImplicitOperands - MI is a pseudo-instruction, and the lowered replacement instructions immed...
static cl::opt< bool > EnableAccReassociation("acc-reassoc", cl::Hidden, cl::init(true), cl::desc("Enable reassociation of accumulation chains"))
static std::pair< bool, bool > mustSwapOperands(unsigned Pattern)
static const TargetRegisterClass * canFoldCopy(const MachineInstr &MI, const TargetInstrInfo &TII, unsigned FoldIdx)
static cl::opt< unsigned int > MinAccumulatorDepth("acc-min-depth", cl::Hidden, cl::init(8), cl::desc("Minimum length of accumulator chains " "required for the optimization to kick in"))
static void foldInlineAsmMemOperand(MachineInstr *MI, unsigned OpNo, int FI, const TargetInstrInfo &TII)
static cl::opt< unsigned int > MaxAccumulatorWidth("acc-max-width", cl::Hidden, cl::init(3), cl::desc("Maximum number of branches in the accumulator tree"))
static bool canCombine(MachineBasicBlock &MBB, MachineOperand &MO, unsigned CombineOpc=0)
static cl::opt< bool > DisableHazardRecognizer("disable-sched-hazard", cl::Hidden, cl::init(false), cl::desc("Disable hazard detection during preRA scheduling"))
static MachineInstr * foldPatchpoint(MachineFunction &MF, MachineInstr &MI, ArrayRef< unsigned > Ops, int FrameIndex, const TargetInstrInfo &TII)
This file describes how to lower LLVM code to machine code.
ArrayRef - Represent a constant reference to an array (0 or more elements consecutively in memory),...
Definition ArrayRef.h:40
DWARF expression.
static LLVM_ABI void appendOffset(SmallVectorImpl< uint64_t > &Ops, int64_t Offset)
Append Ops with operations to apply the Offset.
static LLVM_ABI DIExpression * prepend(const DIExpression *Expr, uint8_t Flags, int64_t Offset=0)
Prepend DIExpr with a deref and offset operation and optionally turn it into a stack value or/and an ...
static LLVM_ABI DIExpression * prependOpcodes(const DIExpression *Expr, SmallVectorImpl< uint64_t > &Ops, bool StackValue=false, bool EntryValue=false)
Prepend DIExpr with the given opcodes and optionally turn it into a stack value.
bool isLittleEndian() const
Returns true if the data layout is little-endian.
Definition DataLayout.h:215
A debug info location.
Definition DebugLoc.h:123
std::pair< iterator, bool > insert(const std::pair< KeyT, ValueT > &KV)
Definition DenseMap.h:241
Attribute getFnAttribute(Attribute::AttrKind Kind) const
Return the attribute for the given attribute kind.
Definition Function.cpp:763
LLVMContext & getContext() const
getContext - Return a reference to the LLVMContext associated with this function.
Definition Function.cpp:358
bool hasFnAttribute(Attribute::AttrKind Kind) const
Return true if the function has the attribute.
Definition Function.cpp:728
LLVM_ABI std::optional< StringRef > getSectionPrefix() const
Get the section prefix for this global object.
Definition Globals.cpp:318
bool hasSection() const
Check if this global has a custom object file section.
static std::vector< StringRef > getExtraInfoNames(unsigned ExtraInfo)
Definition InlineAsm.h:451
static StringRef getMemConstraintName(ConstraintCode C)
Definition InlineAsm.h:475
Itinerary data supplied by a subtarget to be used by a target.
std::optional< unsigned > getOperandCycle(unsigned ItinClassIndx, unsigned OperandIdx) const
Return the cycle for the given class and operand.
unsigned getStageLatency(unsigned ItinClassIndx) const
Return the total stage latency of the given class.
std::optional< unsigned > getOperandLatency(unsigned DefClass, unsigned DefIdx, unsigned UseClass, unsigned UseIdx) const
Compute and return the use operand latency of a given itinerary class and operand index if the value ...
const InstrItinerary * Itineraries
Array of itineraries selected.
bool isEmpty() const
Returns true if there are no itineraries.
bool hasValue() const
static LocationSize precise(uint64_t Value)
TypeSize getValue() const
This class is intended to be used as a base class for asm properties and features specific to the tar...
Definition MCAsmInfo.h:64
virtual unsigned getMaxInstLength(const MCSubtargetInfo *STI=nullptr) const
Returns the maximum possible encoded instruction size in bytes.
Definition MCAsmInfo.h:534
StringRef getCommentString() const
Definition MCAsmInfo.h:545
const char * getSeparatorString() const
Definition MCAsmInfo.h:540
Instances of this class represent a single low-level machine instruction.
Definition MCInst.h:188
Describe properties that are true of each instruction in the target description file.
unsigned getSchedClass() const
Return the scheduling class for this instruction.
const MCInstrDesc & get(unsigned Opcode) const
Return the machine instruction descriptor that corresponds to the specified instruction opcode.
Definition MCInstrInfo.h:90
This holds information about one operand of a machine instruction, indicating the register class for ...
Definition MCInstrDesc.h:86
static MDTuple * get(LLVMContext &Context, ArrayRef< Metadata * > MDs)
Definition Metadata.h:1572
Set of metadata that should be preserved when using BuildMI().
MachineInstrBundleIterator< MachineInstr > iterator
The MachineFrameInfo class represents an abstract stack frame until prolog/epilog code is inserted.
bool isImmutableObjectIndex(int ObjectIdx) const
Returns true if the specified index corresponds to an immutable object.
Align getObjectAlign(int ObjectIdx) const
Return the alignment of the specified stack object.
int64_t getObjectSize(int ObjectIdx) const
Return the size of the specified object.
int64_t getObjectOffset(int ObjectIdx) const
Return the assigned stack offset of the specified object from the incoming stack pointer.
const TargetSubtargetInfo & getSubtarget() const
getSubtarget - Return the subtarget for which this machine code is being compiled.
MachineMemOperand * getMachineMemOperand(MachinePointerInfo PtrInfo, MachineMemOperand::Flags f, LLT MemTy, Align base_alignment, const AAMDNodes &AAInfo=AAMDNodes(), const MDNode *Ranges=nullptr, SyncScope::ID SSID=SyncScope::System, AtomicOrdering Ordering=AtomicOrdering::NotAtomic, AtomicOrdering FailureOrdering=AtomicOrdering::NotAtomic)
getMachineMemOperand - Allocate a new MachineMemOperand.
MachineFrameInfo & getFrameInfo()
getFrameInfo - Return the frame info object for the current function.
MachineRegisterInfo & getRegInfo()
getRegInfo - Return information about the registers currently in use.
const DataLayout & getDataLayout() const
Return the DataLayout attached to the Module associated to this MF.
Function & getFunction()
Return the LLVM function that this machine code represents.
BasicBlockListType::iterator iterator
const MachineFunctionProperties & getProperties() const
Get the function properties.
const TargetMachine & getTarget() const
getTarget - Return the target machine this machine code is compiled with
const MachineInstrBuilder & addReg(Register RegNo, RegState Flags={}, unsigned SubReg=0) const
Add a new virtual register operand.
const MachineInstrBuilder & addImm(int64_t Val) const
Add a new immediate operand.
const MachineInstrBuilder & add(const MachineOperand &MO) const
const MachineInstrBuilder & addFrameIndex(int Idx) const
const MachineInstrBuilder & copyImplicitOps(const MachineInstr &OtherMI) const
Copy all the implicit operands from OtherMI onto this one.
MachineInstr * getInstr() const
If conversion operators fail, use this method to get the MachineInstr explicitly.
const MachineInstrBuilder & copyMIMetadata(const MIMetadata &MIMD) const
Representation of each machine instruction.
ArrayRef< MachineMemOperand * >::iterator mmo_iterator
unsigned getOpcode() const
Returns the opcode of this MachineInstr.
const MachineBasicBlock * getParent() const
void setFlags(unsigned flags)
unsigned getNumOperands() const
Retuns the total number of operands.
void setDebugInstrNum(unsigned Num)
Set instruction number of this MachineInstr.
mmo_iterator memoperands_end() const
Access to memory operands of the instruction.
unsigned peekDebugInstrNum() const
Examine the instruction number of this MachineInstr.
LLVM_ABI void setMemRefs(MachineFunction &MF, ArrayRef< MachineMemOperand * > MemRefs)
Assign this MachineInstr's memory reference descriptor list.
bool mayLoad(QueryType Type=AnyInBundle) const
Return true if this instruction could possibly read memory.
bool isCFIInstruction() const
bool isNotDuplicable(QueryType Type=AnyInBundle) const
Return true if this instruction cannot be safely duplicated.
void clearFlag(MIFlag Flag)
clearFlag - Clear a MI flag.
mop_range explicit_operands()
LLVM_ABI void tieOperands(unsigned DefIdx, unsigned UseIdx)
Add a tie between the register operands at DefIdx and UseIdx.
mmo_iterator memoperands_begin() const
Access to memory operands of the instruction.
LLVM_ABI void cloneInstrSymbols(MachineFunction &MF, const MachineInstr &MI)
Clone another MachineInstr's pre- and post- instruction symbols and replace ours with it.
LLVM_ABI bool isIdenticalTo(const MachineInstr &Other, MICheckType Check=CheckDefs) const
Return true if this instruction is identical to Other.
LLVM_ABI const MachineFunction * getMF() const
Return the function that contains the basic block that this instruction belongs to.
ArrayRef< MachineMemOperand * > memoperands() const
Access to memory operands of the instruction.
bool mayStore(QueryType Type=AnyInBundle) const
Return true if this instruction could possibly modify memory.
const MachineOperand & getOperand(unsigned i) const
uint32_t getFlags() const
Return the MI flags bitvector.
bool canFoldAsLoad(QueryType Type=IgnoreBundle) const
Return true for instructions that can be folded as memory operands in other instructions.
LLVM_ABI const TargetRegisterClass * getRegClassConstraint(unsigned OpIdx, const TargetInstrInfo *TII, const TargetRegisterInfo *TRI) const
Compute the static register class constraint for operand OpIdx.
LLVM_ABI void addMemOperand(MachineFunction &MF, MachineMemOperand *MO)
Add a MachineMemOperand to the machine instruction.
A description of a memory reference used in the backend.
LocationSize getSize() const
Return the size in bytes of the memory reference.
const PseudoSourceValue * getPseudoValue() const
Flags
Flags values. These may be or'd together.
@ MOLoad
The memory access reads data.
@ MOStore
The memory access writes data.
This class contains meta information specific to a module.
MachineOperand class - Representation of each machine instruction operand.
void setSubReg(unsigned subReg)
unsigned getSubReg() const
void setIsInternalRead(bool Val=true)
void setImm(int64_t immVal)
int64_t getImm() const
LLVM_ABI void setIsRenamable(bool Val=true)
bool isReg() const
isReg - Tests if this is a MO_Register operand.
LLVM_ABI void setReg(Register Reg)
Change the register this operand corresponds to.
bool isImm() const
isImm - Tests if this is a MO_Immediate operand.
void setIsKill(bool Val=true)
LLVM_ABI bool isRenamable() const
isRenamable - Returns true if this register may be renamed, i.e.
void setMBB(MachineBasicBlock *MBB)
void setIsUndef(bool Val=true)
Register getReg() const
getReg - Returns the register number.
static MachineOperand CreateReg(Register Reg, bool isDef, bool isImp=false, bool isKill=false, bool isDead=false, bool isUndef=false, bool isEarlyClobber=false, unsigned SubReg=0, bool isDebug=false, bool isInternalRead=false, bool isRenamable=false)
bool isMBB() const
isMBB - Tests if this is a MO_MachineBasicBlock operand.
MachineRegisterInfo - Keep track of information for virtual and physical registers,...
LLVM_ABI bool hasOneNonDBGUse(Register RegNo) const
hasOneNonDBGUse - Return true if there is exactly one non-Debug use of the specified register.
const TargetRegisterClass * getRegClass(Register Reg) const
Return the register class of the specified virtual register.
LLVM_ABI MachineInstr * getVRegDef(Register Reg) const
getVRegDef - Return the machine instr that defines the specified virtual register or null if none is ...
use_instr_iterator use_instr_begin(Register RegNo) const
LLVM_ABI Register createVirtualRegister(const TargetRegisterClass *RegClass, StringRef Name="")
createVirtualRegister - Create and return a new virtual register in the function with the specified r...
LLVM_ABI bool hasOneNonDBGUser(Register RegNo) const
hasOneNonDBGUser - Return true if there is exactly one non-Debug instruction using the specified register.
LLVM_ABI bool isConstantPhysReg(MCRegister PhysReg) const
Returns true if PhysReg is unallocatable and constant throughout the function.
LLVM_ABI const TargetRegisterClass * constrainRegClass(Register Reg, const TargetRegisterClass *RC, unsigned MinNumRegs=0)
constrainRegClass - Constrain the register class of the specified virtual register to be a common sub...
LLVM_ABI MachineInstr * getUniqueVRegDef(Register Reg) const
getUniqueVRegDef - Return the unique machine instr that defines the specified virtual register or nul...
MI-level patchpoint operands.
Definition StackMaps.h:77
Special value supplied for machine level alias analysis.
virtual bool mayAlias(const MachineFrameInfo *) const
Return true if the memory pointed to by this PseudoSourceValue can ever alias an LLVM IR Value.
Wrapper class representing virtual and physical registers.
Definition Register.h:20
constexpr bool isVirtual() const
Return true if the specified register number is in the virtual register namespace.
Definition Register.h:79
constexpr bool isPhysical() const
Return true if the specified register number is in the physical register namespace.
Definition Register.h:83
Represents one node in the SelectionDAG.
bool isMachineOpcode() const
Test if this node has a post-isel opcode, directly corresponding to a MachineInstr opcode.
unsigned getMachineOpcode() const
This may only be called if isMachineOpcode returns true.
ScheduleDAGMI is an implementation of ScheduleDAGInstrs that simply schedules machine instructions ac...
HazardRecognizer - This determines whether or not an instruction can be issued this cycle,...
SmallSet - This maintains a set of unique values, optimizing for the case when the set is small (less...
Definition SmallSet.h:134
bool contains(const T &V) const
Check if the SmallSet contains the given element.
Definition SmallSet.h:229
This class consists of common code factored out of the SmallVector class to reduce code duplication b...
void push_back(const T &Elt)
This is a 'vector' (really, a variable-sized array), optimized for the case when the array is small.
MI-level stackmap operands.
Definition StackMaps.h:36
MI-level Statepoint operands.
Definition StackMaps.h:159
constexpr size_t size() const
size - Get the string size.
Definition StringRef.h:143
constexpr const char * data() const
data - Get a pointer to the start of the string (which may not be null terminated).
Definition StringRef.h:137
Information about stack frame layout on the target.
StackDirection getStackGrowthDirection() const
getStackGrowthDirection - Return the direction the stack grows
int alignSPAdjust(int SPAdj) const
alignSPAdjust - This method aligns the stack adjustment to the correct alignment.
TargetInstrInfo - Interface to description of machine instruction set.
virtual ScheduleHazardRecognizer * CreateTargetPostRAHazardRecognizer(const InstrItineraryData *, const ScheduleDAG *DAG) const
Allocate and return a hazard recognizer to use for this target when scheduling the machine instructio...
virtual bool hasLowDefLatency(const TargetSchedModel &SchedModel, const MachineInstr &DefMI, unsigned DefIdx) const
Compute operand latency of a def of 'Reg'.
virtual void setSpecialOperandAttr(MachineInstr &OldMI1, MachineInstr &OldMI2, MachineInstr &NewMI1, MachineInstr &NewMI2) const
This is an architecture-specific helper function of reassociateOps.
const TargetRegisterInfo & TRI
virtual unsigned getNumMicroOps(const InstrItineraryData *ItinData, const MachineInstr &MI) const
Return the number of u-operations the given machine instruction will be decoded to on the target cpu.
virtual const TargetRegisterClass * getRegClass(const MCInstrDesc &MCID, unsigned OpNum) const
Given a machine instruction descriptor, returns the register class constraint for OpNum,...
virtual int getSPAdjust(const MachineInstr &MI) const
Returns the actual stack pointer adjustment made by an instruction as part of a call sequence.
virtual void ReplaceTailWithBranchTo(MachineBasicBlock::iterator Tail, MachineBasicBlock *NewDest) const
Delete the instruction OldInst and everything after it, replacing it with an unconditional branch to ...
virtual bool PredicateInstruction(MachineInstr &MI, ArrayRef< MachineOperand > Pred) const
Convert the instruction into a predicated instruction.
int16_t getOpRegClassID(const MCOperandInfo &OpInfo) const
bool areOpcodesEqualOrInverse(unsigned Opcode1, unsigned Opcode2) const
Return true when \P Opcode1 or its inversion is equal to \P Opcode2.
virtual outliner::InstrType getOutliningTypeImpl(const MachineModuleInfo &MMI, MachineBasicBlock::iterator &MIT, unsigned Flags) const
Target-dependent implementation for getOutliningTypeImpl.
virtual bool getInsertSubregLikeInputs(const MachineInstr &MI, unsigned DefIdx, RegSubRegPair &BaseReg, RegSubRegPairAndIdx &InsertedReg) const
Target-dependent implementation of getInsertSubregInputs.
outliner::InstrType getOutliningType(const MachineModuleInfo &MMI, MachineBasicBlock::iterator &MIT, unsigned Flags) const
Returns how or if MIT should be outlined.
virtual bool isThroughputPattern(unsigned Pattern) const
Return true when a code sequence can improve throughput.
bool getAccumulatorReassociationPatterns(MachineInstr &Root, SmallVectorImpl< unsigned > &Patterns) const
Find chains of accumulations that can be rewritten as a tree for increased ILP.
virtual std::pair< unsigned, unsigned > getPatchpointUnfoldableRange(const MachineInstr &MI) const
For a patchpoint, stackmap, or statepoint intrinsic, return the range of operands which can't be fold...
virtual bool findCommutedOpIndices(const MachineInstr &MI, unsigned &SrcOpIdx1, unsigned &SrcOpIdx2) const
Returns true iff the routine could find two commutable operands in the given machine instruction.
virtual void mergeOutliningCandidateAttributes(Function &F, std::vector< outliner::Candidate > &Candidates) const
Optional target hook to create the LLVM IR attributes for the outlined function.
bool isUnpredicatedTerminator(const MachineInstr &MI) const
Returns true if the instruction is a terminator instruction that has not been predicated.
virtual void insertNoop(MachineBasicBlock &MBB, MachineBasicBlock::iterator MI) const
Insert a noop into the instruction stream at the specified point.
void getAccumulatorChain(MachineInstr *CurrentInstr, SmallVectorImpl< Register > &Chain) const
Find the chain of accumulator instructions in \P MBB and return them in \P Chain.
bool isFrameInstr(const MachineInstr &I) const
Returns true if the argument is a frame pseudo instruction.
virtual bool getRegSequenceLikeInputs(const MachineInstr &MI, unsigned DefIdx, SmallVectorImpl< RegSubRegPairAndIdx > &InputRegs) const
Target-dependent implementation of getRegSequenceInputs.
virtual bool getStackSlotRange(const TargetRegisterClass *RC, unsigned SubIdx, unsigned &Size, unsigned &Offset, const MachineFunction &MF) const
Compute the size in bytes and offset within a stack slot of a spilled register or subregister.
virtual ScheduleHazardRecognizer * CreateTargetMIHazardRecognizer(const InstrItineraryData *, const ScheduleDAGMI *DAG) const
Allocate and return a hazard recognizer to use for this target when scheduling the machine instructio...
virtual bool hasStoreToStackSlot(const MachineInstr &MI, SmallVectorImpl< const MachineMemOperand * > &Accesses) const
If the specified machine instruction has a store to a stack slot, return true along with the FrameInd...
virtual bool hasReassociableOperands(const MachineInstr &Inst, const MachineBasicBlock *MBB) const
Return true when \P Inst has reassociable operands in the same \P MBB.
virtual void storeRegToStackSlot(MachineBasicBlock &MBB, MachineBasicBlock::iterator MI, Register SrcReg, bool isKill, int FrameIndex, const TargetRegisterClass *RC, Register VReg, MachineInstr::MIFlag Flags=MachineInstr::NoFlags) const
Store the specified register of the given register class to the specified stack frame index.
virtual unsigned getInlineAsmLength(const char *Str, const MCAsmInfo &MAI, const TargetSubtargetInfo *STI=nullptr) const
Measure the specified inline asm to determine an approximation of its length.
virtual void genAlternativeCodeSequence(MachineInstr &Root, unsigned Pattern, SmallVectorImpl< MachineInstr * > &InsInstrs, SmallVectorImpl< MachineInstr * > &DelInstrs, DenseMap< Register, unsigned > &InstIdxForVirtReg) const
When getMachineCombinerPatterns() finds patterns, this function generates the instructions that could...
virtual std::optional< ParamLoadedValue > describeLoadedValue(const MachineInstr &MI, Register Reg) const
Produce the expression describing the MI loading a value into the physical register Reg.
void lowerCopy(MachineInstr *MI, const TargetRegisterInfo *TRI) const
This function defines the logic to lower COPY instruction to target specific instruction(s).
virtual unsigned getReduceOpcodeForAccumulator(unsigned int AccumulatorOpCode) const
Returns the opcode that should be used to reduce accumulation registers.
virtual Register isLoadFromStackSlot(const MachineInstr &MI, int &FrameIndex) const
If the specified machine instruction is a direct load from a stack slot, return the virtual or physic...
virtual MachineInstr * foldMemoryOperandImpl(MachineFunction &MF, MachineInstr &MI, ArrayRef< unsigned > Ops, MachineBasicBlock::iterator InsertPt, int FrameIndex, MachineInstr *&CopyMI, LiveIntervals *LIS=nullptr, VirtRegMap *VRM=nullptr) const
Target-dependent implementation for foldMemoryOperand.
virtual ScheduleHazardRecognizer * CreateTargetHazardRecognizer(const TargetSubtargetInfo *STI, const ScheduleDAG *DAG) const
Allocate and return a hazard recognizer to use for this target when scheduling the machine instructio...
virtual MachineInstr * optimizeLoadInstr(MachineInstr &MI, const MachineRegisterInfo *MRI, Register &FoldAsLoadDefReg, MachineInstr *&DefMI, MachineInstr *&CopyMI) const
Try to remove the load by folding it to a register operand at the use.
TargetInstrInfo(const TargetRegisterInfo &TRI, unsigned CFSetupOpcode=~0u, unsigned CFDestroyOpcode=~0u, unsigned CatchRetOpcode=~0u, unsigned ReturnOpcode=~0u, const int16_t *const RegClassByHwModeTable=nullptr)
virtual unsigned insertBranch(MachineBasicBlock &MBB, MachineBasicBlock *TBB, MachineBasicBlock *FBB, ArrayRef< MachineOperand > Cond, const DebugLoc &DL, int *BytesAdded=nullptr) const
Insert branch code into the end of the specified MachineBasicBlock.
virtual void copyPhysReg(MachineBasicBlock &MBB, MachineBasicBlock::iterator MI, const DebugLoc &DL, Register DestReg, Register SrcReg, bool KillSrc, bool RenamableDest=false, bool RenamableSrc=false) const
Emit instructions to copy a pair of physical registers.
virtual unsigned getAccumulationStartOpcode(unsigned Opcode) const
Returns an opcode which defines the accumulator used by \P Opcode.
unsigned getCallFrameSetupOpcode() const
These methods return the opcode of the frame setup/destroy instructions if they exist (-1 otherwise).
virtual bool getMachineCombinerPatterns(MachineInstr &Root, SmallVectorImpl< unsigned > &Patterns, bool DoRegPressureReduce) const
Return true when there is potentially a faster code sequence for an instruction chain ending in Root.
virtual bool isReMaterializableImpl(const MachineInstr &MI) const
For instructions with opcodes for which the M_REMATERIALIZABLE flag is set, this hook lets the target...
virtual MCInst getNop() const
Return the noop instruction to use for a noop.
unsigned getCallFrameSizeAt(MachineInstr &MI) const
virtual MachineInstr & duplicate(MachineBasicBlock &MBB, MachineBasicBlock::iterator InsertBefore, const MachineInstr &Orig) const
Clones instruction or the whole instruction bundle Orig and insert into MBB before InsertBefore.
std::pair< unsigned, unsigned > getReassociationOpcodes(unsigned Pattern, const MachineInstr &Root, const MachineInstr &Prev) const
Reassociation of some instructions requires inverse operations (e.g.
virtual bool isMBBSafeToOutlineFrom(MachineBasicBlock &MBB, unsigned &Flags) const
Optional target hook that returns true if MBB is safe to outline from, and returns any target-specifi...
virtual void getReassociateOperandIndices(const MachineInstr &Root, unsigned Pattern, std::array< unsigned, 5 > &OperandIndices) const
The returned array encodes the operand index for each parameter because the operands may be commuted;...
int64_t getFrameTotalSize(const MachineInstr &I) const
Returns the total frame size, which is made up of the space set up inside the pair of frame start-sto...
MachineInstr * commuteInstruction(MachineInstr &MI, bool NewMI=false, unsigned OpIdx1=CommuteAnyOperandIndex, unsigned OpIdx2=CommuteAnyOperandIndex) const
This method commutes the operands of the given machine instruction MI.
virtual std::optional< unsigned > getOperandLatency(const InstrItineraryData *ItinData, SDNode *DefNode, unsigned DefIdx, SDNode *UseNode, unsigned UseIdx) const
virtual bool isAssociativeAndCommutative(const MachineInstr &Inst, bool Invert=false) const
Return true when \P Inst is both associative and commutative.
virtual void reMaterialize(MachineBasicBlock &MBB, MachineBasicBlock::iterator MI, Register DestReg, unsigned SubIdx, const MachineInstr &Orig, LaneBitmask UsedLanes=LaneBitmask::getAll()) const
Re-issue the specified 'original' instruction at the specific location targeting a new destination re...
void reassociateOps(MachineInstr &Root, MachineInstr &Prev, unsigned Pattern, SmallVectorImpl< MachineInstr * > &InsInstrs, SmallVectorImpl< MachineInstr * > &DelInstrs, ArrayRef< unsigned > OperandIndices, DenseMap< Register, unsigned > &InstrIdxForVirtReg) const
Attempt to reassociate \P Root and \P Prev according to \P Pattern to reduce critical path length.
virtual std::optional< unsigned > getInverseOpcode(unsigned Opcode) const
Return the inverse operation opcode if it exists for \P Opcode (e.g.
virtual void insertNoops(MachineBasicBlock &MBB, MachineBasicBlock::iterator MI, unsigned Quantity) const
Insert noops into the instruction stream at the specified point.
unsigned getCallFrameDestroyOpcode() const
int64_t getFrameSize(const MachineInstr &I) const
Returns size of the frame associated with the given frame instruction.
virtual bool isPredicated(const MachineInstr &MI) const
Returns true if the instruction is already predicated.
bool getInsertSubregInputs(const MachineInstr &MI, unsigned DefIdx, RegSubRegPair &BaseReg, RegSubRegPairAndIdx &InsertedReg) const
Build the equivalent inputs of an INSERT_SUBREG for the given MI and DefIdx.
virtual ~TargetInstrInfo()
virtual unsigned getInstrLatency(const InstrItineraryData *ItinData, const MachineInstr &MI, unsigned *PredCost=nullptr) const
Compute the instruction latency of a given instruction.
virtual bool produceSameValue(const MachineInstr &MI0, const MachineInstr &MI1, const MachineRegisterInfo *MRI=nullptr) const
Return true if two machine instructions would produce identical values.
virtual bool isAccumulationOpcode(unsigned Opcode) const
Return true when \P OpCode is an instruction which performs accumulation into one of its operand regi...
std::optional< DestSourcePair > isCopyInstr(const MachineInstr &MI) const
If the specific machine instruction is an instruction that moves/copies value from one register to ano...
bool isReassociationCandidate(const MachineInstr &Inst, bool &Commuted) const
Return true if the input \P Inst is part of a chain of dependent ops that are suitable for reassociat...
void reduceAccumulatorTree(SmallVectorImpl< Register > &RegistersToReduce, SmallVectorImpl< MachineInstr * > &InsInstrs, MachineFunction &MF, MachineInstr &Root, MachineRegisterInfo &MRI, DenseMap< Register, unsigned > &InstrIdxForVirtReg, Register ResultReg) const
Reduces branches of the accumulator tree into a single register.
virtual bool isSchedulingBoundary(const MachineInstr &MI, const MachineBasicBlock *MBB, const MachineFunction &MF) const
Test if the given instruction should be considered a scheduling boundary.
virtual bool getMemOperandsWithOffsetWidth(const MachineInstr &MI, SmallVectorImpl< const MachineOperand * > &BaseOps, int64_t &Offset, bool &OffsetIsScalable, LocationSize &Width, const TargetRegisterInfo *TRI) const
Get zero or more base operands and the byte offset of an instruction that reads/writes memory.
virtual unsigned getPredicationCost(const MachineInstr &MI) const
virtual void loadRegFromStackSlot(MachineBasicBlock &MBB, MachineBasicBlock::iterator MI, Register DestReg, int FrameIndex, const TargetRegisterClass *RC, Register VReg, unsigned SubReg=0, MachineInstr::MIFlag Flags=MachineInstr::NoFlags) const
Load the specified register of the given register class from the specified stack frame index.
MachineInstr * foldMemoryOperand(MachineInstr &MI, ArrayRef< unsigned > Ops, int FI, MachineInstr *&CopyMI, LiveIntervals *LIS=nullptr, VirtRegMap *VRM=nullptr) const
Attempt to fold a load or store of the specified stack slot into the specified machine instruction fo...
virtual CombinerObjective getCombinerObjective(unsigned Pattern) const
Return the objective of a combiner pattern.
virtual MachineInstr * commuteInstructionImpl(MachineInstr &MI, bool NewMI, unsigned OpIdx1, unsigned OpIdx2) const
This method commutes the operands of the given machine instruction MI.
virtual bool isFunctionSafeToSplit(const MachineFunction &MF) const
Return true if the function is a viable candidate for machine function splitting.
virtual MachineTraceStrategy getMachineCombinerTraceStrategy() const
Return a strategy that MachineCombiner must use when creating traces.
bool getRegSequenceInputs(const MachineInstr &MI, unsigned DefIdx, SmallVectorImpl< RegSubRegPairAndIdx > &InputRegs) const
Build the equivalent inputs of a REG_SEQUENCE for the given MI and DefIdx.
virtual bool hasLoadFromStackSlot(const MachineInstr &MI, SmallVectorImpl< const MachineMemOperand * > &Accesses) const
If the specified machine instruction has a load from a stack slot, return true along with the FrameIn...
virtual bool isGlobalMemoryObject(const MachineInstr *MI) const
Returns true if MI is an instruction we are unable to reason about (like a call or something with unm...
virtual std::optional< RegImmPair > isAddImmediate(const MachineInstr &MI, Register Reg) const
If the specific machine instruction is an instruction that adds an immediate value and a register,...
unsigned defaultDefLatency(const MCSchedModel &SchedModel, const MachineInstr &DefMI) const
Return the default expected latency for a def based on its opcode.
static const unsigned CommuteAnyOperandIndex
virtual bool hasReassociableSibling(const MachineInstr &Inst, bool &Commuted) const
Return true when \P Inst has reassociable sibling.
virtual std::string createMIROperandComment(const MachineInstr &MI, const MachineOperand &Op, unsigned OpIdx, const TargetRegisterInfo *TRI) const
virtual bool isHighLatencyDef(int opc) const
Return true if this opcode has high latency to its result.
static bool fixCommutedOpIndices(unsigned &ResultIdx1, unsigned &ResultIdx2, unsigned CommutableOpIdx1, unsigned CommutableOpIdx2)
Assigns the (CommutableOpIdx1, CommutableOpIdx2) pair of commutable operand indices to (ResultIdx1,...
bool getExtractSubregInputs(const MachineInstr &MI, unsigned DefIdx, RegSubRegPairAndIdx &InputReg) const
Build the equivalent inputs of an EXTRACT_SUBREG for the given MI and DefIdx.
virtual bool getExtractSubregLikeInputs(const MachineInstr &MI, unsigned DefIdx, RegSubRegPairAndIdx &InputReg) const
Target-dependent implementation of getExtractSubregInputs.
bool usePreRAHazardRecognizer() const
Provide a global flag for disabling the PreRA hazard recognizer that targets may choose to honor.
bool getMemOperandWithOffset(const MachineInstr &MI, const MachineOperand *&BaseOp, int64_t &Offset, bool &OffsetIsScalable, const TargetRegisterInfo *TRI) const
Get the base operand and byte offset of an instruction that reads/writes memory.
Register getStackPointerRegisterToSaveRestore() const
If a physical register, this specifies the register that llvm.stacksave/llvm.stackrestore should save...
This class defines information used to lower LLVM code to legal SelectionDAG operators that the targe...
const Triple & getTargetTriple() const
bool contains(Register Reg) const
Return true if the specified register is included in this register class.
bool hasSubClassEq(const TargetRegisterClass *RC) const
Returns true if RC is a sub-class of or equal to this class.
TargetRegisterInfo base class - We assume that the target defines a static array of TargetRegisterDes...
Provide an instruction scheduling machine model to CodeGen passes.
const InstrItineraryData * getInstrItineraries() const
TargetSubtargetInfo - Generic base class for all target subtargets.
virtual const TargetFrameLowering * getFrameLowering() const
virtual const TargetInstrInfo * getInstrInfo() const
virtual const TargetLowering * getTargetLowering() const
bool isOSDarwin() const
Is this a "Darwin" OS (macOS, iOS, tvOS, watchOS, DriverKit, XROS, or bridgeOS).
Definition Triple.h:646
A raw_ostream that writes to an std::string.
#define llvm_unreachable(msg)
Marks that the current location is not supposed to be reachable.
@ Tail
Attempts to make calls as fast as possible while guaranteeing that tail call optimization can always b...
Definition CallingConv.h:76
@ C
The default llvm calling convention, compatible with C.
Definition CallingConv.h:34
initializer< Ty > init(const Ty &Val)
InstrType
Represents how an instruction should be mapped by the outliner.
This is an optimization pass for GlobalISel generic memory operations.
@ Offset
Definition DWP.cpp:532
@ Length
Definition DWP.cpp:532
MachineTraceStrategy
Strategies for selecting traces.
@ TS_MinInstrCount
Select the trace through a block that has the fewest instructions.
bool all_of(R &&range, UnaryPredicate P)
Provide wrappers to std::all_of which take ranges instead of having to pass begin/end explicitly.
Definition STLExtras.h:1739
MachineInstrBuilder BuildMI(MachineFunction &MF, const MIMetadata &MIMD, const MCInstrDesc &MCID)
Builder interface. Specify how to create the initial instruction itself.
@ Define
Register definition.
auto enumerate(FirstRange &&First, RestRanges &&...Rest)
Given two or more input ranges, returns a new range whose values are tuples (A, B,...
Definition STLExtras.h:2554
InterleavedRange< Range > interleaved(const Range &R, StringRef Separator=", ", StringRef Prefix="", StringRef Suffix="")
Output range R as a sequence of interleaved elements.
constexpr RegState getKillRegState(bool B)
constexpr from_range_t from_range
iterator_range< T > make_range(T x, T y)
Convenience function for iterating over sub-ranges.
bool isa_and_nonnull(const Y &Val)
Definition Casting.h:676
unsigned Log2_32(uint32_t Value)
Return the floor log base 2 of the specified value, -1 if the value is zero.
Definition MathExtras.h:331
auto reverse(ContainerTy &&C)
Definition STLExtras.h:408
MachineInstr * getImm(const MachineOperand &MO, const MachineRegisterInfo *MRI)
LLVM_ABI void report_fatal_error(Error Err, bool gen_crash_diag=true)
Definition Error.cpp:163
CombinerObjective
The combiner's goal may differ based on which pattern it is attempting to optimize.
LLVM_ABI VirtRegInfo AnalyzeVirtRegInBundle(MachineInstr &MI, Register Reg, SmallVectorImpl< std::pair< MachineInstr *, unsigned > > *Ops=nullptr)
AnalyzeVirtRegInBundle - Analyze how the current instruction or bundle uses a virtual register.
@ First
Helpers to iterate all locations in the MemoryEffectsBase class.
Definition ModRef.h:74
DWARFExpression::Operation Op
bool is_contained(R &&Range, const E &Element)
Returns true if Element is found in Range.
Definition STLExtras.h:1947
bool isSpace(char C)
Checks whether character C is whitespace in the "C" locale.
std::pair< MachineOperand, DIExpression * > ParamLoadedValue
void swap(llvm::BitVector &LHS, llvm::BitVector &RHS)
Implement std::swap in terms of BitVector swap.
Definition BitVector.h:872
#define N
Machine model for scheduling, bundling, and heuristics.
Definition MCSchedule.h:258
static LLVM_ABI MachinePointerInfo getFixedStack(MachineFunction &MF, int FI, int64_t Offset=0)
Return a MachinePointerInfo record that refers to the specified FrameIndex.
A pair composed of a pair of a register and a sub-register index, and another sub-register index.
A pair composed of a register and a sub-register index.
VirtRegInfo - Information about a virtual register used by a set of operands.
bool Reads
Reads - One of the operands read the virtual register.
bool Writes
Writes - One of the operands writes the virtual register.
An individual sequence of instructions to be replaced with a call to an outlined function.
MachineFunction * getMF() const