LLVM 23.0.0git
TargetInstrInfo.cpp
Go to the documentation of this file.
1//===-- TargetInstrInfo.cpp - Target Instruction Information --------------===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8//
9// This file implements the TargetInstrInfo class.
10//
11//===----------------------------------------------------------------------===//
12
14#include "llvm/ADT/SmallSet.h"
31#include "llvm/IR/DataLayout.h"
33#include "llvm/MC/MCAsmInfo.h"
40
41using namespace llvm;
42
44 "disable-sched-hazard", cl::Hidden, cl::init(false),
45 cl::desc("Disable hazard detection during preRA scheduling"));
46
48 "acc-reassoc", cl::Hidden, cl::init(true),
49 cl::desc("Enable reassociation of accumulation chains"));
50
53 cl::desc("Minimum length of accumulator chains "
54 "required for the optimization to kick in"));
55
57 "acc-max-width", cl::Hidden, cl::init(3),
58 cl::desc("Maximum number of branches in the accumulator tree"));
59
61
63 unsigned OpNum) const {
64 if (OpNum >= MCID.getNumOperands())
65 return nullptr;
66
67 const MCOperandInfo &OpInfo = MCID.operands()[OpNum];
68 int16_t RegClass = getOpRegClassID(OpInfo);
69
70 // Instructions like INSERT_SUBREG do not have fixed register classes.
71 if (RegClass < 0)
72 return nullptr;
73
74 // Otherwise just look it up normally.
75 return TRI.getRegClass(RegClass);
76}
77
78/// insertNoop - Insert a noop into the instruction stream at the specified
79/// point.
82 llvm_unreachable("Target didn't implement insertNoop!");
83}
84
85/// insertNoops - Insert noops into the instruction stream at the specified
86/// point.
89 unsigned Quantity) const {
90 for (unsigned i = 0; i < Quantity; ++i)
92}
93
94static bool isAsmComment(const char *Str, const MCAsmInfo &MAI) {
95 return strncmp(Str, MAI.getCommentString().data(),
96 MAI.getCommentString().size()) == 0;
97}
98
99/// Measure the specified inline asm to determine an approximation of its
100/// length.
101/// Comments (which run till the next SeparatorString or newline) do not
102/// count as an instruction.
103/// Any other non-whitespace text is considered an instruction, with
104/// multiple instructions separated by SeparatorString or newlines.
105/// Variable-length instructions are not handled here; this function
106/// may be overloaded in the target code to do that.
107/// We implement a special case of the .space directive which takes only a
108/// single integer argument in base 10 that is the size in bytes. This is a
109/// restricted form of the GAS directive in that we only interpret
110/// simple--i.e. not a logical or arithmetic expression--size values without
111/// the optional fill value. This is primarily used for creating arbitrary
112/// sized inline asm blocks for testing purposes.
114 const char *Str,
115 const MCAsmInfo &MAI, const TargetSubtargetInfo *STI) const {
116 // Count the number of instructions in the asm.
117 bool AtInsnStart = true;
118 unsigned Length = 0;
119 const unsigned MaxInstLength = MAI.getMaxInstLength(STI);
120 for (; *Str; ++Str) {
121 if (*Str == '\n' || strncmp(Str, MAI.getSeparatorString(),
122 strlen(MAI.getSeparatorString())) == 0) {
123 AtInsnStart = true;
124 } else if (isAsmComment(Str, MAI)) {
125 // Stop counting as an instruction after a comment until the next
126 // separator.
127 AtInsnStart = false;
128 }
129
130 if (AtInsnStart && !isSpace(static_cast<unsigned char>(*Str))) {
131 unsigned AddLength = MaxInstLength;
132 if (strncmp(Str, ".space", 6) == 0) {
133 char *EStr;
134 int SpaceSize;
135 SpaceSize = strtol(Str + 6, &EStr, 10);
136 SpaceSize = SpaceSize < 0 ? 0 : SpaceSize;
137 while (*EStr != '\n' && isSpace(static_cast<unsigned char>(*EStr)))
138 ++EStr;
139 if (*EStr == '\0' || *EStr == '\n' ||
140 isAsmComment(EStr, MAI)) // Successfully parsed .space argument
141 AddLength = SpaceSize;
142 }
143 Length += AddLength;
144 AtInsnStart = false;
145 }
146 }
147
148 return Length;
149}
150
151/// ReplaceTailWithBranchTo - Delete the instruction OldInst and everything
152/// after it, replacing it with an unconditional branch to NewDest.
153void
155 MachineBasicBlock *NewDest) const {
156 MachineBasicBlock *MBB = Tail->getParent();
157
158 // Remove all the old successors of MBB from the CFG.
159 while (!MBB->succ_empty())
160 MBB->removeSuccessor(MBB->succ_begin());
161
162 // Save off the debug loc before erasing the instruction.
163 DebugLoc DL = Tail->getDebugLoc();
164
165 // Update call info and remove all the dead instructions
166 // from the end of MBB.
167 while (Tail != MBB->end()) {
168 auto MI = Tail++;
169 if (MI->shouldUpdateAdditionalCallInfo())
170 MBB->getParent()->eraseAdditionalCallInfo(&*MI);
171 MBB->erase(MI);
172 }
173
174 // If MBB isn't immediately before MBB, insert a branch to it.
176 insertBranch(*MBB, NewDest, nullptr, SmallVector<MachineOperand, 0>(), DL);
177 MBB->addSuccessor(NewDest);
178}
179
181 bool NewMI, unsigned Idx1,
182 unsigned Idx2) const {
183 const MCInstrDesc &MCID = MI.getDesc();
184 bool HasDef = MCID.getNumDefs();
185 if (HasDef && !MI.getOperand(0).isReg())
186 // No idea how to commute this instruction. Target should implement its own.
187 return nullptr;
188
189 unsigned CommutableOpIdx1 = Idx1; (void)CommutableOpIdx1;
190 unsigned CommutableOpIdx2 = Idx2; (void)CommutableOpIdx2;
191 assert(findCommutedOpIndices(MI, CommutableOpIdx1, CommutableOpIdx2) &&
192 CommutableOpIdx1 == Idx1 && CommutableOpIdx2 == Idx2 &&
193 "TargetInstrInfo::CommuteInstructionImpl(): not commutable operands.");
194 assert(MI.getOperand(Idx1).isReg() && MI.getOperand(Idx2).isReg() &&
195 "This only knows how to commute register operands so far");
196
197 Register Reg0 = HasDef ? MI.getOperand(0).getReg() : Register();
198 Register Reg1 = MI.getOperand(Idx1).getReg();
199 Register Reg2 = MI.getOperand(Idx2).getReg();
200 unsigned SubReg0 = HasDef ? MI.getOperand(0).getSubReg() : 0;
201 unsigned SubReg1 = MI.getOperand(Idx1).getSubReg();
202 unsigned SubReg2 = MI.getOperand(Idx2).getSubReg();
203 bool Reg1IsKill = MI.getOperand(Idx1).isKill();
204 bool Reg2IsKill = MI.getOperand(Idx2).isKill();
205 bool Reg1IsUndef = MI.getOperand(Idx1).isUndef();
206 bool Reg2IsUndef = MI.getOperand(Idx2).isUndef();
207 bool Reg1IsInternal = MI.getOperand(Idx1).isInternalRead();
208 bool Reg2IsInternal = MI.getOperand(Idx2).isInternalRead();
209 // Avoid calling isRenamable for virtual registers since we assert that
210 // renamable property is only queried/set for physical registers.
211 bool Reg1IsRenamable =
212 Reg1.isPhysical() ? MI.getOperand(Idx1).isRenamable() : false;
213 bool Reg2IsRenamable =
214 Reg2.isPhysical() ? MI.getOperand(Idx2).isRenamable() : false;
215
216 // For a case like this:
217 // %0.sub = INST %0.sub(tied), %1.sub, implicit-def %0
218 // we need to update the implicit-def after commuting to result in:
219 // %1.sub = INST %1.sub(tied), %0.sub, implicit-def %1
220 SmallVector<unsigned> UpdateImplicitDefIdx;
221 if (HasDef && MI.hasImplicitDef()) {
222 for (auto [OpNo, MO] : llvm::enumerate(MI.implicit_operands())) {
223 Register ImplReg = MO.getReg();
224 if ((ImplReg.isVirtual() && ImplReg == Reg0) ||
225 (ImplReg.isPhysical() && Reg0.isPhysical() &&
226 TRI.isSubRegisterEq(ImplReg, Reg0)))
227 UpdateImplicitDefIdx.push_back(OpNo + MI.getNumExplicitOperands());
228 }
229 }
230
231 // If destination is tied to either of the commuted source register, then
232 // it must be updated.
233 if (HasDef && Reg0 == Reg1 &&
234 MI.getDesc().getOperandConstraint(Idx1, MCOI::TIED_TO) == 0) {
235 Reg2IsKill = false;
236 Reg0 = Reg2;
237 SubReg0 = SubReg2;
238 } else if (HasDef && Reg0 == Reg2 &&
239 MI.getDesc().getOperandConstraint(Idx2, MCOI::TIED_TO) == 0) {
240 Reg1IsKill = false;
241 Reg0 = Reg1;
242 SubReg0 = SubReg1;
243 }
244
245 MachineInstr *CommutedMI = nullptr;
246 if (NewMI) {
247 // Create a new instruction.
248 MachineFunction &MF = *MI.getMF();
249 CommutedMI = MF.CloneMachineInstr(&MI);
250 } else {
251 CommutedMI = &MI;
252 }
253
254 if (HasDef) {
255 CommutedMI->getOperand(0).setReg(Reg0);
256 CommutedMI->getOperand(0).setSubReg(SubReg0);
257 for (unsigned Idx : UpdateImplicitDefIdx)
258 CommutedMI->getOperand(Idx).setReg(Reg0);
259 }
260 CommutedMI->getOperand(Idx2).setReg(Reg1);
261 CommutedMI->getOperand(Idx1).setReg(Reg2);
262 CommutedMI->getOperand(Idx2).setSubReg(SubReg1);
263 CommutedMI->getOperand(Idx1).setSubReg(SubReg2);
264 CommutedMI->getOperand(Idx2).setIsKill(Reg1IsKill);
265 CommutedMI->getOperand(Idx1).setIsKill(Reg2IsKill);
266 CommutedMI->getOperand(Idx2).setIsUndef(Reg1IsUndef);
267 CommutedMI->getOperand(Idx1).setIsUndef(Reg2IsUndef);
268 CommutedMI->getOperand(Idx2).setIsInternalRead(Reg1IsInternal);
269 CommutedMI->getOperand(Idx1).setIsInternalRead(Reg2IsInternal);
270 // Avoid calling setIsRenamable for virtual registers since we assert that
271 // renamable property is only queried/set for physical registers.
272 if (Reg1.isPhysical())
273 CommutedMI->getOperand(Idx2).setIsRenamable(Reg1IsRenamable);
274 if (Reg2.isPhysical())
275 CommutedMI->getOperand(Idx1).setIsRenamable(Reg2IsRenamable);
276 return CommutedMI;
277}
278
280 unsigned OpIdx1,
281 unsigned OpIdx2) const {
282 // If OpIdx1 or OpIdx2 is not specified, then this method is free to choose
283 // any commutable operand, which is done in findCommutedOpIndices() method
284 // called below.
285 if ((OpIdx1 == CommuteAnyOperandIndex || OpIdx2 == CommuteAnyOperandIndex) &&
286 !findCommutedOpIndices(MI, OpIdx1, OpIdx2)) {
287 assert(MI.isCommutable() &&
288 "Precondition violation: MI must be commutable.");
289 return nullptr;
290 }
291 return commuteInstructionImpl(MI, NewMI, OpIdx1, OpIdx2);
292}
293
295 unsigned &ResultIdx2,
296 unsigned CommutableOpIdx1,
297 unsigned CommutableOpIdx2) {
298 if (ResultIdx1 == CommuteAnyOperandIndex &&
299 ResultIdx2 == CommuteAnyOperandIndex) {
300 ResultIdx1 = CommutableOpIdx1;
301 ResultIdx2 = CommutableOpIdx2;
302 } else if (ResultIdx1 == CommuteAnyOperandIndex) {
303 if (ResultIdx2 == CommutableOpIdx1)
304 ResultIdx1 = CommutableOpIdx2;
305 else if (ResultIdx2 == CommutableOpIdx2)
306 ResultIdx1 = CommutableOpIdx1;
307 else
308 return false;
309 } else if (ResultIdx2 == CommuteAnyOperandIndex) {
310 if (ResultIdx1 == CommutableOpIdx1)
311 ResultIdx2 = CommutableOpIdx2;
312 else if (ResultIdx1 == CommutableOpIdx2)
313 ResultIdx2 = CommutableOpIdx1;
314 else
315 return false;
316 } else
317 // Check that the result operand indices match the given commutable
318 // operand indices.
319 return (ResultIdx1 == CommutableOpIdx1 && ResultIdx2 == CommutableOpIdx2) ||
320 (ResultIdx1 == CommutableOpIdx2 && ResultIdx2 == CommutableOpIdx1);
321
322 return true;
323}
324
326 unsigned &SrcOpIdx1,
327 unsigned &SrcOpIdx2) const {
328 assert(!MI.isBundle() &&
329 "TargetInstrInfo::findCommutedOpIndices() can't handle bundles");
330
331 const MCInstrDesc &MCID = MI.getDesc();
332 if (!MCID.isCommutable())
333 return false;
334
335 // This assumes v0 = op v1, v2 and commuting would swap v1 and v2. If this
336 // is not true, then the target must implement this.
337 unsigned CommutableOpIdx1 = MCID.getNumDefs();
338 unsigned CommutableOpIdx2 = CommutableOpIdx1 + 1;
339 if (!fixCommutedOpIndices(SrcOpIdx1, SrcOpIdx2,
340 CommutableOpIdx1, CommutableOpIdx2))
341 return false;
342
343 if (!MI.getOperand(SrcOpIdx1).isReg() || !MI.getOperand(SrcOpIdx2).isReg())
344 // No idea.
345 return false;
346 return true;
347}
348
350 if (!MI.isTerminator()) return false;
351
352 // Conditional branch is a special case.
353 if (MI.isBranch() && !MI.isBarrier())
354 return true;
355 if (!MI.isPredicable())
356 return true;
357 return !isPredicated(MI);
358}
359
362 bool MadeChange = false;
363
364 assert(!MI.isBundle() &&
365 "TargetInstrInfo::PredicateInstruction() can't handle bundles");
366
367 const MCInstrDesc &MCID = MI.getDesc();
368 if (!MI.isPredicable())
369 return false;
370
371 for (unsigned j = 0, i = 0, e = MI.getNumOperands(); i != e; ++i) {
372 if (MCID.operands()[i].isPredicate()) {
373 MachineOperand &MO = MI.getOperand(i);
374 if (MO.isReg()) {
375 MO.setReg(Pred[j].getReg());
376 MadeChange = true;
377 } else if (MO.isImm()) {
378 MO.setImm(Pred[j].getImm());
379 MadeChange = true;
380 } else if (MO.isMBB()) {
381 MO.setMBB(Pred[j].getMBB());
382 MadeChange = true;
383 }
384 ++j;
385 }
386 }
387 return MadeChange;
388}
389
391 const MachineInstr &MI,
393 size_t StartSize = Accesses.size();
394 for (MachineInstr::mmo_iterator o = MI.memoperands_begin(),
395 oe = MI.memoperands_end();
396 o != oe; ++o) {
397 if ((*o)->isLoad() &&
398 isa_and_nonnull<FixedStackPseudoSourceValue>((*o)->getPseudoValue()))
399 Accesses.push_back(*o);
400 }
401 return Accesses.size() != StartSize;
402}
403
405 const MachineInstr &MI,
407 size_t StartSize = Accesses.size();
408 for (MachineInstr::mmo_iterator o = MI.memoperands_begin(),
409 oe = MI.memoperands_end();
410 o != oe; ++o) {
411 if ((*o)->isStore() &&
412 isa_and_nonnull<FixedStackPseudoSourceValue>((*o)->getPseudoValue()))
413 Accesses.push_back(*o);
414 }
415 return Accesses.size() != StartSize;
416}
417
419 unsigned SubIdx, unsigned &Size,
420 unsigned &Offset,
421 const MachineFunction &MF) const {
422 if (!SubIdx) {
423 Size = TRI.getSpillSize(*RC);
424 Offset = 0;
425 return true;
426 }
427 unsigned BitSize = TRI.getSubRegIdxSize(SubIdx);
428 // Convert bit size to byte size.
429 if (BitSize % 8)
430 return false;
431
432 int BitOffset = TRI.getSubRegIdxOffset(SubIdx);
433 if (BitOffset < 0 || BitOffset % 8)
434 return false;
435
436 Size = BitSize / 8;
437 Offset = (unsigned)BitOffset / 8;
438
439 assert(TRI.getSpillSize(*RC) >= (Offset + Size) && "bad subregister range");
440
441 if (!MF.getDataLayout().isLittleEndian()) {
442 Offset = TRI.getSpillSize(*RC) - (Offset + Size);
443 }
444 return true;
445}
446
449 Register DestReg, unsigned SubIdx,
450 const MachineInstr &Orig,
451 LaneBitmask UsedLanes) const {
452 MachineInstr *MI = MBB.getParent()->CloneMachineInstr(&Orig);
453 MI->substituteRegister(MI->getOperand(0).getReg(), DestReg, SubIdx, TRI);
454 MBB.insert(I, MI);
455}
456
458 const MachineInstr &MI1,
459 const MachineRegisterInfo *MRI) const {
461}
462
465 MachineBasicBlock::iterator InsertBefore,
466 const MachineInstr &Orig) const {
467 MachineFunction &MF = *MBB.getParent();
468 // CFI instructions are marked as non-duplicable, because Darwin compact
469 // unwind info emission can't handle multiple prologue setups.
470 assert((!Orig.isNotDuplicable() ||
472 Orig.isCFIInstruction())) &&
473 "Instruction cannot be duplicated");
474
475 return MF.cloneMachineInstrBundle(MBB, InsertBefore, Orig);
476}
477
478// If the COPY instruction in MI can be folded to a stack operation, return
479// the register class to use.
481 const TargetInstrInfo &TII,
482 unsigned FoldIdx) {
483 assert(TII.isCopyInstr(MI) && "MI must be a COPY instruction");
484 if (MI.getNumOperands() != 2)
485 return nullptr;
486 assert(FoldIdx<2 && "FoldIdx refers no nonexistent operand");
487
488 const MachineOperand &FoldOp = MI.getOperand(FoldIdx);
489 const MachineOperand &LiveOp = MI.getOperand(1 - FoldIdx);
490
491 if (FoldOp.getSubReg() || LiveOp.getSubReg())
492 return nullptr;
493
494 Register FoldReg = FoldOp.getReg();
495 Register LiveReg = LiveOp.getReg();
496
497 assert(FoldReg.isVirtual() && "Cannot fold physregs");
498
499 const MachineRegisterInfo &MRI = MI.getMF()->getRegInfo();
500 const TargetRegisterClass *RC = MRI.getRegClass(FoldReg);
501
502 if (LiveOp.getReg().isPhysical())
503 return RC->contains(LiveOp.getReg()) ? RC : nullptr;
504
505 if (RC->hasSubClassEq(MRI.getRegClass(LiveReg)))
506 return RC;
507
508 // FIXME: Allow folding when register classes are memory compatible.
509 return nullptr;
510}
511
/// Return a target-specific no-op MCInst. The base implementation aborts;
/// targets that emit nops through this hook must provide an override.
MCInst TargetInstrInfo::getNop() const { llvm_unreachable("Not implemented"); }
513
514/// Try to remove the load by folding it to a register
515/// operand at the use. We fold the load instructions if load defines a virtual
516/// register, the virtual register is used once in the same BB, and the
517/// instructions in-between do not load or store, and have no side effects.
519 const MachineRegisterInfo *MRI,
520 Register &FoldAsLoadDefReg,
521 MachineInstr *&DefMI) const {
522 // Check whether we can move DefMI here.
523 DefMI = MRI->getVRegDef(FoldAsLoadDefReg);
524 assert(DefMI);
525 bool SawStore = false;
526 if (!DefMI->isSafeToMove(SawStore))
527 return nullptr;
528
529 // Collect information about virtual register operands of MI.
530 SmallVector<unsigned, 1> SrcOperandIds;
531 for (unsigned i = 0, e = MI.getNumOperands(); i != e; ++i) {
532 MachineOperand &MO = MI.getOperand(i);
533 if (!MO.isReg())
534 continue;
535 Register Reg = MO.getReg();
536 if (Reg != FoldAsLoadDefReg)
537 continue;
538 // Do not fold if we have a subreg use or a def.
539 if (MO.getSubReg() || MO.isDef())
540 return nullptr;
541 SrcOperandIds.push_back(i);
542 }
543 if (SrcOperandIds.empty())
544 return nullptr;
545
546 // Check whether we can fold the def into SrcOperandId.
547 if (MachineInstr *FoldMI = foldMemoryOperand(MI, SrcOperandIds, *DefMI)) {
548 FoldAsLoadDefReg = 0;
549 return FoldMI;
550 }
551
552 return nullptr;
553}
554
555std::pair<unsigned, unsigned>
557 switch (MI.getOpcode()) {
558 case TargetOpcode::STACKMAP:
559 // StackMapLiveValues are foldable
560 return std::make_pair(0, StackMapOpers(&MI).getVarIdx());
561 case TargetOpcode::PATCHPOINT:
562 // For PatchPoint, the call args are not foldable (even if reported in the
563 // stackmap e.g. via anyregcc).
564 return std::make_pair(0, PatchPointOpers(&MI).getVarIdx());
565 case TargetOpcode::STATEPOINT:
566 // For statepoints, fold deopt and gc arguments, but not call arguments.
567 return std::make_pair(MI.getNumDefs(), StatepointOpers(&MI).getVarIdx());
568 default:
569 llvm_unreachable("unexpected stackmap opcode");
570 }
571}
572
574 ArrayRef<unsigned> Ops, int FrameIndex,
575 const TargetInstrInfo &TII) {
576 unsigned StartIdx = 0;
577 unsigned NumDefs = 0;
578 // getPatchpointUnfoldableRange throws guarantee if MI is not a patchpoint.
579 std::tie(NumDefs, StartIdx) = TII.getPatchpointUnfoldableRange(MI);
580
581 unsigned DefToFoldIdx = MI.getNumOperands();
582
583 // Return false if any operands requested for folding are not foldable (not
584 // part of the stackmap's live values).
585 for (unsigned Op : Ops) {
586 if (Op < NumDefs) {
587 assert(DefToFoldIdx == MI.getNumOperands() && "Folding multiple defs");
588 DefToFoldIdx = Op;
589 } else if (Op < StartIdx) {
590 return nullptr;
591 }
592 if (MI.getOperand(Op).isTied())
593 return nullptr;
594 }
595
596 MachineInstr *NewMI =
597 MF.CreateMachineInstr(TII.get(MI.getOpcode()), MI.getDebugLoc(), true);
598 MachineInstrBuilder MIB(MF, NewMI);
599
600 // No need to fold return, the meta data, and function arguments
601 for (unsigned i = 0; i < StartIdx; ++i)
602 if (i != DefToFoldIdx)
603 MIB.add(MI.getOperand(i));
604
605 for (unsigned i = StartIdx, e = MI.getNumOperands(); i < e; ++i) {
606 MachineOperand &MO = MI.getOperand(i);
607 unsigned TiedTo = e;
608 (void)MI.isRegTiedToDefOperand(i, &TiedTo);
609
610 if (is_contained(Ops, i)) {
611 assert(TiedTo == e && "Cannot fold tied operands");
612 unsigned SpillSize;
613 unsigned SpillOffset;
614 // Compute the spill slot size and offset.
615 const TargetRegisterClass *RC =
616 MF.getRegInfo().getRegClass(MO.getReg());
617 bool Valid =
618 TII.getStackSlotRange(RC, MO.getSubReg(), SpillSize, SpillOffset, MF);
619 if (!Valid)
620 report_fatal_error("cannot spill patchpoint subregister operand");
621 MIB.addImm(StackMaps::IndirectMemRefOp);
622 MIB.addImm(SpillSize);
623 MIB.addFrameIndex(FrameIndex);
624 MIB.addImm(SpillOffset);
625 } else {
626 MIB.add(MO);
627 if (TiedTo < e) {
628 assert(TiedTo < NumDefs && "Bad tied operand");
629 if (TiedTo > DefToFoldIdx)
630 --TiedTo;
631 NewMI->tieOperands(TiedTo, NewMI->getNumOperands() - 1);
632 }
633 }
634 }
635 return NewMI;
636}
637
638static void foldInlineAsmMemOperand(MachineInstr *MI, unsigned OpNo, int FI,
639 const TargetInstrInfo &TII) {
640 // If the machine operand is tied, untie it first.
641 if (MI->getOperand(OpNo).isTied()) {
642 unsigned TiedTo = MI->findTiedOperandIdx(OpNo);
643 MI->untieRegOperand(OpNo);
644 // Intentional recursion!
645 foldInlineAsmMemOperand(MI, TiedTo, FI, TII);
646 }
647
649 TII.getFrameIndexOperands(NewOps, FI);
650 assert(!NewOps.empty() && "getFrameIndexOperands didn't create any operands");
651 MI->removeOperand(OpNo);
652 MI->insert(MI->operands_begin() + OpNo, NewOps);
653
654 // Change the previous operand to a MemKind InlineAsm::Flag. The second param
655 // is the per-target number of operands that represent the memory operand
656 // excluding this one (MD). This includes MO.
658 F.setMemConstraint(InlineAsm::ConstraintCode::m);
659 MachineOperand &MD = MI->getOperand(OpNo - 1);
660 MD.setImm(F);
661}
662
663// Returns nullptr if not possible to fold.
665 ArrayRef<unsigned> Ops, int FI,
666 const TargetInstrInfo &TII) {
667 assert(MI.isInlineAsm() && "wrong opcode");
668 if (Ops.size() > 1)
669 return nullptr;
670 unsigned Op = Ops[0];
671 assert(Op && "should never be first operand");
672 assert(MI.getOperand(Op).isReg() && "shouldn't be folding non-reg operands");
673
674 if (!MI.mayFoldInlineAsmRegOp(Op))
675 return nullptr;
676
677 MachineInstr &NewMI = TII.duplicate(*MI.getParent(), MI.getIterator(), MI);
678
679 foldInlineAsmMemOperand(&NewMI, Op, FI, TII);
680
681 // Update mayload/maystore metadata, and memoperands.
682 const VirtRegInfo &RI =
683 AnalyzeVirtRegInBundle(MI, MI.getOperand(Op).getReg());
686 if (RI.Reads) {
687 ExtraMO.setImm(ExtraMO.getImm() | InlineAsm::Extra_MayLoad);
689 }
690 if (RI.Writes) {
691 ExtraMO.setImm(ExtraMO.getImm() | InlineAsm::Extra_MayStore);
693 }
694 MachineFunction *MF = NewMI.getMF();
695 const MachineFrameInfo &MFI = MF->getFrameInfo();
697 MachinePointerInfo::getFixedStack(*MF, FI), Flags, MFI.getObjectSize(FI),
698 MFI.getObjectAlign(FI));
699 NewMI.addMemOperand(*MF, MMO);
700
701 return &NewMI;
702}
703
705 ArrayRef<unsigned> Ops, int FI,
706 LiveIntervals *LIS,
707 VirtRegMap *VRM) const {
708 auto Flags = MachineMemOperand::MONone;
709 for (unsigned OpIdx : Ops)
710 Flags |= MI.getOperand(OpIdx).isDef() ? MachineMemOperand::MOStore
712
713 MachineBasicBlock *MBB = MI.getParent();
714 assert(MBB && "foldMemoryOperand needs an inserted instruction");
715 MachineFunction &MF = *MBB->getParent();
716
717 // If we're not folding a load into a subreg, the size of the load is the
718 // size of the spill slot. But if we are, we need to figure out what the
719 // actual load size is.
720 int64_t MemSize = 0;
721 const MachineFrameInfo &MFI = MF.getFrameInfo();
722
723 if (Flags & MachineMemOperand::MOStore) {
724 MemSize = MFI.getObjectSize(FI);
725 } else {
726 for (unsigned OpIdx : Ops) {
727 int64_t OpSize = MFI.getObjectSize(FI);
728
729 if (auto SubReg = MI.getOperand(OpIdx).getSubReg()) {
730 unsigned SubRegSize = TRI.getSubRegIdxSize(SubReg);
731 if (SubRegSize > 0 && !(SubRegSize % 8))
732 OpSize = SubRegSize / 8;
733 }
734
735 MemSize = std::max(MemSize, OpSize);
736 }
737 }
738
739 assert(MemSize && "Did not expect a zero-sized stack slot");
740
741 MachineInstr *NewMI = nullptr;
742
743 if (MI.getOpcode() == TargetOpcode::STACKMAP ||
744 MI.getOpcode() == TargetOpcode::PATCHPOINT ||
745 MI.getOpcode() == TargetOpcode::STATEPOINT) {
746 // Fold stackmap/patchpoint.
747 NewMI = foldPatchpoint(MF, MI, Ops, FI, *this);
748 if (NewMI)
749 MBB->insert(MI, NewMI);
750 } else if (MI.isInlineAsm()) {
751 return foldInlineAsmMemOperand(MI, Ops, FI, *this);
752 } else {
753 // Ask the target to do the actual folding.
754 NewMI = foldMemoryOperandImpl(MF, MI, Ops, MI, FI, LIS, VRM);
755 }
756
757 if (NewMI) {
758 NewMI->setMemRefs(MF, MI.memoperands());
759 // Add a memory operand, foldMemoryOperandImpl doesn't do that.
761 NewMI->mayStore()) &&
762 "Folded a def to a non-store!");
763 assert((!(Flags & MachineMemOperand::MOLoad) ||
764 NewMI->mayLoad()) &&
765 "Folded a use to a non-load!");
766 assert(MFI.getObjectOffset(FI) != -1);
767 MachineMemOperand *MMO =
769 Flags, MemSize, MFI.getObjectAlign(FI));
770 NewMI->addMemOperand(MF, MMO);
771
772 // The pass "x86 speculative load hardening" always attaches symbols to
773 // call instructions. We need copy it form old instruction.
774 NewMI->cloneInstrSymbols(MF, MI);
775
776 return NewMI;
777 }
778
779 // Straight COPY may fold as load/store.
780 if (!isCopyInstr(MI) || Ops.size() != 1)
781 return nullptr;
782
783 const TargetRegisterClass *RC = canFoldCopy(MI, *this, Ops[0]);
784 if (!RC)
785 return nullptr;
786
787 const MachineOperand &MO = MI.getOperand(1 - Ops[0]);
789 if (Flags == MachineMemOperand::MOStore) {
790 if (MO.isUndef()) {
791 // If this is an undef copy, we do not need to bother we inserting spill
792 // code.
793 BuildMI(*MBB, Pos, MI.getDebugLoc(), get(TargetOpcode::KILL)).add(MO);
794 } else {
795 storeRegToStackSlot(*MBB, Pos, MO.getReg(), MO.isKill(), FI, RC,
796 Register());
797 }
798 } else
799 loadRegFromStackSlot(*MBB, Pos, MO.getReg(), FI, RC, Register());
800
801 return &*--Pos;
802}
803
806 MachineInstr &LoadMI,
807 LiveIntervals *LIS) const {
808 assert(LoadMI.canFoldAsLoad() && "LoadMI isn't foldable!");
809#ifndef NDEBUG
810 for (unsigned OpIdx : Ops)
811 assert(MI.getOperand(OpIdx).isUse() && "Folding load into def!");
812#endif
813
814 MachineBasicBlock &MBB = *MI.getParent();
815 MachineFunction &MF = *MBB.getParent();
816
817 // Ask the target to do the actual folding.
818 MachineInstr *NewMI = nullptr;
819 int FrameIndex = 0;
820
821 if ((MI.getOpcode() == TargetOpcode::STACKMAP ||
822 MI.getOpcode() == TargetOpcode::PATCHPOINT ||
823 MI.getOpcode() == TargetOpcode::STATEPOINT) &&
824 isLoadFromStackSlot(LoadMI, FrameIndex)) {
825 // Fold stackmap/patchpoint.
826 NewMI = foldPatchpoint(MF, MI, Ops, FrameIndex, *this);
827 if (NewMI)
828 NewMI = &*MBB.insert(MI, NewMI);
829 } else if (MI.isInlineAsm() && isLoadFromStackSlot(LoadMI, FrameIndex)) {
830 return foldInlineAsmMemOperand(MI, Ops, FrameIndex, *this);
831 } else {
832 // Ask the target to do the actual folding.
833 NewMI = foldMemoryOperandImpl(MF, MI, Ops, MI, LoadMI, LIS);
834 }
835
836 if (!NewMI)
837 return nullptr;
838
839 // Copy the memoperands from the load to the folded instruction.
840 if (MI.memoperands_empty()) {
841 NewMI->setMemRefs(MF, LoadMI.memoperands());
842 } else {
843 // Handle the rare case of folding multiple loads.
844 NewMI->setMemRefs(MF, MI.memoperands());
846 E = LoadMI.memoperands_end();
847 I != E; ++I) {
848 NewMI->addMemOperand(MF, *I);
849 }
850 }
851 return NewMI;
852}
853
854/// transferImplicitOperands - MI is a pseudo-instruction, and the lowered
855/// replacement instructions immediately precede it. Copy any implicit
856/// operands from MI to the replacement instruction.
858 const TargetRegisterInfo *TRI) {
860 --CopyMI;
861
862 Register DstReg = MI->getOperand(0).getReg();
863 for (const MachineOperand &MO : MI->implicit_operands()) {
864 CopyMI->addOperand(MO);
865
866 // Be conservative about preserving kills when subregister defs are
867 // involved. If there was implicit kill of a super-register overlapping the
868 // copy result, we would kill the subregisters previous copies defined.
869
870 if (MO.isKill() && TRI->regsOverlap(DstReg, MO.getReg()))
871 CopyMI->getOperand(CopyMI->getNumOperands() - 1).setIsKill(false);
872 }
873}
874
876 MachineInstr *MI, const TargetRegisterInfo * /*Remove me*/) const {
877 if (MI->allDefsAreDead()) {
878 MI->setDesc(get(TargetOpcode::KILL));
879 return;
880 }
881
882 MachineOperand &DstMO = MI->getOperand(0);
883 MachineOperand &SrcMO = MI->getOperand(1);
884
885 bool IdentityCopy = (SrcMO.getReg() == DstMO.getReg());
886 if (IdentityCopy || SrcMO.isUndef()) {
887 // No need to insert an identity copy instruction, but replace with a KILL
888 // if liveness is changed.
889 if (SrcMO.isUndef() || MI->getNumOperands() > 2) {
890 // We must make sure the super-register gets killed. Replace the
891 // instruction with KILL.
892 MI->setDesc(get(TargetOpcode::KILL));
893 return;
894 }
895 // Vanilla identity copy.
896 MI->eraseFromParent();
897 return;
898 }
899
900 copyPhysReg(*MI->getParent(), MI, MI->getDebugLoc(), DstMO.getReg(),
901 SrcMO.getReg(), SrcMO.isKill(),
902 DstMO.getReg().isPhysical() ? DstMO.isRenamable() : false,
903 SrcMO.getReg().isPhysical() ? SrcMO.isRenamable() : false);
904
905 if (MI->getNumOperands() > 2)
907 MI->eraseFromParent();
908}
909
911 const MachineInstr &Inst, const MachineBasicBlock *MBB) const {
912 const MachineOperand &Op1 = Inst.getOperand(1);
913 const MachineOperand &Op2 = Inst.getOperand(2);
914 const MachineRegisterInfo &MRI = MBB->getParent()->getRegInfo();
915
916 // We need virtual register definitions for the operands that we will
917 // reassociate.
918 MachineInstr *MI1 = nullptr;
919 MachineInstr *MI2 = nullptr;
920 if (Op1.isReg() && Op1.getReg().isVirtual())
921 MI1 = MRI.getUniqueVRegDef(Op1.getReg());
922 if (Op2.isReg() && Op2.getReg().isVirtual())
923 MI2 = MRI.getUniqueVRegDef(Op2.getReg());
924
925 // And at least one operand must be defined in MBB.
926 return MI1 && MI2 && (MI1->getParent() == MBB || MI2->getParent() == MBB);
927}
928
930 unsigned Opcode2) const {
931 return Opcode1 == Opcode2 || getInverseOpcode(Opcode1) == Opcode2;
932}
933
935 bool &Commuted) const {
936 const MachineBasicBlock *MBB = Inst.getParent();
937 const MachineRegisterInfo &MRI = MBB->getParent()->getRegInfo();
938 MachineInstr *MI1 = MRI.getUniqueVRegDef(Inst.getOperand(1).getReg());
939 MachineInstr *MI2 = MRI.getUniqueVRegDef(Inst.getOperand(2).getReg());
940 unsigned Opcode = Inst.getOpcode();
941
942 // If only one operand has the same or inverse opcode and it's the second
943 // source operand, the operands must be commuted.
944 Commuted = !areOpcodesEqualOrInverse(Opcode, MI1->getOpcode()) &&
945 areOpcodesEqualOrInverse(Opcode, MI2->getOpcode());
946 if (Commuted)
947 std::swap(MI1, MI2);
948
949 // 1. The previous instruction must be the same type as Inst.
950 // 2. The previous instruction must also be associative/commutative or be the
951 // inverse of such an operation (this can be different even for
952 // instructions with the same opcode if traits like fast-math-flags are
953 // included).
954 // 3. The previous instruction must have virtual register definitions for its
955 // operands in the same basic block as Inst.
956 // 4. The previous instruction's result must only be used by Inst.
957 return areOpcodesEqualOrInverse(Opcode, MI1->getOpcode()) &&
959 isAssociativeAndCommutative(*MI1, /* Invert */ true)) &&
961 MRI.hasOneNonDBGUse(MI1->getOperand(0).getReg());
962}
963
964// 1. The operation must be associative and commutative or be the inverse of
965// such an operation.
966// 2. The instruction must have virtual register definitions for its
967// operands in the same basic block.
968// 3. The instruction must have a reassociable sibling.
970 bool &Commuted) const {
971 return (isAssociativeAndCommutative(Inst) ||
972 isAssociativeAndCommutative(Inst, /* Invert */ true)) &&
973 hasReassociableOperands(Inst, Inst.getParent()) &&
974 hasReassociableSibling(Inst, Commuted);
975}
976
977// Utility routine that checks if \param MO is defined by an
978// \param CombineOpc instruction in the basic block \param MBB.
979// If \param CombineOpc is not provided, the OpCode check will
980// be skipped.
// NOTE(review): the rendered source dropped the signature line (981);
// the call sites below use canCombine(MBB, MachineOperand, [opcode]).
982 unsigned CombineOpc = 0) {
983 MachineRegisterInfo &MRI = MBB.getParent()->getRegInfo();
984 MachineInstr *MI = nullptr;
985
// Only virtual registers have a unique def we can chase.
986 if (MO.isReg() && MO.getReg().isVirtual())
987 MI = MRI.getUniqueVRegDef(MO.getReg());
988 // And it needs to be in the trace (otherwise, it won't have a depth).
989 if (!MI || MI->getParent() != &MBB ||
990 (MI->getOpcode() != CombineOpc && CombineOpc != 0))
991 return false;
992 // Must only be used by the user we combine with.
993 if (!MRI.hasOneNonDBGUse(MO.getReg()))
994 return false;
995
996 return true;
997}
998
999// A chain of accumulation instructions will be selected IFF:
1000// 1. All the accumulation instructions in the chain have the same opcode,
1001// besides the first that has a slightly different opcode because it does
1002// not accumulate into a register.
1003// 2. All the instructions in the chain are combinable (have a single use
1004// which itself is part of the chain).
1005// 3. Meets the required minimum length.
// NOTE(review): signature line (1006) dropped by the renderer; call sites
// use getAccumulatorChain(&Root, Chain). Collects, from the chain tail
// upwards, the result register of CurrentInstr followed by each
// accumulator-input register (operand 1) of the chain members.
1007 MachineInstr *CurrentInstr, SmallVectorImpl<Register> &Chain) const {
1008 // Walk up the chain of accumulation instructions and collect them in the
1009 // vector.
1010 MachineBasicBlock &MBB = *CurrentInstr->getParent();
1011 const MachineRegisterInfo &MRI = MBB.getParent()->getRegInfo();
1012 unsigned AccumulatorOpcode = CurrentInstr->getOpcode();
1013 std::optional<unsigned> ChainStartOpCode =
1014 getAccumulationStartOpcode(AccumulatorOpcode);
1015
// No chain-start form exists for this opcode: leave Chain empty.
1016 if (!ChainStartOpCode.has_value())
1017 return;
1018
1019 // Push the first accumulator result to the start of the chain.
1020 Chain.push_back(CurrentInstr->getOperand(0).getReg());
1021
1022 // Collect the accumulator input register from all instructions in the chain.
1023 while (CurrentInstr &&
1024 canCombine(MBB, CurrentInstr->getOperand(1), AccumulatorOpcode)) {
1025 Chain.push_back(CurrentInstr->getOperand(1).getReg());
// canCombine just verified this register has a unique in-block def, so
// the walk continues with a non-null CurrentInstr.
1026 CurrentInstr = MRI.getUniqueVRegDef(CurrentInstr->getOperand(1).getReg());
1027 }
1028
1029 // Add the instruction at the top of the chain.
1030 if (CurrentInstr->getOpcode() == AccumulatorOpcode &&
1031 canCombine(MBB, CurrentInstr->getOperand(1)))
1032 Chain.push_back(CurrentInstr->getOperand(1).getReg());
1033}
1034
1035/// Find chains of accumulations that can be rewritten as a tree for increased
1036/// ILP.
// NOTE(review): the renderer dropped several hyperlinked lines of this
// function: the signature (1037), the guard conditions at 1039 and 1043
// (early-exit checks preceding the bare `return false;` lines below), the
// Chain vector declaration (1057), and the Patterns.push_back at 1073.
// The surviving code is kept byte-identical.
1038 MachineInstr &Root, SmallVectorImpl<unsigned> &Patterns) const {
1040 return false;
1041
1042 unsigned Opc = Root.getOpcode();
1044 return false;
1045
1046 // Verify that this is the end of the chain.
1047 MachineBasicBlock &MBB = *Root.getParent();
1048 MachineRegisterInfo &MRI = MBB.getParent()->getRegInfo();
1049 if (!MRI.hasOneNonDBGUser(Root.getOperand(0).getReg()))
1050 return false;
1051
// The single user must not itself continue the chain, otherwise Root is
// not the chain tail.
1052 auto User = MRI.use_instr_begin(Root.getOperand(0).getReg());
1053 if (User->getOpcode() == Opc)
1054 return false;
1055
1056 // Walk up the use chain and collect the reduction chain.
1058 getAccumulatorChain(&Root, Chain);
1059
1060 // Reject chains which are too short to be worth modifying.
1061 if (Chain.size() < MinAccumulatorDepth)
1062 return false;
1063
1064 // Check if the MBB this instruction is a part of contains any other chains.
1065 // If so, don't apply it.
1066 SmallSet<Register, 32> ReductionChain(llvm::from_range, Chain)
1067 for (const auto &I : MBB) {
1068 if (I.getOpcode() == Opc &&
1069 !ReductionChain.contains(I.getOperand(0).getReg()))
1070 return false;
1071 }
1072
1074 return true;
1075}
1076
1077// Reduce branches of the accumulator tree by adding them together.
// NOTE(review): the renderer dropped the signature lines (1078, 1080-1085 —
// only two parameter lines survive), the declaration of NewRegs (1086), and
// the MachineInstrBuilder declaration at 1108. Call site:
// reduceAccumulatorTree(RegistersToReduce, InsInstrs, MF, Root, MRI,
// InstIdxForVirtReg, ResultReg). Pairs up registers, emits one reduce
// instruction per pair, and replaces RegistersToReduce with the new
// (roughly halved) set; callers loop until one register remains.
1079 SmallVectorImpl<Register> &RegistersToReduce,
1082 DenseMap<Register, unsigned> &InstrIdxForVirtReg,
1083 Register ResultReg) const {
1086
1087 // Get the opcode for the reduction instruction we will need to build.
1088 // If for some reason it is not defined, early exit and don't apply this.
1089 unsigned ReduceOpCode = getReduceOpcodeForAccumulator(Root.getOpcode());
1090
1091 for (unsigned int i = 1; i <= (RegistersToReduce.size() / 2); i += 2) {
1092 auto RHS = RegistersToReduce[i - 1];
1093 auto LHS = RegistersToReduce[i];
1094 Register Dest;
1095 // If we are reducing 2 registers, reuse the original result register.
1096 if (RegistersToReduce.size() == 2)
1097 Dest = ResultReg;
1098 // Otherwise, create a new virtual register to hold the partial sum.
1099 else {
1100 auto NewVR = MRI.createVirtualRegister(
1101 MRI.getRegClass(Root.getOperand(0).getReg()));
1102 Dest = NewVR;
1103 NewRegs.push_back(Dest);
1104 InstrIdxForVirtReg.insert(std::make_pair(Dest, InsInstrs.size()));
1105 }
1106
1107 // Create the new reduction instruction.
1109 BuildMI(MF, MIMetadata(Root), TII->get(ReduceOpCode), Dest)
1110 .addReg(RHS, getKillRegState(true))
1111 .addReg(LHS, getKillRegState(true));
1112 // Copy any flags needed from the original instruction.
1113 MIB->setFlags(Root.getFlags());
1114 InsInstrs.push_back(MIB);
1115 }
1116
1117 // If the number of registers to reduce is odd, add the remaining register to
1118 // the vector of registers to reduce.
1119 if (RegistersToReduce.size() % 2 != 0)
1120 NewRegs.push_back(RegistersToReduce[RegistersToReduce.size() - 1]);
1121
1122 RegistersToReduce = std::move(NewRegs);
1123}
1124
1125// The concept of the reassociation pass is that these operations can benefit
1126// from this kind of transformation:
1127//
1128// A = ? op ?
1129// B = A op X (Prev)
1130// C = B op Y (Root)
1131// -->
1132// A = ? op ?
1133// B = X op Y
1134// C = A op B
1135//
1136// breaking the dependency between A and B, allowing them to be executed in
1137// parallel (or back-to-back in a pipeline) instead of depending on each other.
1138
1139// FIXME: This has the potential to be expensive (compile time) while not
1140// improving the code at all. Some ways to limit the overhead:
1141// 1. Track successful transforms; bail out if hit rate gets too low.
1142// 2. Only enable at -O3 or some other non-default optimization level.
1143// 3. Pre-screen pattern candidates here: if an operand of the previous
1144// instruction is known to not increase the critical path, then don't match
1145// that pattern.
// NOTE(review): the renderer dropped the signature line (1146) and the
// Patterns.push_back lines inside both branches of the Commute check
// (1156-1160). Collects candidate reassociation / accumulator patterns for
// Root into Patterns; returns true if any pattern was found.
1147 MachineInstr &Root, SmallVectorImpl<unsigned> &Patterns,
1148 bool DoRegPressureReduce) const {
1149 bool Commute;
1150 if (isReassociationCandidate(Root, Commute)) {
1151 // We found a sequence of instructions that may be suitable for a
1152 // reassociation of operands to increase ILP. Specify each commutation
1153 // possibility for the Prev instruction in the sequence and let the
1154 // machine combiner decide if changing the operands is worthwhile.
1155 if (Commute) {
1158 } else {
1161 }
1162 return true;
1163 }
// Fall back to the accumulator-chain-to-tree rewrite.
1164 if (getAccumulatorReassociationPatterns(Root, Patterns))
1165 return true;
1166
1167 return false;
1168}
1169
1170/// Return true when a code sequence can improve loop throughput.
// NOTE(review): the signature line (1171) was dropped by the renderer —
// presumably the default isThroughputPattern implementation; confirm against
// the upstream file. Default: no pattern improves throughput.
1172 return false;
1173}
1174
// NOTE(review): the renderer dropped the signature (1175-1176), the case
// label(s) and their return statements (1178-1179, 1181) from this switch —
// only the skeleton survives. Presumably this maps a MachineCombinerPattern
// to a CombinerObjective; confirm against the upstream file.
1177 switch (Pattern) {
1180 default:
1182 }
1183}
1184
1185std::pair<unsigned, unsigned>
// NOTE(review): the renderer dropped the qualified function-name line (1186)
// — the call site below uses getReassociationOpcodes(Pattern, Root, Prev) —
// plus the assert condition at 1204 and the four case labels of the switch
// (1236, 1244, 1252, 1260; the REASSOC_AX_BY/XA_BY/AX_YB/XA_YB cases per the
// comment table). Surviving code is kept byte-identical.
// Returns {new Root opcode, new Prev opcode} for the reassociated sequence.
1187 const MachineInstr &Root,
1188 const MachineInstr &Prev) const {
1189 bool AssocCommutRoot = isAssociativeAndCommutative(Root);
1190 bool AssocCommutPrev = isAssociativeAndCommutative(Prev);
1191
1192 // Early exit if both opcodes are associative and commutative. It's a trivial
1193 // reassociation when we only change operands order. In this case opcodes are
1194 // not required to have inverse versions.
1195 if (AssocCommutRoot && AssocCommutPrev) {
1196 assert(Root.getOpcode() == Prev.getOpcode() && "Expected to be equal");
1197 return std::make_pair(Root.getOpcode(), Root.getOpcode());
1198 }
1199
1200 // At least one instruction is not associative or commutative.
1201 // Since we have matched one of the reassociation patterns, we expect that the
1202 // instructions' opcodes are equal or one of them is the inversion of the
1203 // other.
1205 "Incorrectly matched pattern");
1206 unsigned AssocCommutOpcode = Root.getOpcode();
1207 unsigned InverseOpcode = *getInverseOpcode(Root.getOpcode());
1208 if (!AssocCommutRoot)
1209 std::swap(AssocCommutOpcode, InverseOpcode);
1210
1211 // The transformation rule (`+` is any associative and commutative binary
1212 // operation, `-` is the inverse):
1213 // REASSOC_AX_BY:
1214 // (A + X) + Y => A + (X + Y)
1215 // (A + X) - Y => A + (X - Y)
1216 // (A - X) + Y => A - (X - Y)
1217 // (A - X) - Y => A - (X + Y)
1218 // REASSOC_XA_BY:
1219 // (X + A) + Y => (X + Y) + A
1220 // (X + A) - Y => (X - Y) + A
1221 // (X - A) + Y => (X + Y) - A
1222 // (X - A) - Y => (X - Y) - A
1223 // REASSOC_AX_YB:
1224 // Y + (A + X) => (Y + X) + A
1225 // Y - (A + X) => (Y - X) - A
1226 // Y + (A - X) => (Y - X) + A
1227 // Y - (A - X) => (Y + X) - A
1228 // REASSOC_XA_YB:
1229 // Y + (X + A) => (Y + X) + A
1230 // Y - (X + A) => (Y - X) - A
1231 // Y + (X - A) => (Y + X) - A
1232 // Y - (X - A) => (Y - X) + A
1233 switch (Pattern) {
1234 default:
1235 llvm_unreachable("Unexpected pattern");
1237 if (!AssocCommutRoot && AssocCommutPrev)
1238 return {AssocCommutOpcode, InverseOpcode};
1239 if (AssocCommutRoot && !AssocCommutPrev)
1240 return {InverseOpcode, InverseOpcode};
1241 if (!AssocCommutRoot && !AssocCommutPrev)
1242 return {InverseOpcode, AssocCommutOpcode};
1243 break;
1245 if (!AssocCommutRoot && AssocCommutPrev)
1246 return {AssocCommutOpcode, InverseOpcode};
1247 if (AssocCommutRoot && !AssocCommutPrev)
1248 return {InverseOpcode, AssocCommutOpcode};
1249 if (!AssocCommutRoot && !AssocCommutPrev)
1250 return {InverseOpcode, InverseOpcode};
1251 break;
1253 if (!AssocCommutRoot && AssocCommutPrev)
1254 return {InverseOpcode, InverseOpcode};
1255 if (AssocCommutRoot && !AssocCommutPrev)
1256 return {AssocCommutOpcode, InverseOpcode};
1257 if (!AssocCommutRoot && !AssocCommutPrev)
1258 return {InverseOpcode, AssocCommutOpcode};
1259 break;
1261 if (!AssocCommutRoot && AssocCommutPrev)
1262 return {InverseOpcode, InverseOpcode};
1263 if (AssocCommutRoot && !AssocCommutPrev)
1264 return {InverseOpcode, AssocCommutOpcode};
1265 if (!AssocCommutRoot && !AssocCommutPrev)
1266 return {AssocCommutOpcode, InverseOpcode};
1267 break;
1268 }
// Every AssocCommut combination above returns; the all-true case was handled
// by the early exit, so reaching here is a logic error.
1269 llvm_unreachable("Unhandled combination");
1270}
1271
1272// Return a pair of boolean flags showing if the new root and new prev operands
1273// must be swapped. See visual example of the rule in
1274// TargetInstrInfo::getReassociationOpcodes.
// NOTE(review): the four case labels (1279, 1281, 1283, 1285) were dropped by
// the renderer; only their return statements survive.
1275static std::pair<bool, bool> mustSwapOperands(unsigned Pattern) {
1276 switch (Pattern) {
1277 default:
1278 llvm_unreachable("Unexpected pattern");
1280 return {false, false};
1282 return {true, false};
1284 return {true, true};
1286 return {true, true};
1287 }
1288}
1289
// NOTE(review): the renderer dropped the function-name line (1290) —
// presumably the operand-index mapping helper used by the reassociation code
// (confirm against the upstream file) — and the four case labels (1294, 1297,
// 1300, 1303). Fills OperandIndices with per-pattern operand positions.
1291 const MachineInstr &Root, unsigned Pattern,
1292 std::array<unsigned, 5> &OperandIndices) const {
1293 switch (Pattern) {
1295 OperandIndices = {1, 1, 1, 2, 2};
1296 break;
1298 OperandIndices = {2, 1, 2, 2, 1};
1299 break;
1301 OperandIndices = {1, 2, 1, 1, 2};
1302 break;
1304 OperandIndices = {2, 2, 2, 1, 1};
1305 break;
1306 default:
1307 llvm_unreachable("unexpected MachineCombinerPattern");
1308 }
1309}
1310
1311/// Attempt the reassociation transformation to reduce critical path length.
1312/// See the above comments before getMachineCombinerPatterns().
// NOTE(review): the renderer dropped several hyperlinked lines: parts of the
// signature (1313, 1315-1317), TII/TRI locals (1321), the OpA/OpB/OpX/OpY
// operand declarations (1324-1327), and the poison-flag-clearing statements
// after each setFlags call (1455-1458, 1461-1464). Surviving code is kept
// byte-identical. Call site: reassociateOps(Root, *Prev, Pattern, InsInstrs,
// DelInstrs, OperandIndices, InstIdxForVirtReg).
1314 MachineInstr &Root, MachineInstr &Prev, unsigned Pattern,
1318 DenseMap<Register, unsigned> &InstrIdxForVirtReg) const {
1319 MachineFunction *MF = Root.getMF();
1320 MachineRegisterInfo &MRI = MF->getRegInfo();
1322 const TargetRegisterClass *RC = Root.getRegClassConstraint(0, TII, &TRI);
1323
1328 MachineOperand &OpC = Root.getOperand(0);
1329
1330 Register RegA = OpA.getReg();
1331 unsigned SubRegA = OpA.getSubReg();
1332 Register RegB = OpB.getReg();
1333 Register RegX = OpX.getReg();
1334 unsigned SubRegX = OpX.getSubReg();
1335 Register RegY = OpY.getReg();
1336 unsigned SubRegY = OpY.getSubReg();
1337 Register RegC = OpC.getReg();
1338
// Constrain all participating virtual registers to Root's class so the new
// instructions remain register-allocatable.
1339 if (RegA.isVirtual())
1340 MRI.constrainRegClass(RegA, RC);
1341 if (RegB.isVirtual())
1342 MRI.constrainRegClass(RegB, RC);
1343 if (RegX.isVirtual())
1344 MRI.constrainRegClass(RegX, RC);
1345 if (RegY.isVirtual())
1346 MRI.constrainRegClass(RegY, RC);
1347 if (RegC.isVirtual())
1348 MRI.constrainRegClass(RegC, RC);
1349
1350 // Create a new virtual register for the result of (X op Y) instead of
1351 // recycling RegB because the MachineCombiner's computation of the critical
1352 // path requires a new register definition rather than an existing one.
1353 Register NewVR = MRI.createVirtualRegister(RC);
1354 unsigned SubRegNewVR = 0;
1355 InstrIdxForVirtReg.insert(std::make_pair(NewVR, 0));
1356
1357 auto [NewRootOpc, NewPrevOpc] = getReassociationOpcodes(Pattern, Root, Prev);
1358 bool KillA = OpA.isKill();
1359 bool KillX = OpX.isKill();
1360 bool KillY = OpY.isKill();
1361 bool KillNewVR = true;
1362
1363 auto [SwapRootOperands, SwapPrevOperands] = mustSwapOperands(Pattern);
1364
1365 if (SwapPrevOperands) {
1366 std::swap(RegX, RegY);
1367 std::swap(SubRegX, SubRegY);
1368 std::swap(KillX, KillY);
1369 }
1370
// NOTE(review): the four case labels of this switch (1374, 1380, 1386, 1392)
// were dropped by the renderer; only the assignment bodies survive.
1371 unsigned PrevFirstOpIdx, PrevSecondOpIdx;
1372 unsigned RootFirstOpIdx, RootSecondOpIdx;
1373 switch (Pattern) {
1375 PrevFirstOpIdx = OperandIndices[1];
1376 PrevSecondOpIdx = OperandIndices[3];
1377 RootFirstOpIdx = OperandIndices[2];
1378 RootSecondOpIdx = OperandIndices[4];
1379 break;
1381 PrevFirstOpIdx = OperandIndices[1];
1382 PrevSecondOpIdx = OperandIndices[3];
1383 RootFirstOpIdx = OperandIndices[4];
1384 RootSecondOpIdx = OperandIndices[2];
1385 break;
1387 PrevFirstOpIdx = OperandIndices[3];
1388 PrevSecondOpIdx = OperandIndices[1];
1389 RootFirstOpIdx = OperandIndices[2];
1390 RootSecondOpIdx = OperandIndices[4];
1391 break;
1393 PrevFirstOpIdx = OperandIndices[3];
1394 PrevSecondOpIdx = OperandIndices[1];
1395 RootFirstOpIdx = OperandIndices[4];
1396 RootSecondOpIdx = OperandIndices[2];
1397 break;
1398 default:
1399 llvm_unreachable("unexpected MachineCombinerPattern");
1400 }
1401
1402 // Basically BuildMI but doesn't add implicit operands by default.
1403 auto buildMINoImplicit = [](MachineFunction &MF, const MIMetadata &MIMD,
1404 const MCInstrDesc &MCID, Register DestReg) {
1405 return MachineInstrBuilder(
1406 MF, MF.CreateMachineInstr(MCID, MIMD.getDL(), /*NoImpl=*/true))
1407 .copyMIMetadata(MIMD)
1408 .addReg(DestReg, RegState::Define);
1409 };
1410
1411 // Create new instructions for insertion.
1412 MachineInstrBuilder MIB1 =
1413 buildMINoImplicit(*MF, MIMetadata(Prev), TII->get(NewPrevOpc), NewVR);
1414 for (const auto &MO : Prev.explicit_operands()) {
1415 unsigned Idx = MO.getOperandNo();
1416 // Skip the result operand we'd already added.
1417 if (Idx == 0)
1418 continue;
1419 if (Idx == PrevFirstOpIdx)
1420 MIB1.addReg(RegX, getKillRegState(KillX), SubRegX);
1421 else if (Idx == PrevSecondOpIdx)
1422 MIB1.addReg(RegY, getKillRegState(KillY), SubRegY);
1423 else
1424 MIB1.add(MO);
1425 }
1426 MIB1.copyImplicitOps(Prev);
1427
1428 if (SwapRootOperands) {
1429 std::swap(RegA, NewVR);
1430 std::swap(SubRegA, SubRegNewVR);
1431 std::swap(KillA, KillNewVR);
1432 }
1433
1434 MachineInstrBuilder MIB2 =
1435 buildMINoImplicit(*MF, MIMetadata(Root), TII->get(NewRootOpc), RegC);
1436 for (const auto &MO : Root.explicit_operands()) {
1437 unsigned Idx = MO.getOperandNo();
1438 // Skip the result operand.
1439 if (Idx == 0)
1440 continue;
1441 if (Idx == RootFirstOpIdx)
1442 MIB2 = MIB2.addReg(RegA, getKillRegState(KillA), SubRegA);
1443 else if (Idx == RootSecondOpIdx)
1444 MIB2 = MIB2.addReg(NewVR, getKillRegState(KillNewVR), SubRegNewVR);
1445 else
1446 MIB2 = MIB2.add(MO);
1447 }
1448 MIB2.copyImplicitOps(Root);
1449
1450 // Propagate FP flags from the original instructions.
1451 // But clear poison-generating flags because those may not be valid now.
1452 // TODO: There should be a helper function for copying only fast-math-flags.
1453 uint32_t IntersectedFlags = Root.getFlags() & Prev.getFlags();
1454 MIB1->setFlags(IntersectedFlags);
1459
1460 MIB2->setFlags(IntersectedFlags);
1465
1466 setSpecialOperandAttr(Root, Prev, *MIB1, *MIB2);
1467
1468 // Record new instructions for insertion and old instructions for deletion.
1469 InsInstrs.push_back(MIB1);
1470 InsInstrs.push_back(MIB2);
1471 DelInstrs.push_back(&Prev);
1472 DelInstrs.push_back(&Root);
1473
1474 // We transformed:
1475 // B = A op X (Prev)
1476 // C = B op Y (Root)
1477 // Into:
1478 // B = X op Y (MIB1)
1479 // C = A op B (MIB2)
1480 // C has the same value as before, B doesn't; as such, keep the debug number
1481 // of C but not of B.
1482 if (unsigned OldRootNum = Root.peekDebugInstrNum())
1483 MIB2.getInstr()->setDebugInstrNum(OldRootNum);
1484}
1485
// NOTE(review): the renderer dropped the function-name/signature lines
// (1485-1486, 1488-1489) — presumably the default genAlternativeCodeSequence
// implementation (confirm against the upstream file) — plus the TII local
// (1494), the case labels of the outer switch (1497-1500, 1516), the
// getReassociateOperandIndices call (1504), the Prev lookup expression
// (1506), the MaxAccumulatorWidth assert condition (1520), the `:` arm of the
// MaxWidth ternary (1524), the MIB declaration (1534), and the
// createVirtualRegister arm of the AccReg ternary (1550).
// Dispatches on Pattern: reassociation patterns call reassociateOps;
// the accumulator pattern rewrites a chain into a reduction tree.
1487 MachineInstr &Root, unsigned Pattern,
1490 DenseMap<Register, unsigned> &InstIdxForVirtReg) const {
1491 MachineRegisterInfo &MRI = Root.getMF()->getRegInfo();
1492 MachineBasicBlock &MBB = *Root.getParent();
1493 MachineFunction &MF = *MBB.getParent();
1495
1496 switch (Pattern) {
1501 // Select the previous instruction in the sequence based on the input
1502 // pattern.
1503 std::array<unsigned, 5> OperandIndices;
1505 MachineInstr *Prev =
1507
1508 // Don't reassociate if Prev and Root are in different blocks.
1509 if (Prev->getParent() != Root.getParent())
1510 return;
1511
1512 reassociateOps(Root, *Prev, Pattern, InsInstrs, DelInstrs, OperandIndices,
1513 InstIdxForVirtReg);
1514 break;
1515 }
1517 SmallVector<Register, 32> ChainRegs;
1518 getAccumulatorChain(&Root, ChainRegs);
1519 unsigned int Depth = ChainRegs.size();
1521 "Max accumulator width set to illegal value");
1522 unsigned int MaxWidth = Log2_32(Depth) < MaxAccumulatorWidth
1523 ? Log2_32(Depth)
1525
1526 // Walk down the chain and rewrite it as a tree.
1527 for (auto IndexedReg : llvm::enumerate(llvm::reverse(ChainRegs))) {
1528 // No need to rewrite the first node, it is already perfect as it is.
1529 if (IndexedReg.index() == 0)
1530 continue;
1531
1532 // FIXME: Losing subregisters
1533 MachineInstr *Instr = MRI.getUniqueVRegDef(IndexedReg.value());
1535 Register AccReg;
1536 if (IndexedReg.index() < MaxWidth) {
1537 // Now we need to create new instructions for the first row.
1538 AccReg = Instr->getOperand(0).getReg();
1539 unsigned OpCode = getAccumulationStartOpcode(Root.getOpcode());
1540
1541 MIB = BuildMI(MF, MIMetadata(*Instr), TII->get(OpCode), AccReg)
1542 .addReg(Instr->getOperand(2).getReg(),
1543 getKillRegState(Instr->getOperand(2).isKill()))
1544 .addReg(Instr->getOperand(3).getReg(),
1545 getKillRegState(Instr->getOperand(3).isKill()));
1546 } else {
1547 // For the remaining cases, we need to use an output register of one of
1548 // the newly inserted instructions as operand 1
1549 AccReg = Instr->getOperand(0).getReg() == Root.getOperand(0).getReg()
1551 MRI.getRegClass(Root.getOperand(0).getReg()))
1552 : Instr->getOperand(0).getReg();
1553 assert(IndexedReg.index() >= MaxWidth);
1554 auto AccumulatorInput =
1555 ChainRegs[Depth - (IndexedReg.index() - MaxWidth) - 1];
1556 MIB = BuildMI(MF, MIMetadata(*Instr), TII->get(Instr->getOpcode()),
1557 AccReg)
1558 .addReg(AccumulatorInput, getKillRegState(true))
1559 .addReg(Instr->getOperand(2).getReg(),
1560 getKillRegState(Instr->getOperand(2).isKill()))
1561 .addReg(Instr->getOperand(3).getReg(),
1562 getKillRegState(Instr->getOperand(3).isKill()));
1563 }
1564
1565 MIB->setFlags(Instr->getFlags());
1566 InstIdxForVirtReg.insert(std::make_pair(AccReg, InsInstrs.size()));
1567 InsInstrs.push_back(MIB);
1568 DelInstrs.push_back(Instr);
1569 }
1570
// The last MaxWidth inserted instructions are the tree leaves; reduce them
// pairwise until a single register (Root's result) remains.
1571 SmallVector<Register, 8> RegistersToReduce;
1572 for (unsigned i = (InsInstrs.size() - MaxWidth); i < InsInstrs.size();
1573 ++i) {
1574 auto Reg = InsInstrs[i]->getOperand(0).getReg();
1575 RegistersToReduce.push_back(Reg);
1576 }
1577
1578 while (RegistersToReduce.size() > 1)
1579 reduceAccumulatorTree(RegistersToReduce, InsInstrs, MF, Root, MRI,
1580 InstIdxForVirtReg, Root.getOperand(0).getReg());
1581
1582 break;
1583 }
1584 }
1585}
1586
1590
// NOTE(review): the renderer dropped the function-name line (1591) —
// presumably the default trivially-rematerializable check (the comments below
// refer to "Remat clients"); confirm against the upstream file. Conservative
// predicate: returns true only when re-executing MI elsewhere is clearly safe.
1592 const MachineInstr &MI) const {
1593 const MachineFunction &MF = *MI.getMF();
1594 const MachineRegisterInfo &MRI = MF.getRegInfo();
1595
1596 // Remat clients assume operand 0 is the defined register.
1597 if (!MI.getNumOperands() || !MI.getOperand(0).isReg())
1598 return false;
1599 Register DefReg = MI.getOperand(0).getReg();
1600
1601 // A sub-register definition can only be rematerialized if the instruction
1602 // doesn't read the other parts of the register. Otherwise it is really a
1603 // read-modify-write operation on the full virtual register which cannot be
1604 // moved safely.
1605 if (DefReg.isVirtual() && MI.getOperand(0).getSubReg() &&
1606 MI.readsVirtualRegister(DefReg))
1607 return false;
1608
1609 // A load from a fixed stack slot can be rematerialized. This may be
1610 // redundant with subsequent checks, but it's target-independent,
1611 // simple, and a common case.
1612 int FrameIdx = 0;
1613 if (isLoadFromStackSlot(MI, FrameIdx) &&
1614 MF.getFrameInfo().isImmutableObjectIndex(FrameIdx))
1615 return true;
1616
1617 // Avoid instructions obviously unsafe for remat.
1618 if (MI.isNotDuplicable() || MI.mayStore() || MI.mayRaiseFPException() ||
1619 MI.hasUnmodeledSideEffects())
1620 return false;
1621
1622 // Don't remat inline asm. We have no idea how expensive it is
1623 // even if it's side effect free.
1624 if (MI.isInlineAsm())
1625 return false;
1626
1627 // Avoid instructions which load from potentially varying memory.
1628 if (MI.mayLoad() && !MI.isDereferenceableInvariantLoad())
1629 return false;
1630
1631 // If any of the registers accessed are non-constant, conservatively assume
1632 // the instruction is not rematerializable.
1633 for (const MachineOperand &MO : MI.operands()) {
1634 if (!MO.isReg()) continue;
1635 Register Reg = MO.getReg();
1636 if (Reg == 0)
1637 continue;
1638
1639 // Check for a well-behaved physical register.
1640 if (Reg.isPhysical()) {
1641 if (MO.isUse()) {
1642 // If the physreg has no defs anywhere, it's just an ambient register
1643 // and we can freely move its uses. Alternatively, if it's allocatable,
1644 // it could get allocated to something with a def during allocation.
1645 if (!MRI.isConstantPhysReg(Reg))
1646 return false;
1647 } else {
1648 // A physreg def. We can't remat it.
1649 return false;
1650 }
1651 continue;
1652 }
1653
1654 // Only allow one virtual-register def. There may be multiple defs of the
1655 // same virtual register, though.
1656 if (MO.isDef() && Reg != DefReg)
1657 return false;
1658 }
1659
1660 // Everything checked out.
1661 return true;
1662}
1663
// NOTE(review): the renderer dropped the function-name line (1664) —
// presumably the stack-pointer-adjustment query for frame pseudo
// instructions (confirm against the upstream file) — plus the TFI local
// (1666) and the StackGrowsDown initializer expression (1668).
// Returns the signed SP adjustment performed by a frame setup/destroy
// instruction, or 0 for non-frame instructions.
1665 const MachineFunction *MF = MI.getMF();
1667 bool StackGrowsDown =
1669
1670 unsigned FrameSetupOpcode = getCallFrameSetupOpcode();
1671 unsigned FrameDestroyOpcode = getCallFrameDestroyOpcode();
1672
1673 if (!isFrameInstr(MI))
1674 return 0;
1675
1676 int SPAdj = TFI->alignSPAdjust(getFrameSize(MI));
1677
// Setup on an upward-growing stack (or destroy on a downward-growing one)
// moves SP in the negative direction.
1678 if ((!StackGrowsDown && MI.getOpcode() == FrameSetupOpcode) ||
1679 (StackGrowsDown && MI.getOpcode() == FrameDestroyOpcode))
1680 SPAdj = -SPAdj;
1681
1682 return SPAdj;
1683}
1684
1685/// isSchedulingBoundary - Test if the given instruction should be
1686/// considered a scheduling boundary. This primarily includes labels
1687/// and terminators.
// NOTE(review): the renderer dropped the function-name line (1688); the
// parameter lines below survive.
1689 const MachineBasicBlock *MBB,
1690 const MachineFunction &MF) const {
1691 // Terminators and labels can't be scheduled around.
1692 if (MI.isTerminator() || MI.isPosition())
1693 return true;
1694
1695 // INLINEASM_BR can jump to another block
1696 if (MI.getOpcode() == TargetOpcode::INLINEASM_BR)
1697 return true;
1698
1699 // Don't attempt to schedule around any instruction that defines
1700 // a stack-oriented pointer, as it's unlikely to be profitable. This
1701 // saves compile time, because it doesn't require every single
1702 // stack slot reference to depend on the instruction that does the
1703 // modification.
1704 const TargetLowering &TLI = *MF.getSubtarget().getTargetLowering();
1705 return MI.modifiesRegister(TLI.getStackPointerRegisterToSaveRestore(), &TRI);
1706}
1707
1708// Provide a global flag for disabling the PreRA hazard recognizer that targets
1709// may choose to honor.
// NOTE(review): the renderer dropped lines 1710-1712 here (the body that
// consults the DisableHazardRecognizer flag declared at the top of the file).
1713
1714// Default implementation of CreateTargetRAHazardRecognizer.
// NOTE(review): signature lines (1715-1716) dropped; only the trailing
// parameter survives.
1717 const ScheduleDAG *DAG) const {
1718 // Dummy hazard recognizer allows all instructions to issue.
1719 return new ScheduleHazardRecognizer();
1720}
1721
1722// Default implementation of CreateTargetMIHazardRecognizer.
// NOTE(review): the function-name line (1723) was dropped by the renderer.
// Default: a scoreboard recognizer driven by the itinerary data.
1724 const InstrItineraryData *II, const ScheduleDAGMI *DAG) const {
1725 return new ScoreboardHazardRecognizer(II, DAG, "machine-scheduler");
1726}
1727
1728// Default implementation of CreateTargetPostRAHazardRecognizer.
1734
1736// Default implementation of getMemOperandWithOffset.
// NOTE(review): the renderer dropped the signature's first line and the local
// declarations at 1739-1740 (the BaseOps vector and the width variable passed
// to getMemOperandsWithOffsetWidth below).
// Succeeds only when the multi-operand query reports exactly one base op.
1737 const MachineInstr &MI, const MachineOperand *&BaseOp, int64_t &Offset,
1738 bool &OffsetIsScalable, const TargetRegisterInfo * /*RemoveMe*/) const {
1741 if (!getMemOperandsWithOffsetWidth(MI, BaseOps, Offset, OffsetIsScalable,
1742 Width, &TRI) ||
1743 BaseOps.size() != 1)
1744 return false;
1745 BaseOp = BaseOps.front();
1746 return true;
1747}
1748
1749//===----------------------------------------------------------------------===//
1750// SelectionDAG latency interface.
1751//===----------------------------------------------------------------------===//
1752
1753std::optional<unsigned>
// NOTE(review): the qualified function-name line (1754) was dropped by the
// renderer. SelectionDAG-level operand latency: def/use cycle lookup via the
// instruction itineraries; std::nullopt when no itinerary data exists or the
// def is not a machine opcode.
1755 SDNode *DefNode, unsigned DefIdx,
1756 SDNode *UseNode, unsigned UseIdx) const {
1757 if (!ItinData || ItinData->isEmpty())
1758 return std::nullopt;
1759
1760 if (!DefNode->isMachineOpcode())
1761 return std::nullopt;
1762
1763 unsigned DefClass = get(DefNode->getMachineOpcode()).getSchedClass();
// Non-machine users: fall back to the def operand's cycle alone.
1764 if (!UseNode->isMachineOpcode())
1765 return ItinData->getOperandCycle(DefClass, DefIdx);
1766 unsigned UseClass = get(UseNode->getMachineOpcode()).getSchedClass();
1767 return ItinData->getOperandLatency(DefClass, DefIdx, UseClass, UseIdx);
1768}
1769
// NOTE(review): the signature's first line (1770) was dropped by the
// renderer. Whole-node latency for an SDNode: stage latency from the
// itineraries, defaulting to 1 without itinerary data or for non-machine
// opcodes.
1771 SDNode *N) const {
1772 if (!ItinData || ItinData->isEmpty())
1773 return 1;
1774
1775 if (!N->isMachineOpcode())
1776 return 1;
1777
1778 return ItinData->getStageLatency(get(N->getMachineOpcode()).getSchedClass());
1779}
1780
1781//===----------------------------------------------------------------------===//
1782// MachineInstr latency interface.
1783//===----------------------------------------------------------------------===//
1784
// NOTE(review): the function-name line (1785) was dropped by the renderer —
// presumably the micro-op count query (it reads NumMicroOps below); confirm
// against the upstream file.
1786 const MachineInstr &MI) const {
1787 if (!ItinData || ItinData->isEmpty())
1788 return 1;
1789
1790 unsigned Class = MI.getDesc().getSchedClass();
1791 int UOps = ItinData->Itineraries[Class].NumMicroOps;
1792 if (UOps >= 0)
1793 return UOps;
1794
1795 // The # of u-ops is dynamically determined. The specific target should
1796 // override this function to return the right number.
1797 return 1;
1798}
1799
1800/// Return the default expected latency for a def based on it's opcode.
// NOTE(review): the signature's first line (1801) was dropped by the
// renderer. Transient instructions are free; loads and high-latency defs use
// the scheduling model's LoadLatency / HighLatency; everything else is 1.
1802 const MachineInstr &DefMI) const {
1803 if (DefMI.isTransient())
1804 return 0;
1805 if (DefMI.mayLoad())
1806 return SchedModel.LoadLatency;
1807 if (isHighLatencyDef(DefMI.getOpcode()))
1808 return SchedModel.HighLatency;
1809 return 1;
1810}
1811
// NOTE(review): the signature line (1811) was dropped by the renderer —
// presumably the default predication-cost query; confirm against the
// upstream file. Default: predication is free.
1812 return 0;
1813}
1815
// NOTE(review): the signature's first line (1816) was dropped by the
// renderer. MachineInstr-level latency via the itinerary stage latency.
1817 const MachineInstr &MI,
1818 unsigned *PredCost) const {
1819 // Default to one cycle for no itinerary. However, an "empty" itinerary may
1820 // still have a MinLatency property, which getStageLatency checks.
1821 if (!ItinData)
1822 return MI.mayLoad() ? 2 : 1;
1823
1824 return ItinData->getStageLatency(MI.getDesc().getSchedClass());
1825}
1826
// NOTE(review): the signature's first line (1827) was dropped by the
// renderer — presumably the low-def-latency predicate; confirm against the
// upstream file. True when the itinerary reports the def's operand cycle
// as at most 1.
1828 const MachineInstr &DefMI,
1829 unsigned DefIdx) const {
1830 const InstrItineraryData *ItinData = SchedModel.getInstrItineraries();
1831 if (!ItinData || ItinData->isEmpty())
1832 return false;
1833
1834 unsigned DefClass = DefMI.getDesc().getSchedClass();
1835 std::optional<unsigned> DefCycle =
1836 ItinData->getOperandCycle(DefClass, DefIdx);
1837 return DefCycle && DefCycle <= 1U;
1838}
1839
// NOTE(review): the signature line (1839-1840) was dropped by the renderer —
// presumably a predicate deciding whether MF may be split into hot/cold
// parts; confirm against the upstream file.
1841 // TODO: We don't split functions where a section attribute has been set
1842 // since the split part may not be placed in a contiguous region. It may also
1843 // be more beneficial to augment the linker to ensure contiguous layout of
1844 // split functions within the same section as specified by the attribute.
1845 if (MF.getFunction().hasSection())
1846 return false;
1847
1848 // We don't want to proceed further for cold functions
1849 // or functions of unknown hotness. Lukewarm functions have no prefix.
1850 std::optional<StringRef> SectionPrefix = MF.getFunction().getSectionPrefix();
1851 if (SectionPrefix &&
1852 (*SectionPrefix == "unlikely" || *SectionPrefix == "unknown")) {
1853 return false;
1854 }
1855
1856 return true;
1857}
1858
1859std::optional<ParamLoadedValue>
// NOTE(review): the renderer dropped the qualified function-name line (1860),
// the TRI/Expr locals at 1863, the Expr-append line at 1887, and the Ops
// vector declaration at 1921-1922. Describes the value loaded/moved into Reg
// by MI (copy, add-immediate, or single-mem-operand load) as a
// ParamLoadedValue for call-site parameter debug info.
1861 Register Reg) const {
1862 const MachineFunction *MF = MI.getMF();
1864 int64_t Offset;
1865 bool OffsetIsScalable;
1866
1867 // To simplify the sub-register handling, verify that we only need to
1868 // consider physical registers.
1869 assert(MF->getProperties().hasNoVRegs());
1870
1871 if (auto DestSrc = isCopyInstr(MI)) {
1872 Register DestReg = DestSrc->Destination->getReg();
1873
1874 // If the copy destination is the forwarding reg, describe the forwarding
1875 // reg using the copy source as the backup location. Example:
1876 //
1877 // x0 = MOV x7
1878 // call callee(x0) ; x0 described as x7
1879 if (Reg == DestReg)
1880 return ParamLoadedValue(*DestSrc->Source, Expr);
1881
1882 // If the target's hook couldn't describe this copy, give up.
1883 return std::nullopt;
1884 } else if (auto RegImm = isAddImmediate(MI, Reg)) {
1885 Register SrcReg = RegImm->Reg;
1886 Offset = RegImm->Imm;
1888 return ParamLoadedValue(MachineOperand::CreateReg(SrcReg, false), Expr);
1889 } else if (MI.hasOneMemOperand()) {
1890 // Only describe memory which provably does not escape the function. As
1891 // described in llvm.org/PR43343, escaped memory may be clobbered by the
1892 // callee (or by another thread).
1893 const MachineFrameInfo &MFI = MF->getFrameInfo();
1894 const MachineMemOperand *MMO = MI.memoperands()[0];
1895 const PseudoSourceValue *PSV = MMO->getPseudoValue();
1896
1897 // If the address points to "special" memory (e.g. a spill slot), it's
1898 // sufficient to check that it isn't aliased by any high-level IR value.
1899 if (!PSV || PSV->mayAlias(&MFI))
1900 return std::nullopt;
1901
1902 const MachineOperand *BaseOp;
1903 if (!getMemOperandWithOffset(MI, BaseOp, Offset, OffsetIsScalable, &TRI))
1904 return std::nullopt;
1905
1906 // FIXME: Scalable offsets are not yet handled in the offset code below.
1907 if (OffsetIsScalable)
1908 return std::nullopt;
1909
1910 // TODO: Can currently only handle mem instructions with a single define.
1911 // An example from the x86 target:
1912 // ...
1913 // DIV64m $rsp, 1, $noreg, 24, $noreg, implicit-def dead $rax, implicit-def $rdx
1914 // ...
1915 //
1916 if (MI.getNumExplicitDefs() != 1)
1917 return std::nullopt;
1918
1919 // TODO: In what way do we need to take Reg into consideration here?
1920
1923 Ops.push_back(dwarf::DW_OP_deref_size);
1924 Ops.push_back(MMO->getSize().hasValue() ? MMO->getSize().getValue()
1925 : ~UINT64_C(0));
1926 Expr = DIExpression::prependOpcodes(Expr, Ops);
1927 return ParamLoadedValue(*BaseOp, Expr);
1928 }
1929
1930 return std::nullopt;
1931}
1932
1933// Get the call frame size just before MI.
// NOTE(review): the signature line (1934) was dropped by the renderer.
// Scans backwards for the nearest frame setup/destroy; falls back to the
// block's entry call-frame size.
1936 MachineBasicBlock *MBB = MI.getParent();
1937 for (auto &AdjI : reverse(make_range(MBB->instr_begin(), MI.getIterator()))) {
1938 if (AdjI.getOpcode() == getCallFrameSetupOpcode())
1939 return getFrameTotalSize(AdjI);
1940 if (AdjI.getOpcode() == getCallFrameDestroyOpcode())
1941 return 0;
1942 }
1943
1944 // If none was found, use the call frame size from the start of the basic
1945 // block.
1946 return MBB->getCallFrameSize();
1948
1949/// Both DefMI and UseMI must be valid. By default, call directly to the
1950/// itinerary. This may be overriden by the target.
// NOTE(review): the signature's first line (1951) was dropped by the
// renderer.
1952 const InstrItineraryData *ItinData, const MachineInstr &DefMI,
1953 unsigned DefIdx, const MachineInstr &UseMI, unsigned UseIdx) const {
1954 unsigned DefClass = DefMI.getDesc().getSchedClass();
1955 unsigned UseClass = UseMI.getDesc().getSchedClass();
1956 return ItinData->getOperandLatency(DefClass, DefIdx, UseClass, UseIdx);
1957}
1958
// NOTE(review): the signature's first line (1959) was dropped by the
// renderer. Decomposes a REG_SEQUENCE (or delegates -Like variants) into its
// (Reg:SubReg, SubIdx) input pairs.
1960 const MachineInstr &MI, unsigned DefIdx,
1961 SmallVectorImpl<RegSubRegPairAndIdx> &InputRegs) const {
1962 assert((MI.isRegSequence() ||
1963 MI.isRegSequenceLike()) && "Instruction do not have the proper type");
1964
1965 if (!MI.isRegSequence())
1966 return getRegSequenceLikeInputs(MI, DefIdx, InputRegs);
1967
1968 // We are looking at:
1969 // Def = REG_SEQUENCE v0, sub0, v1, sub1, ...
1970 assert(DefIdx == 0 && "REG_SEQUENCE only has one def");
// Operands come in (register, subreg-index) pairs after the def.
1971 for (unsigned OpIdx = 1, EndOpIdx = MI.getNumOperands(); OpIdx != EndOpIdx;
1972 OpIdx += 2) {
1973 const MachineOperand &MOReg = MI.getOperand(OpIdx);
1974 if (MOReg.isUndef())
1975 continue;
1976 const MachineOperand &MOSubIdx = MI.getOperand(OpIdx + 1);
1977 assert(MOSubIdx.isImm() &&
1978 "One of the subindex of the reg_sequence is not an immediate");
1979 // Record Reg:SubReg, SubIdx.
1980 InputRegs.push_back(RegSubRegPairAndIdx(MOReg.getReg(), MOReg.getSubReg(),
1981 (unsigned)MOSubIdx.getImm()));
1982 }
1983 return true;
1984}
1985
1987 const MachineInstr &MI, unsigned DefIdx,
1988 RegSubRegPairAndIdx &InputReg) const {
1989 assert((MI.isExtractSubreg() ||
1990 MI.isExtractSubregLike()) && "Instruction do not have the proper type");
1991
1992 if (!MI.isExtractSubreg())
1993 return getExtractSubregLikeInputs(MI, DefIdx, InputReg);
1994
1995 // We are looking at:
1996 // Def = EXTRACT_SUBREG v0.sub1, sub0.
1997 assert(DefIdx == 0 && "EXTRACT_SUBREG only has one def");
1998 const MachineOperand &MOReg = MI.getOperand(1);
1999 if (MOReg.isUndef())
2000 return false;
2001 const MachineOperand &MOSubIdx = MI.getOperand(2);
2002 assert(MOSubIdx.isImm() &&
2003 "The subindex of the extract_subreg is not an immediate");
2004
2005 InputReg.Reg = MOReg.getReg();
2006 InputReg.SubReg = MOReg.getSubReg();
2007 InputReg.SubIdx = (unsigned)MOSubIdx.getImm();
2008 return true;
2009}
2010
2012 const MachineInstr &MI, unsigned DefIdx,
2013 RegSubRegPair &BaseReg, RegSubRegPairAndIdx &InsertedReg) const {
2014 assert((MI.isInsertSubreg() ||
2015 MI.isInsertSubregLike()) && "Instruction do not have the proper type");
2016
2017 if (!MI.isInsertSubreg())
2018 return getInsertSubregLikeInputs(MI, DefIdx, BaseReg, InsertedReg);
2019
2020 // We are looking at:
2021 // Def = INSERT_SEQUENCE v0, v1, sub0.
2022 assert(DefIdx == 0 && "INSERT_SUBREG only has one def");
2023 const MachineOperand &MOBaseReg = MI.getOperand(1);
2024 const MachineOperand &MOInsertedReg = MI.getOperand(2);
2025 if (MOInsertedReg.isUndef())
2026 return false;
2027 const MachineOperand &MOSubIdx = MI.getOperand(3);
2028 assert(MOSubIdx.isImm() &&
2029 "One of the subindex of the reg_sequence is not an immediate");
2030 BaseReg.Reg = MOBaseReg.getReg();
2031 BaseReg.SubReg = MOBaseReg.getSubReg();
2032
2033 InsertedReg.Reg = MOInsertedReg.getReg();
2034 InsertedReg.SubReg = MOInsertedReg.getSubReg();
2035 InsertedReg.SubIdx = (unsigned)MOSubIdx.getImm();
2036 return true;
2037}
2038
2039// Returns a MIRPrinter comment for this machine operand.
2041 const MachineInstr &MI, const MachineOperand &Op, unsigned OpIdx,
2042 const TargetRegisterInfo * /*RemoveMe*/) const {
2043
2044 if (!MI.isInlineAsm())
2045 return "";
2046
2047 std::string Flags;
2048 raw_string_ostream OS(Flags);
2049
2051 // Print HasSideEffects, MayLoad, MayStore, IsAlignStack
2052 unsigned ExtraInfo = Op.getImm();
2053 OS << interleaved(InlineAsm::getExtraInfoNames(ExtraInfo), " ");
2054 return Flags;
2055 }
2056
2057 int FlagIdx = MI.findInlineAsmFlagIdx(OpIdx);
2058 if (FlagIdx < 0 || (unsigned)FlagIdx != OpIdx)
2059 return "";
2060
2061 assert(Op.isImm() && "Expected flag operand to be an immediate");
2062 // Pretty print the inline asm operand descriptor.
2063 unsigned Flag = Op.getImm();
2064 const InlineAsm::Flag F(Flag);
2065 OS << F.getKindName();
2066
2067 unsigned RCID;
2068 if (!F.isImmKind() && !F.isMemKind() && F.hasRegClassConstraint(RCID))
2069 OS << ':' << TRI.getRegClassName(TRI.getRegClass(RCID));
2070
2071 if (F.isMemKind()) {
2072 InlineAsm::ConstraintCode MCID = F.getMemoryConstraintID();
2074 }
2075
2076 unsigned TiedTo;
2077 if (F.isUseOperandTiedToDef(TiedTo))
2078 OS << " tiedto:$" << TiedTo;
2079
2080 if ((F.isRegDefKind() || F.isRegDefEarlyClobberKind() || F.isRegUseKind()) &&
2081 F.getRegMayBeFolded())
2082 OS << " foldable";
2083
2084 return Flags;
2085}
2086
2088
2090 Function &F, std::vector<outliner::Candidate> &Candidates) const {
2091 // Include target features from an arbitrary candidate for the outlined
2092 // function. This makes sure the outlined function knows what kinds of
2093 // instructions are going into it. This is fine, since all parent functions
2094 // must necessarily support the instructions that are in the outlined region.
2095 outliner::Candidate &FirstCand = Candidates.front();
2096 const Function &ParentFn = FirstCand.getMF()->getFunction();
2097 if (ParentFn.hasFnAttribute("target-features"))
2098 F.addFnAttr(ParentFn.getFnAttribute("target-features"));
2099 if (ParentFn.hasFnAttribute("target-cpu"))
2100 F.addFnAttr(ParentFn.getFnAttribute("target-cpu"));
2101
2102 // Set nounwind, so we don't generate eh_frame.
2103 if (llvm::all_of(Candidates, [](const outliner::Candidate &C) {
2104 return C.getMF()->getFunction().hasFnAttribute(Attribute::NoUnwind);
2105 }))
2106 F.addFnAttr(Attribute::NoUnwind);
2107}
2108
2112 unsigned Flags) const {
2113 MachineInstr &MI = *MIT;
2114
2115 // NOTE: MI.isMetaInstruction() will match CFI_INSTRUCTION, but some targets
2116 // have support for outlining those. Special-case that here.
2117 if (MI.isCFIInstruction())
2118 // Just go right to the target implementation.
2119 return getOutliningTypeImpl(MMI, MIT, Flags);
2120
2121 // Be conservative about inline assembly.
2122 if (MI.isInlineAsm())
2124
2125 // Labels generally can't safely be outlined.
2126 if (MI.isLabel())
2128
2129 // Don't let debug instructions impact analysis.
2130 if (MI.isDebugInstr())
2132
2133 // Some other special cases.
2134 switch (MI.getOpcode()) {
2135 case TargetOpcode::IMPLICIT_DEF:
2136 case TargetOpcode::KILL:
2137 case TargetOpcode::LIFETIME_START:
2138 case TargetOpcode::LIFETIME_END:
2140 default:
2141 break;
2142 }
2143
2144 // Is this a terminator for a basic block?
2145 if (MI.isTerminator()) {
2146 // If this is a branch to another block, we can't outline it.
2147 if (!MI.getParent()->succ_empty())
2149
2150 // Don't outline if the branch is not unconditional.
2151 if (isPredicated(MI))
2153 }
2154
2155 // Make sure none of the operands of this instruction do anything that
2156 // might break if they're moved outside their current function.
2157 // This includes MachineBasicBlock references, BlockAddressses,
2158 // Constant pool indices and jump table indices.
2159 //
2160 // A quick note on MO_TargetIndex:
2161 // This doesn't seem to be used in any of the architectures that the
2162 // MachineOutliner supports, but it was still filtered out in all of them.
2163 // There was one exception (RISC-V), but MO_TargetIndex also isn't used there.
2164 // As such, this check is removed both here and in the target-specific
2165 // implementations. Instead, we assert to make sure this doesn't
2166 // catch anyone off-guard somewhere down the line.
2167 for (const MachineOperand &MOP : MI.operands()) {
2168 // If you hit this assertion, please remove it and adjust
2169 // `getOutliningTypeImpl` for your target appropriately if necessary.
2170 // Adding the assertion back to other supported architectures
2171 // would be nice too :)
2172 assert(!MOP.isTargetIndex() && "This isn't used quite yet!");
2173
2174 // CFI instructions should already have been filtered out at this point.
2175 assert(!MOP.isCFIIndex() && "CFI instructions handled elsewhere!");
2176
2177 // PrologEpilogInserter should've already run at this point.
2178 assert(!MOP.isFI() && "FrameIndex instructions should be gone by now!");
2179
2180 if (MOP.isMBB() || MOP.isBlockAddress() || MOP.isCPI() || MOP.isJTI())
2182 }
2183
2184 // If we don't know, delegate to the target-specific hook.
2185 return getOutliningTypeImpl(MMI, MIT, Flags);
2186}
2187
2189 unsigned &Flags) const {
2190 // Some instrumentations create special TargetOpcode at the start which
2191 // expands to special code sequences which must be present.
2192 auto First = MBB.getFirstNonDebugInstr();
2193 if (First == MBB.end())
2194 return true;
2195
2196 if (First->getOpcode() == TargetOpcode::FENTRY_CALL ||
2197 First->getOpcode() == TargetOpcode::PATCHABLE_FUNCTION_ENTER)
2198 return false;
2199
2200 // Some instrumentations create special pseudo-instructions at or just before
2201 // the end that must be present.
2202 auto Last = MBB.getLastNonDebugInstr();
2203 if (Last->getOpcode() == TargetOpcode::PATCHABLE_RET ||
2204 Last->getOpcode() == TargetOpcode::PATCHABLE_TAIL_CALL)
2205 return false;
2206
2207 if (Last != First && Last->isReturn()) {
2208 --Last;
2209 if (Last->getOpcode() == TargetOpcode::PATCHABLE_FUNCTION_EXIT ||
2210 Last->getOpcode() == TargetOpcode::PATCHABLE_TAIL_CALL)
2211 return false;
2212 }
2213 return true;
2214}
2215
2217 return MI->isCall() || MI->hasUnmodeledSideEffects() ||
2218 (MI->hasOrderedMemoryRef() && !MI->isDereferenceableInvariantLoad());
2219}
MachineInstrBuilder & UseMI
MachineInstrBuilder MachineInstrBuilder & DefMI
assert(UImm &&(UImm !=~static_cast< T >(0)) &&"Invalid immediate!")
SmallVector< int16_t, MAX_SRC_OPERANDS_NUM > OperandIndices
MachineBasicBlock & MBB
MachineBasicBlock MachineBasicBlock::iterator DebugLoc DL
DXIL Forward Handle Accesses
This file contains constants used for implementing Dwarf debug support.
const HexagonInstrInfo * TII
IRTranslator LLVM IR MI
const AbstractManglingParser< Derived, Alloc >::OperatorInfo AbstractManglingParser< Derived, Alloc >::Ops[]
#define F(x, y, z)
Definition MD5.cpp:54
#define I(x, y, z)
Definition MD5.cpp:57
Register const TargetRegisterInfo * TRI
Promote Memory to Register
Definition Mem2Reg.cpp:110
static MCRegister getReg(const MCDisassembler *D, unsigned RC, unsigned RegNo)
MachineInstr unsigned OpIdx
uint64_t IntrinsicInst * II
TargetInstrInfo::RegSubRegPairAndIdx RegSubRegPairAndIdx
This file defines the SmallSet class.
This file contains some functions that are useful when dealing with strings.
static bool isAsmComment(const char *Str, const MCAsmInfo &MAI)
static void transferImplicitOperands(MachineInstr *MI, const TargetRegisterInfo *TRI)
transferImplicitOperands - MI is a pseudo-instruction, and the lowered replacement instructions immed...
static cl::opt< bool > EnableAccReassociation("acc-reassoc", cl::Hidden, cl::init(true), cl::desc("Enable reassociation of accumulation chains"))
static std::pair< bool, bool > mustSwapOperands(unsigned Pattern)
static const TargetRegisterClass * canFoldCopy(const MachineInstr &MI, const TargetInstrInfo &TII, unsigned FoldIdx)
static cl::opt< unsigned int > MinAccumulatorDepth("acc-min-depth", cl::Hidden, cl::init(8), cl::desc("Minimum length of accumulator chains " "required for the optimization to kick in"))
static void foldInlineAsmMemOperand(MachineInstr *MI, unsigned OpNo, int FI, const TargetInstrInfo &TII)
static cl::opt< unsigned int > MaxAccumulatorWidth("acc-max-width", cl::Hidden, cl::init(3), cl::desc("Maximum number of branches in the accumulator tree"))
static bool canCombine(MachineBasicBlock &MBB, MachineOperand &MO, unsigned CombineOpc=0)
static cl::opt< bool > DisableHazardRecognizer("disable-sched-hazard", cl::Hidden, cl::init(false), cl::desc("Disable hazard detection during preRA scheduling"))
static MachineInstr * foldPatchpoint(MachineFunction &MF, MachineInstr &MI, ArrayRef< unsigned > Ops, int FrameIndex, const TargetInstrInfo &TII)
This file describes how to lower LLVM code to machine code.
ArrayRef - Represent a constant reference to an array (0 or more elements consecutively in memory),...
Definition ArrayRef.h:40
DWARF expression.
static LLVM_ABI void appendOffset(SmallVectorImpl< uint64_t > &Ops, int64_t Offset)
Append Ops with operations to apply the Offset.
static LLVM_ABI DIExpression * prepend(const DIExpression *Expr, uint8_t Flags, int64_t Offset=0)
Prepend DIExpr with a deref and offset operation and optionally turn it into a stack value or/and an ...
static LLVM_ABI DIExpression * prependOpcodes(const DIExpression *Expr, SmallVectorImpl< uint64_t > &Ops, bool StackValue=false, bool EntryValue=false)
Prepend DIExpr with the given opcodes and optionally turn it into a stack value.
bool isLittleEndian() const
Layout endianness...
Definition DataLayout.h:215
A debug info location.
Definition DebugLoc.h:123
std::pair< iterator, bool > insert(const std::pair< KeyT, ValueT > &KV)
Definition DenseMap.h:241
Attribute getFnAttribute(Attribute::AttrKind Kind) const
Return the attribute for the given attribute kind.
Definition Function.cpp:763
LLVMContext & getContext() const
getContext - Return a reference to the LLVMContext associated with this function.
Definition Function.cpp:358
bool hasFnAttribute(Attribute::AttrKind Kind) const
Return true if the function has the attribute.
Definition Function.cpp:728
LLVM_ABI std::optional< StringRef > getSectionPrefix() const
Get the section prefix for this global object.
Definition Globals.cpp:310
bool hasSection() const
Check if this global has a custom object file section.
static std::vector< StringRef > getExtraInfoNames(unsigned ExtraInfo)
Definition InlineAsm.h:451
static StringRef getMemConstraintName(ConstraintCode C)
Definition InlineAsm.h:475
Itinerary data supplied by a subtarget to be used by a target.
std::optional< unsigned > getOperandCycle(unsigned ItinClassIndx, unsigned OperandIdx) const
Return the cycle for the given class and operand.
unsigned getStageLatency(unsigned ItinClassIndx) const
Return the total stage latency of the given class.
std::optional< unsigned > getOperandLatency(unsigned DefClass, unsigned DefIdx, unsigned UseClass, unsigned UseIdx) const
Compute and return the use operand latency of a given itinerary class and operand index if the value ...
const InstrItinerary * Itineraries
Array of itineraries selected.
bool isEmpty() const
Returns true if there are no itineraries.
bool hasValue() const
static LocationSize precise(uint64_t Value)
TypeSize getValue() const
This class is intended to be used as a base class for asm properties and features specific to the tar...
Definition MCAsmInfo.h:64
virtual unsigned getMaxInstLength(const MCSubtargetInfo *STI=nullptr) const
Returns the maximum possible encoded instruction size in bytes.
Definition MCAsmInfo.h:529
StringRef getCommentString() const
Definition MCAsmInfo.h:540
const char * getSeparatorString() const
Definition MCAsmInfo.h:535
Instances of this class represent a single low-level machine instruction.
Definition MCInst.h:188
Describe properties that are true of each instruction in the target description file.
unsigned getSchedClass() const
Return the scheduling class for this instruction.
const MCInstrDesc & get(unsigned Opcode) const
Return the machine instruction descriptor that corresponds to the specified instruction opcode.
Definition MCInstrInfo.h:90
This holds information about one operand of a machine instruction, indicating the register class for ...
Definition MCInstrDesc.h:86
static MDTuple * get(LLVMContext &Context, ArrayRef< Metadata * > MDs)
Definition Metadata.h:1572
Set of metadata that should be preserved when using BuildMI().
MachineInstrBundleIterator< MachineInstr > iterator
The MachineFrameInfo class represents an abstract stack frame until prolog/epilog code is inserted.
bool isImmutableObjectIndex(int ObjectIdx) const
Returns true if the specified index corresponds to an immutable object.
Align getObjectAlign(int ObjectIdx) const
Return the alignment of the specified stack object.
int64_t getObjectSize(int ObjectIdx) const
Return the size of the specified object.
int64_t getObjectOffset(int ObjectIdx) const
Return the assigned stack offset of the specified object from the incoming stack pointer.
const TargetSubtargetInfo & getSubtarget() const
getSubtarget - Return the subtarget for which this machine code is being compiled.
MachineMemOperand * getMachineMemOperand(MachinePointerInfo PtrInfo, MachineMemOperand::Flags f, LLT MemTy, Align base_alignment, const AAMDNodes &AAInfo=AAMDNodes(), const MDNode *Ranges=nullptr, SyncScope::ID SSID=SyncScope::System, AtomicOrdering Ordering=AtomicOrdering::NotAtomic, AtomicOrdering FailureOrdering=AtomicOrdering::NotAtomic)
getMachineMemOperand - Allocate a new MachineMemOperand.
MachineFrameInfo & getFrameInfo()
getFrameInfo - Return the frame info object for the current function.
MachineRegisterInfo & getRegInfo()
getRegInfo - Return information about the registers currently in use.
const DataLayout & getDataLayout() const
Return the DataLayout attached to the Module associated to this MF.
Function & getFunction()
Return the LLVM function that this machine code represents.
BasicBlockListType::iterator iterator
const MachineFunctionProperties & getProperties() const
Get the function properties.
const TargetMachine & getTarget() const
getTarget - Return the target machine this machine code is compiled with
const MachineInstrBuilder & addReg(Register RegNo, RegState Flags={}, unsigned SubReg=0) const
Add a new virtual register operand.
const MachineInstrBuilder & addImm(int64_t Val) const
Add a new immediate operand.
const MachineInstrBuilder & add(const MachineOperand &MO) const
const MachineInstrBuilder & addFrameIndex(int Idx) const
const MachineInstrBuilder & copyImplicitOps(const MachineInstr &OtherMI) const
Copy all the implicit operands from OtherMI onto this one.
MachineInstr * getInstr() const
If conversion operators fail, use this method to get the MachineInstr explicitly.
const MachineInstrBuilder & copyMIMetadata(const MIMetadata &MIMD) const
Representation of each machine instruction.
ArrayRef< MachineMemOperand * >::iterator mmo_iterator
unsigned getOpcode() const
Returns the opcode of this MachineInstr.
const MachineBasicBlock * getParent() const
void setFlags(unsigned flags)
unsigned getNumOperands() const
Retuns the total number of operands.
void setDebugInstrNum(unsigned Num)
Set instruction number of this MachineInstr.
mmo_iterator memoperands_end() const
Access to memory operands of the instruction.
unsigned peekDebugInstrNum() const
Examine the instruction number of this MachineInstr.
LLVM_ABI void setMemRefs(MachineFunction &MF, ArrayRef< MachineMemOperand * > MemRefs)
Assign this MachineInstr's memory reference descriptor list.
bool mayLoad(QueryType Type=AnyInBundle) const
Return true if this instruction could possibly read memory.
bool isCFIInstruction() const
bool isNotDuplicable(QueryType Type=AnyInBundle) const
Return true if this instruction cannot be safely duplicated.
void clearFlag(MIFlag Flag)
clearFlag - Clear a MI flag.
mop_range explicit_operands()
LLVM_ABI void tieOperands(unsigned DefIdx, unsigned UseIdx)
Add a tie between the register operands at DefIdx and UseIdx.
mmo_iterator memoperands_begin() const
Access to memory operands of the instruction.
LLVM_ABI void cloneInstrSymbols(MachineFunction &MF, const MachineInstr &MI)
Clone another MachineInstr's pre- and post- instruction symbols and replace ours with it.
LLVM_ABI bool isIdenticalTo(const MachineInstr &Other, MICheckType Check=CheckDefs) const
Return true if this instruction is identical to Other.
LLVM_ABI const MachineFunction * getMF() const
Return the function that contains the basic block that this instruction belongs to.
ArrayRef< MachineMemOperand * > memoperands() const
Access to memory operands of the instruction.
bool mayStore(QueryType Type=AnyInBundle) const
Return true if this instruction could possibly modify memory.
const MachineOperand & getOperand(unsigned i) const
uint32_t getFlags() const
Return the MI flags bitvector.
bool canFoldAsLoad(QueryType Type=IgnoreBundle) const
Return true for instructions that can be folded as memory operands in other instructions.
LLVM_ABI const TargetRegisterClass * getRegClassConstraint(unsigned OpIdx, const TargetInstrInfo *TII, const TargetRegisterInfo *TRI) const
Compute the static register class constraint for operand OpIdx.
LLVM_ABI void addMemOperand(MachineFunction &MF, MachineMemOperand *MO)
Add a MachineMemOperand to the machine instruction.
A description of a memory reference used in the backend.
LocationSize getSize() const
Return the size in bytes of the memory reference.
const PseudoSourceValue * getPseudoValue() const
Flags
Flags values. These may be or'd together.
@ MOLoad
The memory access reads data.
@ MOStore
The memory access writes data.
This class contains meta information specific to a module.
MachineOperand class - Representation of each machine instruction operand.
void setSubReg(unsigned subReg)
unsigned getSubReg() const
void setIsInternalRead(bool Val=true)
void setImm(int64_t immVal)
int64_t getImm() const
LLVM_ABI void setIsRenamable(bool Val=true)
bool isReg() const
isReg - Tests if this is a MO_Register operand.
LLVM_ABI void setReg(Register Reg)
Change the register this operand corresponds to.
bool isImm() const
isImm - Tests if this is a MO_Immediate operand.
void setIsKill(bool Val=true)
LLVM_ABI bool isRenamable() const
isRenamable - Returns true if this register may be renamed, i.e.
void setMBB(MachineBasicBlock *MBB)
void setIsUndef(bool Val=true)
Register getReg() const
getReg - Returns the register number.
static MachineOperand CreateReg(Register Reg, bool isDef, bool isImp=false, bool isKill=false, bool isDead=false, bool isUndef=false, bool isEarlyClobber=false, unsigned SubReg=0, bool isDebug=false, bool isInternalRead=false, bool isRenamable=false)
bool isMBB() const
isMBB - Tests if this is a MO_MachineBasicBlock operand.
MachineRegisterInfo - Keep track of information for virtual and physical registers,...
LLVM_ABI bool hasOneNonDBGUse(Register RegNo) const
hasOneNonDBGUse - Return true if there is exactly one non-Debug use of the specified register.
const TargetRegisterClass * getRegClass(Register Reg) const
Return the register class of the specified virtual register.
LLVM_ABI MachineInstr * getVRegDef(Register Reg) const
getVRegDef - Return the machine instr that defines the specified virtual register or null if none is ...
use_instr_iterator use_instr_begin(Register RegNo) const
LLVM_ABI Register createVirtualRegister(const TargetRegisterClass *RegClass, StringRef Name="")
createVirtualRegister - Create and return a new virtual register in the function with the specified r...
LLVM_ABI bool hasOneNonDBGUser(Register RegNo) const
hasOneNonDBGUse - Return true if there is exactly one non-Debug instruction using the specified regis...
LLVM_ABI bool isConstantPhysReg(MCRegister PhysReg) const
Returns true if PhysReg is unallocatable and constant throughout the function.
LLVM_ABI const TargetRegisterClass * constrainRegClass(Register Reg, const TargetRegisterClass *RC, unsigned MinNumRegs=0)
constrainRegClass - Constrain the register class of the specified virtual register to be a common sub...
LLVM_ABI MachineInstr * getUniqueVRegDef(Register Reg) const
getUniqueVRegDef - Return the unique machine instr that defines the specified virtual register or nul...
MI-level patchpoint operands.
Definition StackMaps.h:77
Special value supplied for machine level alias analysis.
virtual bool mayAlias(const MachineFrameInfo *) const
Return true if the memory pointed to by this PseudoSourceValue can ever alias an LLVM IR Value.
Wrapper class representing virtual and physical registers.
Definition Register.h:20
constexpr bool isVirtual() const
Return true if the specified register number is in the virtual register namespace.
Definition Register.h:79
constexpr bool isPhysical() const
Return true if the specified register number is in the physical register namespace.
Definition Register.h:83
Represents one node in the SelectionDAG.
bool isMachineOpcode() const
Test if this node has a post-isel opcode, directly corresponding to a MachineInstr opcode.
unsigned getMachineOpcode() const
This may only be called if isMachineOpcode returns true.
ScheduleDAGMI is an implementation of ScheduleDAGInstrs that simply schedules machine instructions ac...
HazardRecognizer - This determines whether or not an instruction can be issued this cycle,...
SmallSet - This maintains a set of unique values, optimizing for the case when the set is small (less...
Definition SmallSet.h:134
bool contains(const T &V) const
Check if the SmallSet contains the given element.
Definition SmallSet.h:229
This class consists of common code factored out of the SmallVector class to reduce code duplication b...
void push_back(const T &Elt)
This is a 'vector' (really, a variable-sized array), optimized for the case when the array is small.
MI-level stackmap operands.
Definition StackMaps.h:36
MI-level Statepoint operands.
Definition StackMaps.h:159
constexpr size_t size() const
size - Get the string size.
Definition StringRef.h:143
constexpr const char * data() const
data - Get a pointer to the start of the string (which may not be null terminated).
Definition StringRef.h:137
Information about stack frame layout on the target.
StackDirection getStackGrowthDirection() const
getStackGrowthDirection - Return the direction the stack grows
int alignSPAdjust(int SPAdj) const
alignSPAdjust - This method aligns the stack adjustment to the correct alignment.
TargetInstrInfo - Interface to description of machine instruction set.
virtual ScheduleHazardRecognizer * CreateTargetPostRAHazardRecognizer(const InstrItineraryData *, const ScheduleDAG *DAG) const
Allocate and return a hazard recognizer to use for this target when scheduling the machine instructio...
virtual MachineInstr * foldMemoryOperandImpl(MachineFunction &MF, MachineInstr &MI, ArrayRef< unsigned > Ops, MachineBasicBlock::iterator InsertPt, int FrameIndex, LiveIntervals *LIS=nullptr, VirtRegMap *VRM=nullptr) const
Target-dependent implementation for foldMemoryOperand.
virtual bool hasLowDefLatency(const TargetSchedModel &SchedModel, const MachineInstr &DefMI, unsigned DefIdx) const
Compute operand latency of a def of 'Reg'.
virtual void setSpecialOperandAttr(MachineInstr &OldMI1, MachineInstr &OldMI2, MachineInstr &NewMI1, MachineInstr &NewMI2) const
This is an architecture-specific helper function of reassociateOps.
const TargetRegisterInfo & TRI
virtual unsigned getNumMicroOps(const InstrItineraryData *ItinData, const MachineInstr &MI) const
Return the number of u-operations the given machine instruction will be decoded to on the target cpu.
virtual const TargetRegisterClass * getRegClass(const MCInstrDesc &MCID, unsigned OpNum) const
Given a machine instruction descriptor, returns the register class constraint for OpNum,...
virtual int getSPAdjust(const MachineInstr &MI) const
Returns the actual stack pointer adjustment made by an instruction as part of a call sequence.
virtual void ReplaceTailWithBranchTo(MachineBasicBlock::iterator Tail, MachineBasicBlock *NewDest) const
Delete the instruction OldInst and everything after it, replacing it with an unconditional branch to ...
virtual bool PredicateInstruction(MachineInstr &MI, ArrayRef< MachineOperand > Pred) const
Convert the instruction into a predicated instruction.
int16_t getOpRegClassID(const MCOperandInfo &OpInfo) const
bool areOpcodesEqualOrInverse(unsigned Opcode1, unsigned Opcode2) const
Return true when \P Opcode1 or its inversion is equal to \P Opcode2.
virtual outliner::InstrType getOutliningTypeImpl(const MachineModuleInfo &MMI, MachineBasicBlock::iterator &MIT, unsigned Flags) const
Target-dependent implementation for getOutliningTypeImpl.
virtual bool getInsertSubregLikeInputs(const MachineInstr &MI, unsigned DefIdx, RegSubRegPair &BaseReg, RegSubRegPairAndIdx &InsertedReg) const
Target-dependent implementation of getInsertSubregInputs.
outliner::InstrType getOutliningType(const MachineModuleInfo &MMI, MachineBasicBlock::iterator &MIT, unsigned Flags) const
Returns how or if MIT should be outlined.
virtual bool isThroughputPattern(unsigned Pattern) const
Return true when a code sequence can improve throughput.
bool getAccumulatorReassociationPatterns(MachineInstr &Root, SmallVectorImpl< unsigned > &Patterns) const
Find chains of accumulations that can be rewritten as a tree for increased ILP.
virtual std::pair< unsigned, unsigned > getPatchpointUnfoldableRange(const MachineInstr &MI) const
For a patchpoint, stackmap, or statepoint intrinsic, return the range of operands which can't be fold...
virtual bool findCommutedOpIndices(const MachineInstr &MI, unsigned &SrcOpIdx1, unsigned &SrcOpIdx2) const
Returns true iff the routine could find two commutable operands in the given machine instruction.
virtual void mergeOutliningCandidateAttributes(Function &F, std::vector< outliner::Candidate > &Candidates) const
Optional target hook to create the LLVM IR attributes for the outlined function.
bool isUnpredicatedTerminator(const MachineInstr &MI) const
Returns true if the instruction is a terminator instruction that has not been predicated.
virtual void insertNoop(MachineBasicBlock &MBB, MachineBasicBlock::iterator MI) const
Insert a noop into the instruction stream at the specified point.
void getAccumulatorChain(MachineInstr *CurrentInstr, SmallVectorImpl< Register > &Chain) const
Find the chain of accumulator instructions in \P MBB and return them in \P Chain.
bool isFrameInstr(const MachineInstr &I) const
Returns true if the argument is a frame pseudo instruction.
virtual bool getRegSequenceLikeInputs(const MachineInstr &MI, unsigned DefIdx, SmallVectorImpl< RegSubRegPairAndIdx > &InputRegs) const
Target-dependent implementation of getRegSequenceInputs.
virtual bool getStackSlotRange(const TargetRegisterClass *RC, unsigned SubIdx, unsigned &Size, unsigned &Offset, const MachineFunction &MF) const
Compute the size in bytes and offset within a stack slot of a spilled register or subregister.
virtual ScheduleHazardRecognizer * CreateTargetMIHazardRecognizer(const InstrItineraryData *, const ScheduleDAGMI *DAG) const
Allocate and return a hazard recognizer to use for this target when scheduling the machine instructio...
virtual bool hasStoreToStackSlot(const MachineInstr &MI, SmallVectorImpl< const MachineMemOperand * > &Accesses) const
If the specified machine instruction has a store to a stack slot, return true along with the FrameInd...
virtual bool hasReassociableOperands(const MachineInstr &Inst, const MachineBasicBlock *MBB) const
Return true when \P Inst has reassociable operands in the same \P MBB.
virtual void storeRegToStackSlot(MachineBasicBlock &MBB, MachineBasicBlock::iterator MI, Register SrcReg, bool isKill, int FrameIndex, const TargetRegisterClass *RC, Register VReg, MachineInstr::MIFlag Flags=MachineInstr::NoFlags) const
Store the specified register of the given register class to the specified stack frame index.
virtual unsigned getInlineAsmLength(const char *Str, const MCAsmInfo &MAI, const TargetSubtargetInfo *STI=nullptr) const
Measure the specified inline asm to determine an approximation of its length.
virtual void genAlternativeCodeSequence(MachineInstr &Root, unsigned Pattern, SmallVectorImpl< MachineInstr * > &InsInstrs, SmallVectorImpl< MachineInstr * > &DelInstrs, DenseMap< Register, unsigned > &InstIdxForVirtReg) const
When getMachineCombinerPatterns() finds patterns, this function generates the instructions that could...
virtual std::optional< ParamLoadedValue > describeLoadedValue(const MachineInstr &MI, Register Reg) const
Produce the expression describing the MI loading a value into the physical register Reg.
void lowerCopy(MachineInstr *MI, const TargetRegisterInfo *TRI) const
This function defines the logic to lower COPY instruction to target specific instruction(s).
virtual unsigned getReduceOpcodeForAccumulator(unsigned int AccumulatorOpCode) const
Returns the opcode that should be used to reduce accumulation registers.
virtual Register isLoadFromStackSlot(const MachineInstr &MI, int &FrameIndex) const
If the specified machine instruction is a direct load from a stack slot, return the virtual or physic...
MachineInstr * foldMemoryOperand(MachineInstr &MI, ArrayRef< unsigned > Ops, int FI, LiveIntervals *LIS=nullptr, VirtRegMap *VRM=nullptr) const
Attempt to fold a load or store of the specified stack slot into the specified machine instruction fo...
virtual MachineInstr * optimizeLoadInstr(MachineInstr &MI, const MachineRegisterInfo *MRI, Register &FoldAsLoadDefReg, MachineInstr *&DefMI) const
Try to remove the load by folding it to a register operand at the use.
virtual ScheduleHazardRecognizer * CreateTargetHazardRecognizer(const TargetSubtargetInfo *STI, const ScheduleDAG *DAG) const
Allocate and return a hazard recognizer to use for this target when scheduling the machine instructio...
TargetInstrInfo(const TargetRegisterInfo &TRI, unsigned CFSetupOpcode=~0u, unsigned CFDestroyOpcode=~0u, unsigned CatchRetOpcode=~0u, unsigned ReturnOpcode=~0u, const int16_t *const RegClassByHwModeTable=nullptr)
virtual unsigned insertBranch(MachineBasicBlock &MBB, MachineBasicBlock *TBB, MachineBasicBlock *FBB, ArrayRef< MachineOperand > Cond, const DebugLoc &DL, int *BytesAdded=nullptr) const
Insert branch code into the end of the specified MachineBasicBlock.
virtual void copyPhysReg(MachineBasicBlock &MBB, MachineBasicBlock::iterator MI, const DebugLoc &DL, Register DestReg, Register SrcReg, bool KillSrc, bool RenamableDest=false, bool RenamableSrc=false) const
Emit instructions to copy a pair of physical registers.
virtual unsigned getAccumulationStartOpcode(unsigned Opcode) const
Returns an opcode which defines the accumulator used by \P Opcode.
unsigned getCallFrameSetupOpcode() const
These methods return the opcode of the frame setup/destroy instructions if they exist (-1 otherwise).
virtual bool getMachineCombinerPatterns(MachineInstr &Root, SmallVectorImpl< unsigned > &Patterns, bool DoRegPressureReduce) const
Return true when there is potentially a faster code sequence for an instruction chain ending in Root.
virtual bool isReMaterializableImpl(const MachineInstr &MI) const
For instructions with opcodes for which the M_REMATERIALIZABLE flag is set, this hook lets the target...
virtual MCInst getNop() const
Return the noop instruction to use for a noop.
unsigned getCallFrameSizeAt(MachineInstr &MI) const
virtual MachineInstr & duplicate(MachineBasicBlock &MBB, MachineBasicBlock::iterator InsertBefore, const MachineInstr &Orig) const
Clones instruction or the whole instruction bundle Orig and insert into MBB before InsertBefore.
std::pair< unsigned, unsigned > getReassociationOpcodes(unsigned Pattern, const MachineInstr &Root, const MachineInstr &Prev) const
Reassociation of some instructions requires inverse operations (e.g.
virtual bool isMBBSafeToOutlineFrom(MachineBasicBlock &MBB, unsigned &Flags) const
Optional target hook that returns true if MBB is safe to outline from, and returns any target-specifi...
virtual void getReassociateOperandIndices(const MachineInstr &Root, unsigned Pattern, std::array< unsigned, 5 > &OperandIndices) const
The returned array encodes the operand index for each parameter because the operands may be commuted;...
int64_t getFrameTotalSize(const MachineInstr &I) const
Returns the total frame size, which is made up of the space set up inside the pair of frame start-sto...
MachineInstr * commuteInstruction(MachineInstr &MI, bool NewMI=false, unsigned OpIdx1=CommuteAnyOperandIndex, unsigned OpIdx2=CommuteAnyOperandIndex) const
This method commutes the operands of the given machine instruction MI.
virtual std::optional< unsigned > getOperandLatency(const InstrItineraryData *ItinData, SDNode *DefNode, unsigned DefIdx, SDNode *UseNode, unsigned UseIdx) const
virtual bool isAssociativeAndCommutative(const MachineInstr &Inst, bool Invert=false) const
Return true when \P Inst is both associative and commutative.
virtual void reMaterialize(MachineBasicBlock &MBB, MachineBasicBlock::iterator MI, Register DestReg, unsigned SubIdx, const MachineInstr &Orig, LaneBitmask UsedLanes=LaneBitmask::getAll()) const
Re-issue the specified 'original' instruction at the specific location targeting a new destination re...
void reassociateOps(MachineInstr &Root, MachineInstr &Prev, unsigned Pattern, SmallVectorImpl< MachineInstr * > &InsInstrs, SmallVectorImpl< MachineInstr * > &DelInstrs, ArrayRef< unsigned > OperandIndices, DenseMap< Register, unsigned > &InstrIdxForVirtReg) const
Attempt to reassociate \P Root and \P Prev according to \P Pattern to reduce critical path length.
virtual std::optional< unsigned > getInverseOpcode(unsigned Opcode) const
Return the inverse operation opcode if it exists for \P Opcode (e.g.
virtual void insertNoops(MachineBasicBlock &MBB, MachineBasicBlock::iterator MI, unsigned Quantity) const
Insert noops into the instruction stream at the specified point.
unsigned getCallFrameDestroyOpcode() const
int64_t getFrameSize(const MachineInstr &I) const
Returns size of the frame associated with the given frame instruction.
virtual bool isPredicated(const MachineInstr &MI) const
Returns true if the instruction is already predicated.
bool getInsertSubregInputs(const MachineInstr &MI, unsigned DefIdx, RegSubRegPair &BaseReg, RegSubRegPairAndIdx &InsertedReg) const
Build the equivalent inputs of a INSERT_SUBREG for the given MI and DefIdx.
virtual ~TargetInstrInfo()
virtual unsigned getInstrLatency(const InstrItineraryData *ItinData, const MachineInstr &MI, unsigned *PredCost=nullptr) const
Compute the instruction latency of a given instruction.
virtual bool produceSameValue(const MachineInstr &MI0, const MachineInstr &MI1, const MachineRegisterInfo *MRI=nullptr) const
Return true if two machine instructions would produce identical values.
virtual bool isAccumulationOpcode(unsigned Opcode) const
Return true when \P OpCode is an instruction which performs accumulation into one of its operand regi...
std::optional< DestSourcePair > isCopyInstr(const MachineInstr &MI) const
If the specific machine instruction is an instruction that moves/copies value from one register to ano...
bool isReassociationCandidate(const MachineInstr &Inst, bool &Commuted) const
Return true if the input \P Inst is part of a chain of dependent ops that are suitable for reassociat...
void reduceAccumulatorTree(SmallVectorImpl< Register > &RegistersToReduce, SmallVectorImpl< MachineInstr * > &InsInstrs, MachineFunction &MF, MachineInstr &Root, MachineRegisterInfo &MRI, DenseMap< Register, unsigned > &InstrIdxForVirtReg, Register ResultReg) const
Reduces branches of the accumulator tree into a single register.
virtual bool isSchedulingBoundary(const MachineInstr &MI, const MachineBasicBlock *MBB, const MachineFunction &MF) const
Test if the given instruction should be considered a scheduling boundary.
virtual bool getMemOperandsWithOffsetWidth(const MachineInstr &MI, SmallVectorImpl< const MachineOperand * > &BaseOps, int64_t &Offset, bool &OffsetIsScalable, LocationSize &Width, const TargetRegisterInfo *TRI) const
Get zero or more base operands and the byte offset of an instruction that reads/writes memory.
virtual unsigned getPredicationCost(const MachineInstr &MI) const
virtual void loadRegFromStackSlot(MachineBasicBlock &MBB, MachineBasicBlock::iterator MI, Register DestReg, int FrameIndex, const TargetRegisterClass *RC, Register VReg, unsigned SubReg=0, MachineInstr::MIFlag Flags=MachineInstr::NoFlags) const
Load the specified register of the given register class from the specified stack frame index.
virtual CombinerObjective getCombinerObjective(unsigned Pattern) const
Return the objective of a combiner pattern.
virtual MachineInstr * commuteInstructionImpl(MachineInstr &MI, bool NewMI, unsigned OpIdx1, unsigned OpIdx2) const
This method commutes the operands of the given machine instruction MI.
virtual bool isFunctionSafeToSplit(const MachineFunction &MF) const
Return true if the function is a viable candidate for machine function splitting.
virtual MachineTraceStrategy getMachineCombinerTraceStrategy() const
Return a strategy that MachineCombiner must use when creating traces.
bool getRegSequenceInputs(const MachineInstr &MI, unsigned DefIdx, SmallVectorImpl< RegSubRegPairAndIdx > &InputRegs) const
Build the equivalent inputs of a REG_SEQUENCE for the given MI and DefIdx.
virtual bool hasLoadFromStackSlot(const MachineInstr &MI, SmallVectorImpl< const MachineMemOperand * > &Accesses) const
If the specified machine instruction has a load from a stack slot, return true along with the FrameIn...
virtual bool isGlobalMemoryObject(const MachineInstr *MI) const
Returns true if MI is an instruction we are unable to reason about (like a call or something with unm...
virtual std::optional< RegImmPair > isAddImmediate(const MachineInstr &MI, Register Reg) const
If the specific machine instruction is an instruction that adds an immediate value and a register,...
unsigned defaultDefLatency(const MCSchedModel &SchedModel, const MachineInstr &DefMI) const
Return the default expected latency for a def based on its opcode.
static const unsigned CommuteAnyOperandIndex
virtual bool hasReassociableSibling(const MachineInstr &Inst, bool &Commuted) const
Return true when \P Inst has reassociable sibling.
virtual std::string createMIROperandComment(const MachineInstr &MI, const MachineOperand &Op, unsigned OpIdx, const TargetRegisterInfo *TRI) const
virtual bool isHighLatencyDef(int opc) const
Return true if this opcode has high latency to its result.
static bool fixCommutedOpIndices(unsigned &ResultIdx1, unsigned &ResultIdx2, unsigned CommutableOpIdx1, unsigned CommutableOpIdx2)
Assigns the (CommutableOpIdx1, CommutableOpIdx2) pair of commutable operand indices to (ResultIdx1,...
bool getExtractSubregInputs(const MachineInstr &MI, unsigned DefIdx, RegSubRegPairAndIdx &InputReg) const
Build the equivalent inputs of a EXTRACT_SUBREG for the given MI and DefIdx.
virtual bool getExtractSubregLikeInputs(const MachineInstr &MI, unsigned DefIdx, RegSubRegPairAndIdx &InputReg) const
Target-dependent implementation of getExtractSubregInputs.
bool usePreRAHazardRecognizer() const
Provide a global flag for disabling the PreRA hazard recognizer that targets may choose to honor.
bool getMemOperandWithOffset(const MachineInstr &MI, const MachineOperand *&BaseOp, int64_t &Offset, bool &OffsetIsScalable, const TargetRegisterInfo *TRI) const
Get the base operand and byte offset of an instruction that reads/writes memory.
Register getStackPointerRegisterToSaveRestore() const
If a physical register, this specifies the register that llvm.savestack/llvm.restorestack should save...
This class defines information used to lower LLVM code to legal SelectionDAG operators that the targe...
const Triple & getTargetTriple() const
bool contains(Register Reg) const
Return true if the specified register is included in this register class.
bool hasSubClassEq(const TargetRegisterClass *RC) const
Returns true if RC is a sub-class of or equal to this class.
TargetRegisterInfo base class - We assume that the target defines a static array of TargetRegisterDes...
Provide an instruction scheduling machine model to CodeGen passes.
const InstrItineraryData * getInstrItineraries() const
TargetSubtargetInfo - Generic base class for all target subtargets.
virtual const TargetFrameLowering * getFrameLowering() const
virtual const TargetInstrInfo * getInstrInfo() const
virtual const TargetLowering * getTargetLowering() const
bool isOSDarwin() const
Is this a "Darwin" OS (macOS, iOS, tvOS, watchOS, DriverKit, XROS, or bridgeOS).
Definition Triple.h:639
A raw_ostream that writes to an std::string.
#define llvm_unreachable(msg)
Marks that the current location is not supposed to be reachable.
@ Tail
Attempts to make calls as fast as possible while guaranteeing that tail call optimization can always b...
Definition CallingConv.h:76
@ C
The default llvm calling convention, compatible with C.
Definition CallingConv.h:34
initializer< Ty > init(const Ty &Val)
InstrType
Represents how an instruction should be mapped by the outliner.
This is an optimization pass for GlobalISel generic memory operations.
Definition Types.h:26
@ Offset
Definition DWP.cpp:532
@ Length
Definition DWP.cpp:532
MachineTraceStrategy
Strategies for selecting traces.
@ TS_MinInstrCount
Select the trace through a block that has the fewest instructions.
bool all_of(R &&range, UnaryPredicate P)
Provide wrappers to std::all_of which take ranges instead of having to pass begin/end explicitly.
Definition STLExtras.h:1739
MachineInstrBuilder BuildMI(MachineFunction &MF, const MIMetadata &MIMD, const MCInstrDesc &MCID)
Builder interface. Specify how to create the initial instruction itself.
@ Define
Register definition.
auto enumerate(FirstRange &&First, RestRanges &&...Rest)
Given two or more input ranges, returns a new range whose values are tuples (A, B,...
Definition STLExtras.h:2554
InterleavedRange< Range > interleaved(const Range &R, StringRef Separator=", ", StringRef Prefix="", StringRef Suffix="")
Output range R as a sequence of interleaved elements.
constexpr RegState getKillRegState(bool B)
constexpr from_range_t from_range
iterator_range< T > make_range(T x, T y)
Convenience function for iterating over sub-ranges.
bool isa_and_nonnull(const Y &Val)
Definition Casting.h:676
unsigned Log2_32(uint32_t Value)
Return the floor log base 2 of the specified value, -1 if the value is zero.
Definition MathExtras.h:331
auto reverse(ContainerTy &&C)
Definition STLExtras.h:408
MachineInstr * getImm(const MachineOperand &MO, const MachineRegisterInfo *MRI)
LLVM_ABI void report_fatal_error(Error Err, bool gen_crash_diag=true)
Definition Error.cpp:163
CombinerObjective
The combiner's goal may differ based on which pattern it is attempting to optimize.
LLVM_ABI VirtRegInfo AnalyzeVirtRegInBundle(MachineInstr &MI, Register Reg, SmallVectorImpl< std::pair< MachineInstr *, unsigned > > *Ops=nullptr)
AnalyzeVirtRegInBundle - Analyze how the current instruction or bundle uses a virtual register.
@ First
Helpers to iterate all locations in the MemoryEffectsBase class.
Definition ModRef.h:74
DWARFExpression::Operation Op
bool is_contained(R &&Range, const E &Element)
Returns true if Element is found in Range.
Definition STLExtras.h:1947
bool isSpace(char C)
Checks whether character C is whitespace in the "C" locale.
std::pair< MachineOperand, DIExpression * > ParamLoadedValue
void swap(llvm::BitVector &LHS, llvm::BitVector &RHS)
Implement std::swap in terms of BitVector swap.
Definition BitVector.h:872
#define N
Machine model for scheduling, bundling, and heuristics.
Definition MCSchedule.h:258
static LLVM_ABI MachinePointerInfo getFixedStack(MachineFunction &MF, int FI, int64_t Offset=0)
Return a MachinePointerInfo record that refers to the specified FrameIndex.
A pair composed of a pair of a register and a sub-register index, and another sub-register index.
A pair composed of a register and a sub-register index.
VirtRegInfo - Information about a virtual register used by a set of operands.
bool Reads
Reads - One of the operands read the virtual register.
bool Writes
Writes - One of the operands writes the virtual register.
An individual sequence of instructions to be replaced with a call to an outlined function.
MachineFunction * getMF() const