LLVM  4.0.0
TargetInstrInfo.cpp
Go to the documentation of this file.
1 //===-- TargetInstrInfo.cpp - Target Instruction Information --------------===//
2 //
3 // The LLVM Compiler Infrastructure
4 //
5 // This file is distributed under the University of Illinois Open Source
6 // License. See LICENSE.TXT for details.
7 //
8 //===----------------------------------------------------------------------===//
9 //
10 // This file implements the TargetInstrInfo class.
11 //
12 //===----------------------------------------------------------------------===//
13 
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineMemOperand.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/PseudoSourceValue.h"
#include "llvm/CodeGen/ScoreboardHazardRecognizer.h"
#include "llvm/CodeGen/StackMaps.h"
#include "llvm/CodeGen/TargetSchedule.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/MC/MCAsmInfo.h"
#include "llvm/MC/MCInstrItineraries.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Target/TargetFrameLowering.h"
#include "llvm/Target/TargetInstrInfo.h"
#include "llvm/Target/TargetLowering.h"
#include "llvm/Target/TargetMachine.h"
#include "llvm/Target/TargetRegisterInfo.h"
#include "llvm/Target/TargetSubtargetInfo.h"
#include <cctype>
34 
35 using namespace llvm;
36 
38  "disable-sched-hazard", cl::Hidden, cl::init(false),
39  cl::desc("Disable hazard detection during preRA scheduling"));
40 
42 }
43 
45 TargetInstrInfo::getRegClass(const MCInstrDesc &MCID, unsigned OpNum,
46  const TargetRegisterInfo *TRI,
47  const MachineFunction &MF) const {
48  if (OpNum >= MCID.getNumOperands())
49  return nullptr;
50 
51  short RegClass = MCID.OpInfo[OpNum].RegClass;
52  if (MCID.OpInfo[OpNum].isLookupPtrRegClass())
53  return TRI->getPointerRegClass(MF, RegClass);
54 
55  // Instructions like INSERT_SUBREG do not have fixed register classes.
56  if (RegClass < 0)
57  return nullptr;
58 
59  // Otherwise just look it up normally.
60  return TRI->getRegClass(RegClass);
61 }
62 
63 /// insertNoop - Insert a noop into the instruction stream at the specified
64 /// point.
67  llvm_unreachable("Target didn't implement insertNoop!");
68 }
69 
70 /// Measure the specified inline asm to determine an approximation of its
71 /// length.
72 /// Comments (which run till the next SeparatorString or newline) do not
73 /// count as an instruction.
74 /// Any other non-whitespace text is considered an instruction, with
75 /// multiple instructions separated by SeparatorString or newlines.
76 /// Variable-length instructions are not handled here; this function
77 /// may be overloaded in the target code to do that.
78 unsigned TargetInstrInfo::getInlineAsmLength(const char *Str,
79  const MCAsmInfo &MAI) const {
80  // Count the number of instructions in the asm.
81  bool atInsnStart = true;
82  unsigned InstCount = 0;
83  for (; *Str; ++Str) {
84  if (*Str == '\n' || strncmp(Str, MAI.getSeparatorString(),
85  strlen(MAI.getSeparatorString())) == 0) {
86  atInsnStart = true;
87  } else if (strncmp(Str, MAI.getCommentString().data(),
88  MAI.getCommentString().size()) == 0) {
89  // Stop counting as an instruction after a comment until the next
90  // separator.
91  atInsnStart = false;
92  }
93 
94  if (atInsnStart && !std::isspace(static_cast<unsigned char>(*Str))) {
95  ++InstCount;
96  atInsnStart = false;
97  }
98  }
99 
100  return InstCount * MAI.getMaxInstLength();
101 }
102 
103 /// ReplaceTailWithBranchTo - Delete the instruction OldInst and everything
104 /// after it, replacing it with an unconditional branch to NewDest.
105 void
107  MachineBasicBlock *NewDest) const {
108  MachineBasicBlock *MBB = Tail->getParent();
109 
110  // Remove all the old successors of MBB from the CFG.
111  while (!MBB->succ_empty())
112  MBB->removeSuccessor(MBB->succ_begin());
113 
114  // Save off the debug loc before erasing the instruction.
115  DebugLoc DL = Tail->getDebugLoc();
116 
117  // Remove all the dead instructions from the end of MBB.
118  MBB->erase(Tail, MBB->end());
119 
120  // If MBB isn't immediately before MBB, insert a branch to it.
122  insertBranch(*MBB, NewDest, nullptr, SmallVector<MachineOperand, 0>(), DL);
123  MBB->addSuccessor(NewDest);
124 }
125 
127  bool NewMI, unsigned Idx1,
128  unsigned Idx2) const {
129  const MCInstrDesc &MCID = MI.getDesc();
130  bool HasDef = MCID.getNumDefs();
131  if (HasDef && !MI.getOperand(0).isReg())
132  // No idea how to commute this instruction. Target should implement its own.
133  return nullptr;
134 
135  unsigned CommutableOpIdx1 = Idx1; (void)CommutableOpIdx1;
136  unsigned CommutableOpIdx2 = Idx2; (void)CommutableOpIdx2;
137  assert(findCommutedOpIndices(MI, CommutableOpIdx1, CommutableOpIdx2) &&
138  CommutableOpIdx1 == Idx1 && CommutableOpIdx2 == Idx2 &&
139  "TargetInstrInfo::CommuteInstructionImpl(): not commutable operands.");
140  assert(MI.getOperand(Idx1).isReg() && MI.getOperand(Idx2).isReg() &&
141  "This only knows how to commute register operands so far");
142 
143  unsigned Reg0 = HasDef ? MI.getOperand(0).getReg() : 0;
144  unsigned Reg1 = MI.getOperand(Idx1).getReg();
145  unsigned Reg2 = MI.getOperand(Idx2).getReg();
146  unsigned SubReg0 = HasDef ? MI.getOperand(0).getSubReg() : 0;
147  unsigned SubReg1 = MI.getOperand(Idx1).getSubReg();
148  unsigned SubReg2 = MI.getOperand(Idx2).getSubReg();
149  bool Reg1IsKill = MI.getOperand(Idx1).isKill();
150  bool Reg2IsKill = MI.getOperand(Idx2).isKill();
151  bool Reg1IsUndef = MI.getOperand(Idx1).isUndef();
152  bool Reg2IsUndef = MI.getOperand(Idx2).isUndef();
153  bool Reg1IsInternal = MI.getOperand(Idx1).isInternalRead();
154  bool Reg2IsInternal = MI.getOperand(Idx2).isInternalRead();
155  // If destination is tied to either of the commuted source register, then
156  // it must be updated.
157  if (HasDef && Reg0 == Reg1 &&
158  MI.getDesc().getOperandConstraint(Idx1, MCOI::TIED_TO) == 0) {
159  Reg2IsKill = false;
160  Reg0 = Reg2;
161  SubReg0 = SubReg2;
162  } else if (HasDef && Reg0 == Reg2 &&
163  MI.getDesc().getOperandConstraint(Idx2, MCOI::TIED_TO) == 0) {
164  Reg1IsKill = false;
165  Reg0 = Reg1;
166  SubReg0 = SubReg1;
167  }
168 
169  MachineInstr *CommutedMI = nullptr;
170  if (NewMI) {
171  // Create a new instruction.
172  MachineFunction &MF = *MI.getParent()->getParent();
173  CommutedMI = MF.CloneMachineInstr(&MI);
174  } else {
175  CommutedMI = &MI;
176  }
177 
178  if (HasDef) {
179  CommutedMI->getOperand(0).setReg(Reg0);
180  CommutedMI->getOperand(0).setSubReg(SubReg0);
181  }
182  CommutedMI->getOperand(Idx2).setReg(Reg1);
183  CommutedMI->getOperand(Idx1).setReg(Reg2);
184  CommutedMI->getOperand(Idx2).setSubReg(SubReg1);
185  CommutedMI->getOperand(Idx1).setSubReg(SubReg2);
186  CommutedMI->getOperand(Idx2).setIsKill(Reg1IsKill);
187  CommutedMI->getOperand(Idx1).setIsKill(Reg2IsKill);
188  CommutedMI->getOperand(Idx2).setIsUndef(Reg1IsUndef);
189  CommutedMI->getOperand(Idx1).setIsUndef(Reg2IsUndef);
190  CommutedMI->getOperand(Idx2).setIsInternalRead(Reg1IsInternal);
191  CommutedMI->getOperand(Idx1).setIsInternalRead(Reg2IsInternal);
192  return CommutedMI;
193 }
194 
196  unsigned OpIdx1,
197  unsigned OpIdx2) const {
198  // If OpIdx1 or OpIdx2 is not specified, then this method is free to choose
199  // any commutable operand, which is done in findCommutedOpIndices() method
200  // called below.
201  if ((OpIdx1 == CommuteAnyOperandIndex || OpIdx2 == CommuteAnyOperandIndex) &&
202  !findCommutedOpIndices(MI, OpIdx1, OpIdx2)) {
203  assert(MI.isCommutable() &&
204  "Precondition violation: MI must be commutable.");
205  return nullptr;
206  }
207  return commuteInstructionImpl(MI, NewMI, OpIdx1, OpIdx2);
208 }
209 
210 bool TargetInstrInfo::fixCommutedOpIndices(unsigned &ResultIdx1,
211  unsigned &ResultIdx2,
212  unsigned CommutableOpIdx1,
213  unsigned CommutableOpIdx2) {
214  if (ResultIdx1 == CommuteAnyOperandIndex &&
215  ResultIdx2 == CommuteAnyOperandIndex) {
216  ResultIdx1 = CommutableOpIdx1;
217  ResultIdx2 = CommutableOpIdx2;
218  } else if (ResultIdx1 == CommuteAnyOperandIndex) {
219  if (ResultIdx2 == CommutableOpIdx1)
220  ResultIdx1 = CommutableOpIdx2;
221  else if (ResultIdx2 == CommutableOpIdx2)
222  ResultIdx1 = CommutableOpIdx1;
223  else
224  return false;
225  } else if (ResultIdx2 == CommuteAnyOperandIndex) {
226  if (ResultIdx1 == CommutableOpIdx1)
227  ResultIdx2 = CommutableOpIdx2;
228  else if (ResultIdx1 == CommutableOpIdx2)
229  ResultIdx2 = CommutableOpIdx1;
230  else
231  return false;
232  } else
233  // Check that the result operand indices match the given commutable
234  // operand indices.
235  return (ResultIdx1 == CommutableOpIdx1 && ResultIdx2 == CommutableOpIdx2) ||
236  (ResultIdx1 == CommutableOpIdx2 && ResultIdx2 == CommutableOpIdx1);
237 
238  return true;
239 }
240 
242  unsigned &SrcOpIdx1,
243  unsigned &SrcOpIdx2) const {
244  assert(!MI.isBundle() &&
245  "TargetInstrInfo::findCommutedOpIndices() can't handle bundles");
246 
247  const MCInstrDesc &MCID = MI.getDesc();
248  if (!MCID.isCommutable())
249  return false;
250 
251  // This assumes v0 = op v1, v2 and commuting would swap v1 and v2. If this
252  // is not true, then the target must implement this.
253  unsigned CommutableOpIdx1 = MCID.getNumDefs();
254  unsigned CommutableOpIdx2 = CommutableOpIdx1 + 1;
255  if (!fixCommutedOpIndices(SrcOpIdx1, SrcOpIdx2,
256  CommutableOpIdx1, CommutableOpIdx2))
257  return false;
258 
259  if (!MI.getOperand(SrcOpIdx1).isReg() || !MI.getOperand(SrcOpIdx2).isReg())
260  // No idea.
261  return false;
262  return true;
263 }
264 
266  if (!MI.isTerminator()) return false;
267 
268  // Conditional branch is a special case.
269  if (MI.isBranch() && !MI.isBarrier())
270  return true;
271  if (!MI.isPredicable())
272  return true;
273  return !isPredicated(MI);
274 }
275 
278  bool MadeChange = false;
279 
280  assert(!MI.isBundle() &&
281  "TargetInstrInfo::PredicateInstruction() can't handle bundles");
282 
283  const MCInstrDesc &MCID = MI.getDesc();
284  if (!MI.isPredicable())
285  return false;
286 
287  for (unsigned j = 0, i = 0, e = MI.getNumOperands(); i != e; ++i) {
288  if (MCID.OpInfo[i].isPredicate()) {
289  MachineOperand &MO = MI.getOperand(i);
290  if (MO.isReg()) {
291  MO.setReg(Pred[j].getReg());
292  MadeChange = true;
293  } else if (MO.isImm()) {
294  MO.setImm(Pred[j].getImm());
295  MadeChange = true;
296  } else if (MO.isMBB()) {
297  MO.setMBB(Pred[j].getMBB());
298  MadeChange = true;
299  }
300  ++j;
301  }
302  }
303  return MadeChange;
304 }
305 
307  const MachineMemOperand *&MMO,
308  int &FrameIndex) const {
310  oe = MI.memoperands_end();
311  o != oe; ++o) {
312  if ((*o)->isLoad()) {
314  dyn_cast_or_null<FixedStackPseudoSourceValue>(
315  (*o)->getPseudoValue())) {
316  FrameIndex = Value->getFrameIndex();
317  MMO = *o;
318  return true;
319  }
320  }
321  }
322  return false;
323 }
324 
326  const MachineMemOperand *&MMO,
327  int &FrameIndex) const {
329  oe = MI.memoperands_end();
330  o != oe; ++o) {
331  if ((*o)->isStore()) {
333  dyn_cast_or_null<FixedStackPseudoSourceValue>(
334  (*o)->getPseudoValue())) {
335  FrameIndex = Value->getFrameIndex();
336  MMO = *o;
337  return true;
338  }
339  }
340  }
341  return false;
342 }
343 
345  unsigned SubIdx, unsigned &Size,
346  unsigned &Offset,
347  const MachineFunction &MF) const {
348  if (!SubIdx) {
349  Size = RC->getSize();
350  Offset = 0;
351  return true;
352  }
353  const TargetRegisterInfo *TRI = MF.getSubtarget().getRegisterInfo();
354  unsigned BitSize = TRI->getSubRegIdxSize(SubIdx);
355  // Convert bit size to byte size to be consistent with
356  // MCRegisterClass::getSize().
357  if (BitSize % 8)
358  return false;
359 
360  int BitOffset = TRI->getSubRegIdxOffset(SubIdx);
361  if (BitOffset < 0 || BitOffset % 8)
362  return false;
363 
364  Size = BitSize /= 8;
365  Offset = (unsigned)BitOffset / 8;
366 
367  assert(RC->getSize() >= (Offset + Size) && "bad subregister range");
368 
369  if (!MF.getDataLayout().isLittleEndian()) {
370  Offset = RC->getSize() - (Offset + Size);
371  }
372  return true;
373 }
374 
377  unsigned DestReg, unsigned SubIdx,
378  const MachineInstr &Orig,
379  const TargetRegisterInfo &TRI) const {
380  MachineInstr *MI = MBB.getParent()->CloneMachineInstr(&Orig);
381  MI->substituteRegister(MI->getOperand(0).getReg(), DestReg, SubIdx, TRI);
382  MBB.insert(I, MI);
383 }
384 
386  const MachineInstr &MI1,
387  const MachineRegisterInfo *MRI) const {
389 }
390 
392  MachineFunction &MF) const {
393  assert(!Orig.isNotDuplicable() && "Instruction cannot be duplicated");
394  return MF.CloneMachineInstr(&Orig);
395 }
396 
397 // If the COPY instruction in MI can be folded to a stack operation, return
398 // the register class to use.
400  unsigned FoldIdx) {
401  assert(MI.isCopy() && "MI must be a COPY instruction");
402  if (MI.getNumOperands() != 2)
403  return nullptr;
404  assert(FoldIdx<2 && "FoldIdx refers no nonexistent operand");
405 
406  const MachineOperand &FoldOp = MI.getOperand(FoldIdx);
407  const MachineOperand &LiveOp = MI.getOperand(1 - FoldIdx);
408 
409  if (FoldOp.getSubReg() || LiveOp.getSubReg())
410  return nullptr;
411 
412  unsigned FoldReg = FoldOp.getReg();
413  unsigned LiveReg = LiveOp.getReg();
414 
416  "Cannot fold physregs");
417 
419  const TargetRegisterClass *RC = MRI.getRegClass(FoldReg);
420 
422  return RC->contains(LiveOp.getReg()) ? RC : nullptr;
423 
424  if (RC->hasSubClassEq(MRI.getRegClass(LiveReg)))
425  return RC;
426 
427  // FIXME: Allow folding when register classes are memory compatible.
428  return nullptr;
429 }
430 
432  llvm_unreachable("Not a MachO target");
433 }
434 
437  const TargetInstrInfo &TII) {
438  unsigned StartIdx = 0;
439  switch (MI.getOpcode()) {
440  case TargetOpcode::STACKMAP: {
441  // StackMapLiveValues are foldable
442  StartIdx = StackMapOpers(&MI).getVarIdx();
443  break;
444  }
445  case TargetOpcode::PATCHPOINT: {
446  // For PatchPoint, the call args are not foldable (even if reported in the
447  // stackmap e.g. via anyregcc).
448  StartIdx = PatchPointOpers(&MI).getVarIdx();
449  break;
450  }
451  case TargetOpcode::STATEPOINT: {
452  // For statepoints, fold deopt and gc arguments, but not call arguments.
453  StartIdx = StatepointOpers(&MI).getVarIdx();
454  break;
455  }
456  default:
457  llvm_unreachable("unexpected stackmap opcode");
458  }
459 
460  // Return false if any operands requested for folding are not foldable (not
461  // part of the stackmap's live values).
462  for (unsigned Op : Ops) {
463  if (Op < StartIdx)
464  return nullptr;
465  }
466 
467  MachineInstr *NewMI =
468  MF.CreateMachineInstr(TII.get(MI.getOpcode()), MI.getDebugLoc(), true);
469  MachineInstrBuilder MIB(MF, NewMI);
470 
471  // No need to fold return, the meta data, and function arguments
472  for (unsigned i = 0; i < StartIdx; ++i)
473  MIB.addOperand(MI.getOperand(i));
474 
475  for (unsigned i = StartIdx; i < MI.getNumOperands(); ++i) {
476  MachineOperand &MO = MI.getOperand(i);
477  if (is_contained(Ops, i)) {
478  unsigned SpillSize;
479  unsigned SpillOffset;
480  // Compute the spill slot size and offset.
481  const TargetRegisterClass *RC =
482  MF.getRegInfo().getRegClass(MO.getReg());
483  bool Valid =
484  TII.getStackSlotRange(RC, MO.getSubReg(), SpillSize, SpillOffset, MF);
485  if (!Valid)
486  report_fatal_error("cannot spill patchpoint subregister operand");
488  MIB.addImm(SpillSize);
489  MIB.addFrameIndex(FrameIndex);
490  MIB.addImm(SpillOffset);
491  }
492  else
493  MIB.addOperand(MO);
494  }
495  return NewMI;
496 }
497 
498 /// foldMemoryOperand - Attempt to fold a load or store of the specified stack
499 /// slot into the specified machine instruction for the specified operand(s).
500 /// If this is possible, a new instruction is returned with the specified
501 /// operand folded, otherwise NULL is returned. The client is responsible for
502 /// removing the old instruction and adding the new one in the instruction
503 /// stream.
505  ArrayRef<unsigned> Ops, int FI,
506  LiveIntervals *LIS) const {
508  for (unsigned i = 0, e = Ops.size(); i != e; ++i)
509  if (MI.getOperand(Ops[i]).isDef())
511  else
513 
515  assert(MBB && "foldMemoryOperand needs an inserted instruction");
516  MachineFunction &MF = *MBB->getParent();
517 
518  // If we're not folding a load into a subreg, the size of the load is the
519  // size of the spill slot. But if we are, we need to figure out what the
520  // actual load size is.
521  int64_t MemSize = 0;
522  const MachineFrameInfo &MFI = MF.getFrameInfo();
523  const TargetRegisterInfo *TRI = MF.getSubtarget().getRegisterInfo();
524 
526  MemSize = MFI.getObjectSize(FI);
527  } else {
528  for (unsigned Idx : Ops) {
529  int64_t OpSize = MFI.getObjectSize(FI);
530 
531  if (auto SubReg = MI.getOperand(Idx).getSubReg()) {
532  unsigned SubRegSize = TRI->getSubRegIdxSize(SubReg);
533  if (SubRegSize > 0 && !(SubRegSize % 8))
534  OpSize = SubRegSize / 8;
535  }
536 
537  MemSize = std::max(MemSize, OpSize);
538  }
539  }
540 
541  assert(MemSize && "Did not expect a zero-sized stack slot");
542 
543  MachineInstr *NewMI = nullptr;
544 
545  if (MI.getOpcode() == TargetOpcode::STACKMAP ||
546  MI.getOpcode() == TargetOpcode::PATCHPOINT ||
547  MI.getOpcode() == TargetOpcode::STATEPOINT) {
548  // Fold stackmap/patchpoint.
549  NewMI = foldPatchpoint(MF, MI, Ops, FI, *this);
550  if (NewMI)
551  MBB->insert(MI, NewMI);
552  } else {
553  // Ask the target to do the actual folding.
554  NewMI = foldMemoryOperandImpl(MF, MI, Ops, MI, FI, LIS);
555  }
556 
557  if (NewMI) {
558  NewMI->setMemRefs(MI.memoperands_begin(), MI.memoperands_end());
559  // Add a memory operand, foldMemoryOperandImpl doesn't do that.
560  assert((!(Flags & MachineMemOperand::MOStore) ||
561  NewMI->mayStore()) &&
562  "Folded a def to a non-store!");
564  NewMI->mayLoad()) &&
565  "Folded a use to a non-load!");
566  assert(MFI.getObjectOffset(FI) != -1);
568  MachinePointerInfo::getFixedStack(MF, FI), Flags, MemSize,
569  MFI.getObjectAlignment(FI));
570  NewMI->addMemOperand(MF, MMO);
571 
572  return NewMI;
573  }
574 
575  // Straight COPY may fold as load/store.
576  if (!MI.isCopy() || Ops.size() != 1)
577  return nullptr;
578 
579  const TargetRegisterClass *RC = canFoldCopy(MI, Ops[0]);
580  if (!RC)
581  return nullptr;
582 
583  const MachineOperand &MO = MI.getOperand(1 - Ops[0]);
585 
586  if (Flags == MachineMemOperand::MOStore)
587  storeRegToStackSlot(*MBB, Pos, MO.getReg(), MO.isKill(), FI, RC, TRI);
588  else
589  loadRegFromStackSlot(*MBB, Pos, MO.getReg(), FI, RC, TRI);
590  return &*--Pos;
591 }
592 
594  const MachineInstr &Inst, const MachineBasicBlock *MBB) const {
595  const MachineOperand &Op1 = Inst.getOperand(1);
596  const MachineOperand &Op2 = Inst.getOperand(2);
597  const MachineRegisterInfo &MRI = MBB->getParent()->getRegInfo();
598 
599  // We need virtual register definitions for the operands that we will
600  // reassociate.
601  MachineInstr *MI1 = nullptr;
602  MachineInstr *MI2 = nullptr;
604  MI1 = MRI.getUniqueVRegDef(Op1.getReg());
606  MI2 = MRI.getUniqueVRegDef(Op2.getReg());
607 
608  // And they need to be in the trace (otherwise, they won't have a depth).
609  return MI1 && MI2 && MI1->getParent() == MBB && MI2->getParent() == MBB;
610 }
611 
613  bool &Commuted) const {
614  const MachineBasicBlock *MBB = Inst.getParent();
615  const MachineRegisterInfo &MRI = MBB->getParent()->getRegInfo();
616  MachineInstr *MI1 = MRI.getUniqueVRegDef(Inst.getOperand(1).getReg());
617  MachineInstr *MI2 = MRI.getUniqueVRegDef(Inst.getOperand(2).getReg());
618  unsigned AssocOpcode = Inst.getOpcode();
619 
620  // If only one operand has the same opcode and it's the second source operand,
621  // the operands must be commuted.
622  Commuted = MI1->getOpcode() != AssocOpcode && MI2->getOpcode() == AssocOpcode;
623  if (Commuted)
624  std::swap(MI1, MI2);
625 
626  // 1. The previous instruction must be the same type as Inst.
627  // 2. The previous instruction must have virtual register definitions for its
628  // operands in the same basic block as Inst.
629  // 3. The previous instruction's result must only be used by Inst.
630  return MI1->getOpcode() == AssocOpcode &&
631  hasReassociableOperands(*MI1, MBB) &&
632  MRI.hasOneNonDBGUse(MI1->getOperand(0).getReg());
633 }
634 
635 // 1. The operation must be associative and commutative.
636 // 2. The instruction must have virtual register definitions for its
637 // operands in the same basic block.
638 // 3. The instruction must have a reassociable sibling.
640  bool &Commuted) const {
641  return isAssociativeAndCommutative(Inst) &&
642  hasReassociableOperands(Inst, Inst.getParent()) &&
643  hasReassociableSibling(Inst, Commuted);
644 }
645 
646 // The concept of the reassociation pass is that these operations can benefit
647 // from this kind of transformation:
648 //
649 // A = ? op ?
650 // B = A op X (Prev)
651 // C = B op Y (Root)
652 // -->
653 // A = ? op ?
654 // B = X op Y
655 // C = A op B
656 //
657 // breaking the dependency between A and B, allowing them to be executed in
658 // parallel (or back-to-back in a pipeline) instead of depending on each other.
659 
660 // FIXME: This has the potential to be expensive (compile time) while not
661 // improving the code at all. Some ways to limit the overhead:
662 // 1. Track successful transforms; bail out if hit rate gets too low.
663 // 2. Only enable at -O3 or some other non-default optimization level.
664 // 3. Pre-screen pattern candidates here: if an operand of the previous
665 // instruction is known to not increase the critical path, then don't match
666 // that pattern.
668  MachineInstr &Root,
669  SmallVectorImpl<MachineCombinerPattern> &Patterns) const {
670  bool Commute;
671  if (isReassociationCandidate(Root, Commute)) {
672  // We found a sequence of instructions that may be suitable for a
673  // reassociation of operands to increase ILP. Specify each commutation
674  // possibility for the Prev instruction in the sequence and let the
675  // machine combiner decide if changing the operands is worthwhile.
676  if (Commute) {
679  } else {
682  }
683  return true;
684  }
685 
686  return false;
687 }
688 /// Return true when a code sequence can improve loop throughput.
689 bool
691  return false;
692 }
693 /// Attempt the reassociation transformation to reduce critical path length.
694 /// See the above comments before getMachineCombinerPatterns().
696  MachineInstr &Root, MachineInstr &Prev,
697  MachineCombinerPattern Pattern,
700  DenseMap<unsigned, unsigned> &InstrIdxForVirtReg) const {
701  MachineFunction *MF = Root.getParent()->getParent();
703  const TargetInstrInfo *TII = MF->getSubtarget().getInstrInfo();
704  const TargetRegisterInfo *TRI = MF->getSubtarget().getRegisterInfo();
705  const TargetRegisterClass *RC = Root.getRegClassConstraint(0, TII, TRI);
706 
707  // This array encodes the operand index for each parameter because the
708  // operands may be commuted. Each row corresponds to a pattern value,
709  // and each column specifies the index of A, B, X, Y.
710  unsigned OpIdx[4][4] = {
711  { 1, 1, 2, 2 },
712  { 1, 2, 2, 1 },
713  { 2, 1, 1, 2 },
714  { 2, 2, 1, 1 }
715  };
716 
717  int Row;
718  switch (Pattern) {
719  case MachineCombinerPattern::REASSOC_AX_BY: Row = 0; break;
720  case MachineCombinerPattern::REASSOC_AX_YB: Row = 1; break;
721  case MachineCombinerPattern::REASSOC_XA_BY: Row = 2; break;
722  case MachineCombinerPattern::REASSOC_XA_YB: Row = 3; break;
723  default: llvm_unreachable("unexpected MachineCombinerPattern");
724  }
725 
726  MachineOperand &OpA = Prev.getOperand(OpIdx[Row][0]);
727  MachineOperand &OpB = Root.getOperand(OpIdx[Row][1]);
728  MachineOperand &OpX = Prev.getOperand(OpIdx[Row][2]);
729  MachineOperand &OpY = Root.getOperand(OpIdx[Row][3]);
730  MachineOperand &OpC = Root.getOperand(0);
731 
732  unsigned RegA = OpA.getReg();
733  unsigned RegB = OpB.getReg();
734  unsigned RegX = OpX.getReg();
735  unsigned RegY = OpY.getReg();
736  unsigned RegC = OpC.getReg();
737 
739  MRI.constrainRegClass(RegA, RC);
741  MRI.constrainRegClass(RegB, RC);
743  MRI.constrainRegClass(RegX, RC);
745  MRI.constrainRegClass(RegY, RC);
747  MRI.constrainRegClass(RegC, RC);
748 
749  // Create a new virtual register for the result of (X op Y) instead of
750  // recycling RegB because the MachineCombiner's computation of the critical
751  // path requires a new register definition rather than an existing one.
752  unsigned NewVR = MRI.createVirtualRegister(RC);
753  InstrIdxForVirtReg.insert(std::make_pair(NewVR, 0));
754 
755  unsigned Opcode = Root.getOpcode();
756  bool KillA = OpA.isKill();
757  bool KillX = OpX.isKill();
758  bool KillY = OpY.isKill();
759 
760  // Create new instructions for insertion.
761  MachineInstrBuilder MIB1 =
762  BuildMI(*MF, Prev.getDebugLoc(), TII->get(Opcode), NewVR)
763  .addReg(RegX, getKillRegState(KillX))
764  .addReg(RegY, getKillRegState(KillY));
765  MachineInstrBuilder MIB2 =
766  BuildMI(*MF, Root.getDebugLoc(), TII->get(Opcode), RegC)
767  .addReg(RegA, getKillRegState(KillA))
768  .addReg(NewVR, getKillRegState(true));
769 
770  setSpecialOperandAttr(Root, Prev, *MIB1, *MIB2);
771 
772  // Record new instructions for insertion and old instructions for deletion.
773  InsInstrs.push_back(MIB1);
774  InsInstrs.push_back(MIB2);
775  DelInstrs.push_back(&Prev);
776  DelInstrs.push_back(&Root);
777 }
778 
780  MachineInstr &Root, MachineCombinerPattern Pattern,
783  DenseMap<unsigned, unsigned> &InstIdxForVirtReg) const {
785 
786  // Select the previous instruction in the sequence based on the input pattern.
787  MachineInstr *Prev = nullptr;
788  switch (Pattern) {
791  Prev = MRI.getUniqueVRegDef(Root.getOperand(1).getReg());
792  break;
795  Prev = MRI.getUniqueVRegDef(Root.getOperand(2).getReg());
796  break;
797  default:
798  break;
799  }
800 
801  assert(Prev && "Unknown pattern for machine combiner");
802 
803  reassociateOps(Root, *Prev, Pattern, InsInstrs, DelInstrs, InstIdxForVirtReg);
804 }
805 
806 /// foldMemoryOperand - Same as the previous version except it allows folding
807 /// of any load and store from / to any address, not just from a specific
808 /// stack slot.
810  ArrayRef<unsigned> Ops,
811  MachineInstr &LoadMI,
812  LiveIntervals *LIS) const {
813  assert(LoadMI.canFoldAsLoad() && "LoadMI isn't foldable!");
814 #ifndef NDEBUG
815  for (unsigned i = 0, e = Ops.size(); i != e; ++i)
816  assert(MI.getOperand(Ops[i]).isUse() && "Folding load into def!");
817 #endif
819  MachineFunction &MF = *MBB.getParent();
820 
821  // Ask the target to do the actual folding.
822  MachineInstr *NewMI = nullptr;
823  int FrameIndex = 0;
824 
825  if ((MI.getOpcode() == TargetOpcode::STACKMAP ||
826  MI.getOpcode() == TargetOpcode::PATCHPOINT ||
827  MI.getOpcode() == TargetOpcode::STATEPOINT) &&
828  isLoadFromStackSlot(LoadMI, FrameIndex)) {
829  // Fold stackmap/patchpoint.
830  NewMI = foldPatchpoint(MF, MI, Ops, FrameIndex, *this);
831  if (NewMI)
832  NewMI = &*MBB.insert(MI, NewMI);
833  } else {
834  // Ask the target to do the actual folding.
835  NewMI = foldMemoryOperandImpl(MF, MI, Ops, MI, LoadMI, LIS);
836  }
837 
838  if (!NewMI) return nullptr;
839 
840  // Copy the memoperands from the load to the folded instruction.
841  if (MI.memoperands_empty()) {
842  NewMI->setMemRefs(LoadMI.memoperands_begin(), LoadMI.memoperands_end());
843  }
844  else {
845  // Handle the rare case of folding multiple loads.
846  NewMI->setMemRefs(MI.memoperands_begin(), MI.memoperands_end());
848  E = LoadMI.memoperands_end();
849  I != E; ++I) {
850  NewMI->addMemOperand(MF, *I);
851  }
852  }
853  return NewMI;
854 }
855 
856 bool TargetInstrInfo::isReallyTriviallyReMaterializableGeneric(
857  const MachineInstr &MI, AliasAnalysis *AA) const {
858  const MachineFunction &MF = *MI.getParent()->getParent();
859  const MachineRegisterInfo &MRI = MF.getRegInfo();
860 
861  // Remat clients assume operand 0 is the defined register.
862  if (!MI.getNumOperands() || !MI.getOperand(0).isReg())
863  return false;
864  unsigned DefReg = MI.getOperand(0).getReg();
865 
866  // A sub-register definition can only be rematerialized if the instruction
867  // doesn't read the other parts of the register. Otherwise it is really a
868  // read-modify-write operation on the full virtual register which cannot be
869  // moved safely.
871  MI.getOperand(0).getSubReg() && MI.readsVirtualRegister(DefReg))
872  return false;
873 
874  // A load from a fixed stack slot can be rematerialized. This may be
875  // redundant with subsequent checks, but it's target-independent,
876  // simple, and a common case.
877  int FrameIdx = 0;
878  if (isLoadFromStackSlot(MI, FrameIdx) &&
879  MF.getFrameInfo().isImmutableObjectIndex(FrameIdx))
880  return true;
881 
882  // Avoid instructions obviously unsafe for remat.
883  if (MI.isNotDuplicable() || MI.mayStore() || MI.hasUnmodeledSideEffects())
884  return false;
885 
886  // Don't remat inline asm. We have no idea how expensive it is
887  // even if it's side effect free.
888  if (MI.isInlineAsm())
889  return false;
890 
891  // Avoid instructions which load from potentially varying memory.
892  if (MI.mayLoad() && !MI.isDereferenceableInvariantLoad(AA))
893  return false;
894 
895  // If any of the registers accessed are non-constant, conservatively assume
896  // the instruction is not rematerializable.
897  for (unsigned i = 0, e = MI.getNumOperands(); i != e; ++i) {
898  const MachineOperand &MO = MI.getOperand(i);
899  if (!MO.isReg()) continue;
900  unsigned Reg = MO.getReg();
901  if (Reg == 0)
902  continue;
903 
904  // Check for a well-behaved physical register.
906  if (MO.isUse()) {
907  // If the physreg has no defs anywhere, it's just an ambient register
908  // and we can freely move its uses. Alternatively, if it's allocatable,
909  // it could get allocated to something with a def during allocation.
910  if (!MRI.isConstantPhysReg(Reg))
911  return false;
912  } else {
913  // A physreg def. We can't remat it.
914  return false;
915  }
916  continue;
917  }
918 
919  // Only allow one virtual-register def. There may be multiple defs of the
920  // same virtual register, though.
921  if (MO.isDef() && Reg != DefReg)
922  return false;
923 
924  // Don't allow any virtual-register uses. Rematting an instruction with
925  // virtual register uses would length the live ranges of the uses, which
926  // is not necessarily a good idea, certainly not "trivial".
927  if (MO.isUse())
928  return false;
929  }
930 
931  // Everything checked out.
932  return true;
933 }
934 
936  const MachineFunction *MF = MI.getParent()->getParent();
938  bool StackGrowsDown =
940 
941  unsigned FrameSetupOpcode = getCallFrameSetupOpcode();
942  unsigned FrameDestroyOpcode = getCallFrameDestroyOpcode();
943 
944  if (MI.getOpcode() != FrameSetupOpcode &&
945  MI.getOpcode() != FrameDestroyOpcode)
946  return 0;
947 
948  int SPAdj = MI.getOperand(0).getImm();
949  SPAdj = TFI->alignSPAdjust(SPAdj);
950 
951  if ((!StackGrowsDown && MI.getOpcode() == FrameSetupOpcode) ||
952  (StackGrowsDown && MI.getOpcode() == FrameDestroyOpcode))
953  SPAdj = -SPAdj;
954 
955  return SPAdj;
956 }
957 
958 /// isSchedulingBoundary - Test if the given instruction should be
959 /// considered a scheduling boundary. This primarily includes labels
960 /// and terminators.
962  const MachineBasicBlock *MBB,
963  const MachineFunction &MF) const {
964  // Terminators and labels can't be scheduled around.
965  if (MI.isTerminator() || MI.isPosition())
966  return true;
967 
968  // Don't attempt to schedule around any instruction that defines
969  // a stack-oriented pointer, as it's unlikely to be profitable. This
970  // saves compile time, because it doesn't require every single
971  // stack slot reference to depend on the instruction that does the
972  // modification.
973  const TargetLowering &TLI = *MF.getSubtarget().getTargetLowering();
974  const TargetRegisterInfo *TRI = MF.getSubtarget().getRegisterInfo();
976 }
977 
978 // Provide a global flag for disabling the PreRA hazard recognizer that targets
979 // may choose to honor.
981  return !DisableHazardRecognizer;
982 }
983 
984 // Default implementation of CreateTargetRAHazardRecognizer.
987  const ScheduleDAG *DAG) const {
988  // Dummy hazard recognizer allows all instructions to issue.
989  return new ScheduleHazardRecognizer();
990 }
991 
992 // Default implementation of CreateTargetMIHazardRecognizer.
995  const ScheduleDAG *DAG) const {
996  return (ScheduleHazardRecognizer *)
997  new ScoreboardHazardRecognizer(II, DAG, "misched");
998 }
999 
1000 // Default implementation of CreateTargetPostRAHazardRecognizer.
1003  const ScheduleDAG *DAG) const {
1004  return (ScheduleHazardRecognizer *)
1005  new ScoreboardHazardRecognizer(II, DAG, "post-RA-sched");
1006 }
1007 
1008 //===----------------------------------------------------------------------===//
1009 // SelectionDAG latency interface.
1010 //===----------------------------------------------------------------------===//
1011 
1012 int
1014  SDNode *DefNode, unsigned DefIdx,
1015  SDNode *UseNode, unsigned UseIdx) const {
1016  if (!ItinData || ItinData->isEmpty())
1017  return -1;
1018 
1019  if (!DefNode->isMachineOpcode())
1020  return -1;
1021 
1022  unsigned DefClass = get(DefNode->getMachineOpcode()).getSchedClass();
1023  if (!UseNode->isMachineOpcode())
1024  return ItinData->getOperandCycle(DefClass, DefIdx);
1025  unsigned UseClass = get(UseNode->getMachineOpcode()).getSchedClass();
1026  return ItinData->getOperandLatency(DefClass, DefIdx, UseClass, UseIdx);
1027 }
1028 
1030  SDNode *N) const {
1031  if (!ItinData || ItinData->isEmpty())
1032  return 1;
1033 
1034  if (!N->isMachineOpcode())
1035  return 1;
1036 
1037  return ItinData->getStageLatency(get(N->getMachineOpcode()).getSchedClass());
1038 }
1039 
1040 //===----------------------------------------------------------------------===//
1041 // MachineInstr latency interface.
1042 //===----------------------------------------------------------------------===//
1043 
1045  const MachineInstr &MI) const {
1046  if (!ItinData || ItinData->isEmpty())
1047  return 1;
1048 
1049  unsigned Class = MI.getDesc().getSchedClass();
1050  int UOps = ItinData->Itineraries[Class].NumMicroOps;
1051  if (UOps >= 0)
1052  return UOps;
1053 
1054  // The # of u-ops is dynamically determined. The specific target should
1055  // override this function to return the right number.
1056  return 1;
1057 }
1058 
1059 /// Return the default expected latency for a def based on it's opcode.
1061  const MachineInstr &DefMI) const {
1062  if (DefMI.isTransient())
1063  return 0;
1064  if (DefMI.mayLoad())
1065  return SchedModel.LoadLatency;
1066  if (isHighLatencyDef(DefMI.getOpcode()))
1067  return SchedModel.HighLatency;
1068  return 1;
1069 }
1070 
1072  return 0;
1073 }
1074 
1076  const MachineInstr &MI,
1077  unsigned *PredCost) const {
1078  // Default to one cycle for no itinerary. However, an "empty" itinerary may
1079  // still have a MinLatency property, which getStageLatency checks.
1080  if (!ItinData)
1081  return MI.mayLoad() ? 2 : 1;
1082 
1083  return ItinData->getStageLatency(MI.getDesc().getSchedClass());
1084 }
1085 
1087  const MachineInstr &DefMI,
1088  unsigned DefIdx) const {
1089  const InstrItineraryData *ItinData = SchedModel.getInstrItineraries();
1090  if (!ItinData || ItinData->isEmpty())
1091  return false;
1092 
1093  unsigned DefClass = DefMI.getDesc().getSchedClass();
1094  int DefCycle = ItinData->getOperandCycle(DefClass, DefIdx);
1095  return (DefCycle != -1 && DefCycle <= 1);
1096 }
1097 
1098 /// Both DefMI and UseMI must be valid. By default, call directly to the
1099 /// itinerary. This may be overriden by the target.
1101  const MachineInstr &DefMI,
1102  unsigned DefIdx,
1103  const MachineInstr &UseMI,
1104  unsigned UseIdx) const {
1105  unsigned DefClass = DefMI.getDesc().getSchedClass();
1106  unsigned UseClass = UseMI.getDesc().getSchedClass();
1107  return ItinData->getOperandLatency(DefClass, DefIdx, UseClass, UseIdx);
1108 }
1109 
1110 /// If we can determine the operand latency from the def only, without itinerary
1111 /// lookup, do so. Otherwise return -1.
1113  const InstrItineraryData *ItinData, const MachineInstr &DefMI) const {
1114 
1115  // Let the target hook getInstrLatency handle missing itineraries.
1116  if (!ItinData)
1117  return getInstrLatency(ItinData, DefMI);
1118 
1119  if(ItinData->isEmpty())
1120  return defaultDefLatency(ItinData->SchedModel, DefMI);
1121 
1122  // ...operand lookup required
1123  return -1;
1124 }
1125 
1127  const MachineInstr &MI, unsigned DefIdx,
1128  SmallVectorImpl<RegSubRegPairAndIdx> &InputRegs) const {
1129  assert((MI.isRegSequence() ||
1130  MI.isRegSequenceLike()) && "Instruction do not have the proper type");
1131 
1132  if (!MI.isRegSequence())
1133  return getRegSequenceLikeInputs(MI, DefIdx, InputRegs);
1134 
1135  // We are looking at:
1136  // Def = REG_SEQUENCE v0, sub0, v1, sub1, ...
1137  assert(DefIdx == 0 && "REG_SEQUENCE only has one def");
1138  for (unsigned OpIdx = 1, EndOpIdx = MI.getNumOperands(); OpIdx != EndOpIdx;
1139  OpIdx += 2) {
1140  const MachineOperand &MOReg = MI.getOperand(OpIdx);
1141  const MachineOperand &MOSubIdx = MI.getOperand(OpIdx + 1);
1142  assert(MOSubIdx.isImm() &&
1143  "One of the subindex of the reg_sequence is not an immediate");
1144  // Record Reg:SubReg, SubIdx.
1145  InputRegs.push_back(RegSubRegPairAndIdx(MOReg.getReg(), MOReg.getSubReg(),
1146  (unsigned)MOSubIdx.getImm()));
1147  }
1148  return true;
1149 }
1150 
1152  const MachineInstr &MI, unsigned DefIdx,
1153  RegSubRegPairAndIdx &InputReg) const {
1154  assert((MI.isExtractSubreg() ||
1155  MI.isExtractSubregLike()) && "Instruction do not have the proper type");
1156 
1157  if (!MI.isExtractSubreg())
1158  return getExtractSubregLikeInputs(MI, DefIdx, InputReg);
1159 
1160  // We are looking at:
1161  // Def = EXTRACT_SUBREG v0.sub1, sub0.
1162  assert(DefIdx == 0 && "EXTRACT_SUBREG only has one def");
1163  const MachineOperand &MOReg = MI.getOperand(1);
1164  const MachineOperand &MOSubIdx = MI.getOperand(2);
1165  assert(MOSubIdx.isImm() &&
1166  "The subindex of the extract_subreg is not an immediate");
1167 
1168  InputReg.Reg = MOReg.getReg();
1169  InputReg.SubReg = MOReg.getSubReg();
1170  InputReg.SubIdx = (unsigned)MOSubIdx.getImm();
1171  return true;
1172 }
1173 
1175  const MachineInstr &MI, unsigned DefIdx,
1176  RegSubRegPair &BaseReg, RegSubRegPairAndIdx &InsertedReg) const {
1177  assert((MI.isInsertSubreg() ||
1178  MI.isInsertSubregLike()) && "Instruction do not have the proper type");
1179 
1180  if (!MI.isInsertSubreg())
1181  return getInsertSubregLikeInputs(MI, DefIdx, BaseReg, InsertedReg);
1182 
1183  // We are looking at:
1184  // Def = INSERT_SEQUENCE v0, v1, sub0.
1185  assert(DefIdx == 0 && "INSERT_SUBREG only has one def");
1186  const MachineOperand &MOBaseReg = MI.getOperand(1);
1187  const MachineOperand &MOInsertedReg = MI.getOperand(2);
1188  const MachineOperand &MOSubIdx = MI.getOperand(3);
1189  assert(MOSubIdx.isImm() &&
1190  "One of the subindex of the reg_sequence is not an immediate");
1191  BaseReg.Reg = MOBaseReg.getReg();
1192  BaseReg.SubReg = MOBaseReg.getSubReg();
1193 
1194  InsertedReg.Reg = MOInsertedReg.getReg();
1195  InsertedReg.SubReg = MOInsertedReg.getSubReg();
1196  InsertedReg.SubIdx = (unsigned)MOSubIdx.getImm();
1197  return true;
1198 }
virtual ScheduleHazardRecognizer * CreateTargetMIHazardRecognizer(const InstrItineraryData *, const ScheduleDAG *DAG) const
Allocate and return a hazard recognizer to use for this target when scheduling the machine instructio...
const MachineFunction * getParent() const
Return the MachineFunction containing this basic block.
virtual MachineInstr * duplicate(MachineInstr &Orig, MachineFunction &MF) const
Create a duplicate of the Orig instruction in MF.
instr_iterator erase(instr_iterator I)
Remove an instruction from the instruction list and delete it.
void reassociateOps(MachineInstr &Root, MachineInstr &Prev, MachineCombinerPattern Pattern, SmallVectorImpl< MachineInstr * > &InsInstrs, SmallVectorImpl< MachineInstr * > &DelInstrs, DenseMap< unsigned, unsigned > &InstrIdxForVirtReg) const
Attempt to reassociate Root and Prev according to Pattern to reduce critical path length...
virtual unsigned getInstrLatency(const InstrItineraryData *ItinData, const MachineInstr &MI, unsigned *PredCost=nullptr) const
Compute the instruction latency of a given instruction.
bool isBranch(QueryType Type=AnyInBundle) const
Returns true if this is a conditional, unconditional, or indirect branch.
Definition: MachineInstr.h:448
LLVM_ATTRIBUTE_NORETURN void report_fatal_error(Error Err, bool gen_crash_diag=true)
Report a serious error, calling any installed error handler.
size_t i
virtual bool getMachineCombinerPatterns(MachineInstr &Root, SmallVectorImpl< MachineCombinerPattern > &Patterns) const
Return true when there is potentially a faster code sequence for an instruction chain ending in Root...
virtual bool getStackSlotRange(const TargetRegisterClass *RC, unsigned SubIdx, unsigned &Size, unsigned &Offset, const MachineFunction &MF) const
Compute the size in bytes and offset within a stack slot of a spilled register or subregister...
static MachineInstr * foldPatchpoint(MachineFunction &MF, MachineInstr &MI, ArrayRef< unsigned > Ops, int FrameIndex, const TargetInstrInfo &TII)
unsigned getNumDefs() const
Return the number of MachineOperands that are register definitions.
Definition: MCInstrDesc.h:216
unsigned createVirtualRegister(const TargetRegisterClass *RegClass)
createVirtualRegister - Create and return a new virtual register in the function with the specified r...
bool usePreRAHazardRecognizer() const
Provide a global flag for disabling the PreRA hazard recognizer that targets may choose to honor...
bool hasSubClassEq(const TargetRegisterClass *RC) const
Returns true if RC is a sub-class of or equal to this class.
Describe properties that are true of each instruction in the target description file.
Definition: MCInstrDesc.h:163
bool mayStore(QueryType Type=AnyInBundle) const
Return true if this instruction could possibly modify memory.
Definition: MachineInstr.h:605
unsigned defaultDefLatency(const MCSchedModel &SchedModel, const MachineInstr &DefMI) const
Return the default expected latency for a def based on its opcode.
void setIsUndef(bool Val=true)
static bool isVirtualRegister(unsigned Reg)
Return true if the specified register number is in the virtual register namespace.
bool getRegSequenceInputs(const MachineInstr &MI, unsigned DefIdx, SmallVectorImpl< RegSubRegPairAndIdx > &InputRegs) const
Build the equivalent inputs of a REG_SEQUENCE for the given MI and DefIdx.
bool isPredicable(QueryType Type=AllInBundle) const
Return true if this instruction has a predicate operand that controls execution.
Definition: MachineInstr.h:478
bool readsVirtualRegister(unsigned Reg) const
Return true if the MachineInstr reads the specified virtual register.
Definition: MachineInstr.h:873
virtual ScheduleHazardRecognizer * CreateTargetPostRAHazardRecognizer(const InstrItineraryData *, const ScheduleDAG *DAG) const
Allocate and return a hazard recognizer to use for this target when scheduling the machine instructio...
static cl::opt< bool > DisableHazardRecognizer("disable-sched-hazard", cl::Hidden, cl::init(false), cl::desc("Disable hazard detection during preRA scheduling"))
const MCInstrDesc & getDesc() const
Returns the target instruction descriptor of this MachineInstr.
Definition: MachineInstr.h:270
A debug info location.
Definition: DebugLoc.h:34
MachineInstr * commuteInstruction(MachineInstr &MI, bool NewMI=false, unsigned OpIdx1=CommuteAnyOperandIndex, unsigned OpIdx2=CommuteAnyOperandIndex) const
This method commutes the operands of the given machine instruction MI.
bool canFoldAsLoad(QueryType Type=IgnoreBundle) const
Return true for instructions that can be folded as memory operands in other instructions.
Definition: MachineInstr.h:538
unsigned getVarIdx() const
Get the operand index of the variable list of non-argument operands.
Definition: StackMaps.h:121
MachineInstr * CreateMachineInstr(const MCInstrDesc &MCID, const DebugLoc &DL, bool NoImp=false)
CreateMachineInstr - Allocate a new MachineInstr.
bool isExtractSubreg() const
Definition: MachineInstr.h:813
bool isTerminator(QueryType Type=AnyInBundle) const
Returns true if this instruction part of the terminator for a basic block.
Definition: MachineInstr.h:440
int NumMicroOps
Number of micro-ops; -1 means it's variable
StringRef getCommentString() const
Definition: MCAsmInfo.h:471
bool isDereferenceableInvariantLoad(AliasAnalysis *AA) const
Return true if this load instruction never traps and points to a memory location whose value doesn't ...
std::pair< iterator, bool > insert(const std::pair< KeyT, ValueT > &KV)
Definition: DenseMap.h:172
unsigned getSize() const
Return the size of the register in bytes, which is also the size of a stack slot allocated to hold a ...
int getOperandCycle(unsigned ItinClassIndx, unsigned OperandIdx) const
Return the cycle for the given class and operand.
A description of a memory reference used in the backend.
unsigned getCallFrameDestroyOpcode() const
const TargetSubtargetInfo & getSubtarget() const
getSubtarget - Return the subtarget for which this machine code is being compiled.
struct fuzzer::@269 Flags
Provide an instruction scheduling machine model to CodeGen passes.
const HexagonInstrInfo * TII
bool isImm() const
isImm - Tests if this is a MO_Immediate operand.
const InstrItinerary * Itineraries
Array of itineraries selected.
const TargetRegisterClass * getRegClass(unsigned i) const
Returns the register class associated with the enumeration value.
This class consists of common code factored out of the SmallVector class to reduce code duplication b...
Definition: APFloat.h:32
bool isReg() const
isReg - Tests if this is a MO_Register operand.
unsigned SubReg
bool mayLoad(QueryType Type=AnyInBundle) const
Return true if this instruction could possibly read memory.
Definition: MachineInstr.h:592
const TargetRegisterClass * getRegClass(unsigned Reg) const
Return the register class of the specified virtual register.
Reg
All possible values of the reg field in the ModR/M byte.
The MachineFrameInfo class represents an abstract stack frame until prolog/epilog code is inserted...
int computeDefOperandLatency(const InstrItineraryData *ItinData, const MachineInstr &DefMI) const
If we can determine the operand latency from the def only, without itinerary lookup, do so.
bool isUndef() const
const MachineInstrBuilder & addImm(int64_t Val) const
Add a new immediate operand.
This class defines information used to lower LLVM code to legal SelectionDAG operators that the targe...
unsigned getNumOperands() const
Access to explicit operands of the instruction.
Definition: MachineInstr.h:277
virtual unsigned insertBranch(MachineBasicBlock &MBB, MachineBasicBlock *TBB, MachineBasicBlock *FBB, ArrayRef< MachineOperand > Cond, const DebugLoc &DL, int *BytesAdded=nullptr) const
Insert branch code into the end of the specified MachineBasicBlock.
bool isKill() const
MachineInstr * foldMemoryOperand(MachineInstr &MI, ArrayRef< unsigned > Ops, int FrameIndex, LiveIntervals *LIS=nullptr) const
Attempt to fold a load or store of the specified stack slot into the specified machine instruction fo...
unsigned getCallFrameSetupOpcode() const
These methods return the opcode of the frame setup/destroy instructions if they exist (-1 otherwise)...
MachineBasicBlock * MBB
virtual bool PredicateInstruction(MachineInstr &MI, ArrayRef< MachineOperand > Pred) const
Convert the instruction into a predicated instruction.
ArrayRef - Represent a constant reference to an array (0 or more elements consecutively in memory)...
Definition: APInt.h:33
virtual bool isUnpredicatedTerminator(const MachineInstr &MI) const
Returns true if the instruction is a terminator instruction that has not been predicated.
bool isLittleEndian() const
Layout endianness...
Definition: DataLayout.h:220
virtual MachineInstr * foldMemoryOperandImpl(MachineFunction &MF, MachineInstr &MI, ArrayRef< unsigned > Ops, MachineBasicBlock::iterator InsertPt, int FrameIndex, LiveIntervals *LIS=nullptr) const
Target-dependent implementation for foldMemoryOperand.
Itinerary data supplied by a subtarget to be used by a target.
bool isImmutableObjectIndex(int ObjectIdx) const
isImmutableObjectIndex - Returns true if the specified index corresponds to an immutable object...
int64_t getImm() const
void addMemOperand(MachineFunction &MF, MachineMemOperand *MO)
Add a MachineMemOperand to the machine instruction.
const DataLayout & getDataLayout() const
Return the DataLayout attached to the Module associated to this MF.
LLVM_NODISCARD LLVM_ATTRIBUTE_ALWAYS_INLINE size_t size() const
size - Get the string size.
Definition: StringRef.h:135
size_t size() const
size - Get the array size.
Definition: ArrayRef.h:141
const TargetRegisterClass * constrainRegClass(unsigned Reg, const TargetRegisterClass *RC, unsigned MinNumRegs=0)
constrainRegClass - Constrain the register class of the specified virtual register to be a common sub...
virtual bool hasLoadFromStackSlot(const MachineInstr &MI, const MachineMemOperand *&MMO, int &FrameIndex) const
If the specified machine instruction has a load from a stack slot, return true along with the FrameIn...
Instances of this class represent a single low-level machine instruction.
Definition: MCInst.h:150
unsigned getKillRegState(bool B)
bool getExtractSubregInputs(const MachineInstr &MI, unsigned DefIdx, RegSubRegPairAndIdx &InputReg) const
Build the equivalent inputs of a EXTRACT_SUBREG for the given MI and DefIdx.
unsigned getOpcode() const
Returns the opcode of this MachineInstr.
Definition: MachineInstr.h:273
static GCRegistry::Add< CoreCLRGC > E("coreclr","CoreCLR-compatible GC")
const MachineBasicBlock * getParent() const
Definition: MachineInstr.h:131
TargetInstrInfo - Interface to description of machine instruction set.
This class is intended to be used as a base class for asm properties and features specific to the tar...
Definition: MCAsmInfo.h:57
mmo_iterator memoperands_end() const
Definition: MachineInstr.h:359
bool isInsertSubreg() const
Definition: MachineInstr.h:795
bool isBundle() const
Definition: MachineInstr.h:804
static const unsigned CommuteAnyOperandIndex
MachineInstrBuilder BuildMI(MachineFunction &MF, const DebugLoc &DL, const MCInstrDesc &MCID)
Builder interface. Specify how to create the initial instruction itself.
virtual bool produceSameValue(const MachineInstr &MI0, const MachineInstr &MI1, const MachineRegisterInfo *MRI=nullptr) const
Return true if two machine instructions would produce identical values.
initializer< Ty > init(const Ty &Val)
Definition: CommandLine.h:395
unsigned LoadLatency
Definition: MCSchedule.h:168
virtual void reMaterialize(MachineBasicBlock &MBB, MachineBasicBlock::iterator MI, unsigned DestReg, unsigned SubIdx, const MachineInstr &Orig, const TargetRegisterInfo &TRI) const
Re-issue the specified 'original' instruction at the specific location targeting a new destination re...
* if(!EatIfPresent(lltok::kw_thread_local)) return false
ParseOptionalThreadLocal := /*empty.
unsigned const MachineRegisterInfo * MRI
HazardRecognizer - This determines whether or not an instruction can be issued this cycle...
const InstrItineraryData * getInstrItineraries() const
MachineFrameInfo & getFrameInfo()
getFrameInfo - Return the frame info object for the current function.
MachineInstrBuilder & UseMI
const MachineOperand & getOperand(unsigned i) const
Definition: MachineInstr.h:279
unsigned getStageLatency(unsigned ItinClassIndx) const
Return the total stage latency of the given class.
virtual int getSPAdjust(const MachineInstr &MI) const
Returns the actual stack pointer adjustment made by an instruction as part of a call sequence...
virtual void loadRegFromStackSlot(MachineBasicBlock &MBB, MachineBasicBlock::iterator MI, unsigned DestReg, int FrameIndex, const TargetRegisterClass *RC, const TargetRegisterInfo *TRI) const
Load the specified register of the given register class from the specified stack frame index...
void setMBB(MachineBasicBlock *MBB)
bool isCopy() const
Definition: MachineInstr.h:807
bool isIdenticalTo(const MachineInstr &Other, MICheckType Check=CheckDefs) const
Return true if this instruction is identical to Other.
virtual bool getInsertSubregLikeInputs(const MachineInstr &MI, unsigned DefIdx, RegSubRegPair &BaseReg, RegSubRegPairAndIdx &InsertedReg) const
Target-dependent implementation of getInsertSubregInputs.
bool isPosition() const
Definition: MachineInstr.h:775
int alignSPAdjust(int SPAdj) const
alignSPAdjust - This method aligns the stack adjustment to the correct alignment. ...
uint32_t Offset
virtual bool findCommutedOpIndices(MachineInstr &MI, unsigned &SrcOpIdx1, unsigned &SrcOpIdx2) const
Returns true iff the routine could find two commutable operands in the given machine instruction...
void setImm(int64_t immVal)
void setIsInternalRead(bool Val=true)
bool hasReassociableSibling(const MachineInstr &Inst, bool &Commuted) const
Return true when Inst has reassociable sibling.
bool hasUnmodeledSideEffects() const
Return true if this instruction has side effects that are not modeled by mayLoad / mayStore...
MI-level patchpoint operands.
Definition: StackMaps.h:70
virtual bool isAssociativeAndCommutative(const MachineInstr &Inst) const
Return true when Inst is both associative and commutative.
virtual bool isSchedulingBoundary(const MachineInstr &MI, const MachineBasicBlock *MBB, const MachineFunction &MF) const
Test if the given instruction should be considered a scheduling boundary.
MachineCombinerPattern
These are instruction patterns matched by the machine combiner pass.
virtual void storeRegToStackSlot(MachineBasicBlock &MBB, MachineBasicBlock::iterator MI, unsigned SrcReg, bool isKill, int FrameIndex, const TargetRegisterClass *RC, const TargetRegisterInfo *TRI) const
Store the specified register of the given register class to the specified stack frame index...
unsigned getStackPointerRegisterToSaveRestore() const
If a physical register, this specifies the register that llvm.savestack/llvm.restorestack should save...
unsigned getSubReg() const
bool isNotDuplicable(QueryType Type=AnyInBundle) const
Return true if this instruction cannot be safely duplicated.
Definition: MachineInstr.h:508
TargetRegisterInfo base class - We assume that the target defines a static array of TargetRegisterDes...
#define llvm_unreachable(msg)
Marks that the current location is not supposed to be reachable.
int getOperandConstraint(unsigned OpNum, MCOI::OperandConstraint Constraint) const
Returns the value of the specific constraint if it is set.
Definition: MCInstrDesc.h:187
const MCInstrDesc & get(unsigned Opcode) const
Return the machine instruction descriptor that corresponds to the specified instruction opcode...
Definition: MCInstrInfo.h:45
void setIsKill(bool Val=true)
int64_t getObjectOffset(int ObjectIdx) const
Return the assigned stack offset of the specified object from the incoming stack pointer.
The memory access writes data.
bool isInsertSubregLike(QueryType Type=IgnoreBundle) const
Return true if this instruction behaves the same way as the generic INSERT_SUBREG instructions...
Definition: MachineInstr.h:581
virtual const TargetFrameLowering * getFrameLowering() const
virtual unsigned getPredicationCost(const MachineInstr &MI) const
bool memoperands_empty() const
Return true if we don't have any memory operands which describe the memory access done by this instruction.
Definition: MachineInstr.h:363
Iterator for intrusive lists based on ilist_node.
bool isReassociationCandidate(const MachineInstr &Inst, bool &Commuted) const
Return true if the input Inst is part of a chain of dependent ops that are suitable for reassociatio...
virtual bool hasLowDefLatency(const TargetSchedModel &SchedModel, const MachineInstr &DefMI, unsigned DefIdx) const
Compute operand latency of a def of 'Reg'.
void addSuccessor(MachineBasicBlock *Succ, BranchProbability Prob=BranchProbability::getUnknown())
Add Succ as a successor of this MachineBasicBlock.
void substituteRegister(unsigned FromReg, unsigned ToReg, unsigned SubIdx, const TargetRegisterInfo &RegInfo)
Replace all occurrences of FromReg with ToReg:SubIdx, properly composing subreg indices where necessa...
virtual void insertNoop(MachineBasicBlock &MBB, MachineBasicBlock::iterator MI) const
Insert a noop into the instruction stream at the specified point.
bool getInsertSubregInputs(const MachineInstr &MI, unsigned DefIdx, RegSubRegPair &BaseReg, RegSubRegPairAndIdx &InsertedReg) const
Build the equivalent inputs of a INSERT_SUBREG for the given MI and DefIdx.
MachineInstr * CloneMachineInstr(const MachineInstr *Orig)
CloneMachineInstr - Create a new MachineInstr which is a copy of the 'Orig' instruction, identical in all ways except the instruction has no parent, prev, or next.
MachineOperand class - Representation of each machine instruction operand.
This is a 'vector' (really, a variable-sized array), optimized for the case when the array is small...
Definition: SmallVector.h:843
unsigned getObjectAlignment(int ObjectIdx) const
Return the alignment of the specified stack object.
A pair composed of a register and a sub-register index.
virtual const TargetLowering * getTargetLowering() const
bool isInlineAsm() const
Definition: MachineInstr.h:789
Information about stack frame layout on the target.
unsigned HighLatency
Definition: MCSchedule.h:175
bool isMBB() const
isMBB - Tests if this is a MO_MachineBasicBlock operand.
Represents one node in the SelectionDAG.
const MachineInstrBuilder & addFrameIndex(int Idx) const
bool isTransient() const
Return true if this is a transient instruction that is either very likely to be eliminated during reg...
Definition: MachineInstr.h:833
void swap(llvm::BitVector &LHS, llvm::BitVector &RHS)
Implement std::swap in terms of BitVector swap.
Definition: BitVector.h:586
virtual unsigned isLoadFromStackSlot(const MachineInstr &MI, int &FrameIndex) const
If the specified machine instruction is a direct load from a stack slot, return the virtual or physic...
MachineMemOperand * getMachineMemOperand(MachinePointerInfo PtrInfo, MachineMemOperand::Flags f, uint64_t s, unsigned base_alignment, const AAMDNodes &AAInfo=AAMDNodes(), const MDNode *Ranges=nullptr, SynchronizationScope SynchScope=CrossThread, AtomicOrdering Ordering=AtomicOrdering::NotAtomic, AtomicOrdering FailureOrdering=AtomicOrdering::NotAtomic)
getMachineMemOperand - Allocate a new MachineMemOperand.
const TargetRegisterClass * getRegClassConstraint(unsigned OpIdx, const TargetInstrInfo *TII, const TargetRegisterInfo *TRI) const
Compute the static register class constraint for operand OpIdx.
static unsigned getReg(const void *D, unsigned RC, unsigned RegNo)
bool isExtractSubregLike(QueryType Type=IgnoreBundle) const
Return true if this instruction behaves the same way as the generic EXTRACT_SUBREG instructions...
Definition: MachineInstr.h:567
MachineInstr * getUniqueVRegDef(unsigned Reg) const
getUniqueVRegDef - Return the unique machine instr that defines the specified virtual register or nul...
const DebugLoc & getDebugLoc() const
Returns the debug location id of this MachineInstr.
Definition: MachineInstr.h:250
int getOperandLatency(unsigned DefClass, unsigned DefIdx, unsigned UseClass, unsigned UseIdx) const
Compute and return the use operand latency of a given itinerary class and operand index if the value ...
virtual bool hasReassociableOperands(const MachineInstr &Inst, const MachineBasicBlock *MBB) const
Return true when Inst has reassociable operands in the same MBB.
MachineRegisterInfo - Keep track of information for virtual and physical registers, including vreg register classes, use/def chains for registers, etc.
The memory access reads data.
TargetSubtargetInfo - Generic base class for all target subtargets.
virtual bool getExtractSubregLikeInputs(const MachineInstr &MI, unsigned DefIdx, RegSubRegPairAndIdx &InputReg) const
Target-dependent implementation of getExtractSubregInputs.
virtual void setSpecialOperandAttr(MachineInstr &OldMI1, MachineInstr &OldMI2, MachineInstr &NewMI1, MachineInstr &NewMI2) const
This is an architecture-specific helper function of reassociateOps.
static MachinePointerInfo getFixedStack(MachineFunction &MF, int FI, int64_t Offset=0)
Return a MachinePointerInfo record that refers to the specified FrameIndex.
Representation of each machine instruction.
Definition: MachineInstr.h:52
static bool isPhysicalRegister(unsigned Reg)
Return true if the specified register number is in the physical register namespace.
StackDirection getStackGrowthDirection() const
getStackGrowthDirection - Return the direction the stack grows
unsigned getSchedClass() const
Return the scheduling class for this instruction.
Definition: MCInstrDesc.h:556
static bool fixCommutedOpIndices(unsigned &ResultIdx1, unsigned &ResultIdx2, unsigned CommutableOpIdx1, unsigned CommutableOpIdx2)
Assigns the (CommutableOpIdx1, CommutableOpIdx2) pair of commutable operand indices to (ResultIdx1...
virtual unsigned getInlineAsmLength(const char *Str, const MCAsmInfo &MAI) const
Measure the specified inline asm to determine an approximation of its length.
MCSchedModel SchedModel
Basic machine properties.
MachineRegisterInfo & getRegInfo()
getRegInfo - Return information about the registers currently in use.
MI-level stackmap operands.
Definition: StackMaps.h:29
int16_t RegClass
This specifies the register class enumeration of the operand if the operand is a register.
Definition: MCInstrDesc.h:76
virtual bool isThroughputPattern(MachineCombinerPattern Pattern) const
Return true when a code sequence can improve throughput.
virtual bool isPredicated(const MachineInstr &MI) const
Returns true if the instruction is already predicated.
const char * getSeparatorString() const
Definition: MCAsmInfo.h:465
virtual MachineInstr * commuteInstructionImpl(MachineInstr &MI, bool NewMI, unsigned OpIdx1, unsigned OpIdx2) const
This method commutes the operands of the given machine instruction MI.
bool hasOneNonDBGUse(unsigned RegNo) const
hasOneNonDBGUse - Return true if there is exactly one non-Debug instruction using the specified regis...
void setReg(unsigned Reg)
Change the register this operand corresponds to.
#define I(x, y, z)
Definition: MD5.cpp:54
#define N
void setSubReg(unsigned subReg)
virtual bool getRegSequenceLikeInputs(const MachineInstr &MI, unsigned DefIdx, SmallVectorImpl< RegSubRegPairAndIdx > &InputRegs) const
Target-dependent implementation of getRegSequenceInputs.
MI-level Statepoint operands.
Definition: StackMaps.h:145
bool isConstantPhysReg(unsigned PhysReg) const
Returns true if PhysReg is unallocatable and constant throughout the function.
virtual void getNoopForMachoTarget(MCInst &NopInst) const
Return the noop instruction to use for a noop.
virtual int getOperandLatency(const InstrItineraryData *ItinData, SDNode *DefNode, unsigned DefIdx, SDNode *UseNode, unsigned UseIdx) const
bool isLookupPtrRegClass() const
Set if this operand is a pointer value and it requires a callback to look up its register class...
Definition: MCInstrDesc.h:90
instr_iterator insert(instr_iterator I, MachineInstr *M)
Insert MI into the instruction list before I, possibly inside a bundle.
unsigned getSubRegIdxOffset(unsigned Idx) const
Get the offset of the bit range covered by a sub-register index.
void removeSuccessor(MachineBasicBlock *Succ, bool NormalizeSuccProbs=false)
Remove successor from the successors list of this MachineBasicBlock.
virtual unsigned getNumMicroOps(const InstrItineraryData *ItinData, const MachineInstr &MI) const
Return the number of u-operations the given machine instruction will be decoded to on the target cpu...
unsigned getVarIdx() const
Get the operand index of the variable list of non-argument operands.
Definition: StackMaps.h:50
virtual ScheduleHazardRecognizer * CreateTargetHazardRecognizer(const TargetSubtargetInfo *STI, const ScheduleDAG *DAG) const
Allocate and return a hazard recognizer to use for this target when scheduling the machine instructio...
unsigned getReg() const
getReg - Returns the register number.
bool isCommutable(QueryType Type=IgnoreBundle) const
Return true if this may be a 2- or 3-address instruction (of the form "X = op Y, Z, ..."), which produces the same result if Y and Z are exchanged.
Definition: MachineInstr.h:633
assert(ImpDefSCC.getReg()==AMDGPU::SCC &&ImpDefSCC.isDef())
virtual const TargetInstrInfo * getInstrInfo() const
LLVM Value Representation.
Definition: Value.h:71
unsigned getMaxInstLength() const
Definition: MCAsmInfo.h:462
virtual bool hasStoreToStackSlot(const MachineInstr &MI, const MachineMemOperand *&MMO, int &FrameIndex) const
If the specified machine instruction has a store to a stack slot, return true along with the FrameInd...
unsigned getNumOperands() const
Return the number of declared MachineOperands for this MachineInstruction.
Definition: MCInstrDesc.h:210
const MCOperandInfo * OpInfo
Definition: MCInstrDesc.h:174
virtual void genAlternativeCodeSequence(MachineInstr &Root, MachineCombinerPattern Pattern, SmallVectorImpl< MachineInstr * > &InsInstrs, SmallVectorImpl< MachineInstr * > &DelInstrs, DenseMap< unsigned, unsigned > &InstrIdxForVirtReg) const
When getMachineCombinerPatterns() finds patterns, this function generates the instructions that could...
const MachineInstrBuilder & addOperand(const MachineOperand &MO) const
BasicBlockListType::iterator iterator
LLVM_NODISCARD LLVM_ATTRIBUTE_ALWAYS_INLINE const char * data() const
data - Get a pointer to the start of the string (which may not be null terminated).
Definition: StringRef.h:125
IRTranslator LLVM IR MI
virtual const TargetRegisterInfo * getRegisterInfo() const
getRegisterInfo - If register information is available, return it.
unsigned getVarIdx() const
Get starting index of non call related arguments (calling convention, statepoint flags, vm state and gc state).
Definition: StackMaps.h:160
const TargetRegisterClass * getRegClass(const MCInstrDesc &TID, unsigned OpNum, const TargetRegisterInfo *TRI, const MachineFunction &MF) const
Given a machine instruction descriptor, returns the register class constraint for OpNum...
A specialized PseudoSourceValue for holding FixedStack values, which must include a frame index...
virtual void ReplaceTailWithBranchTo(MachineBasicBlock::iterator Tail, MachineBasicBlock *NewDest) const
Delete the instruction OldInst and everything after it, replacing it with an unconditional branch to ...
unsigned getSubRegIdxSize(unsigned Idx) const
Get the size of the bit range covered by a sub-register index.
bool isRegSequence() const
Definition: MachineInstr.h:801
static const TargetRegisterClass * canFoldCopy(const MachineInstr &MI, unsigned FoldIdx)
bool isRegSequenceLike(QueryType Type=IgnoreBundle) const
Return true if this instruction behaves the same way as the generic REG_SEQUENCE instructions.
Definition: MachineInstr.h:552
Machine model for scheduling, bundling, and heuristics.
Definition: MCSchedule.h:136
bool isEmpty() const
Returns true if there are no itineraries.
virtual const TargetRegisterClass * getPointerRegClass(const MachineFunction &MF, unsigned Kind=0) const
Returns a TargetRegisterClass used for pointer values.
void setMemRefs(mmo_iterator NewMemRefs, mmo_iterator NewMemRefsEnd)
Assign this MachineInstr's memory reference descriptor list.
int64_t getObjectSize(int ObjectIdx) const
Return the size of the specified object.
bool modifiesRegister(unsigned Reg, const TargetRegisterInfo *TRI) const
Return true if the MachineInstr modifies (fully define or partially define) the specified register...
Definition: MachineInstr.h:903
bool isBarrier(QueryType Type=AnyInBundle) const
Returns true if the specified instruction stops control flow from executing the instruction immediate...
Definition: MachineInstr.h:431
virtual bool isHighLatencyDef(int opc) const
Return true if this opcode has high latency to its result.
bool isInternalRead() const
bool isMachineOpcode() const
Test if this node has a post-isel opcode, directly corresponding to a MachineInstr opcode...
This file describes how to lower LLVM code to machine code.
unsigned getMachineOpcode() const
This may only be called if isMachineOpcode returns true.
A pair composed of a pair of a register and a sub-register index, and another sub-register index...
mmo_iterator memoperands_begin() const
Access to memory operands of the instruction.
Definition: MachineInstr.h:358
bool is_contained(R &&Range, const E &Element)
Wrapper function around std::find to detect if an element exists in a container.
Definition: STLExtras.h:783
bool contains(unsigned Reg) const
Return true if the specified register is included in this register class.