LLVM 6.0.0svn
TargetInstrInfo.cpp
1 //===-- TargetInstrInfo.cpp - Target Instruction Information --------------===//
2 //
3 // The LLVM Compiler Infrastructure
4 //
5 // This file is distributed under the University of Illinois Open Source
6 // License. See LICENSE.TXT for details.
7 //
8 //===----------------------------------------------------------------------===//
9 //
10 // This file implements the TargetInstrInfo class.
11 //
12 //===----------------------------------------------------------------------===//
13 
21 #include "llvm/CodeGen/StackMaps.h"
26 #include "llvm/IR/DataLayout.h"
27 #include "llvm/MC/MCAsmInfo.h"
33 #include <cctype>
34 
35 using namespace llvm;
36 
37 static cl::opt<bool> DisableHazardRecognizer(
38  "disable-sched-hazard", cl::Hidden, cl::init(false),
39  cl::desc("Disable hazard detection during preRA scheduling"));
40 
41 TargetInstrInfo::~TargetInstrInfo() {
42 }
43 
44 const TargetRegisterClass *
45 TargetInstrInfo::getRegClass(const MCInstrDesc &MCID, unsigned OpNum,
46  const TargetRegisterInfo *TRI,
47  const MachineFunction &MF) const {
48  if (OpNum >= MCID.getNumOperands())
49  return nullptr;
50 
51  short RegClass = MCID.OpInfo[OpNum].RegClass;
52  if (MCID.OpInfo[OpNum].isLookupPtrRegClass())
53  return TRI->getPointerRegClass(MF, RegClass);
54 
55  // Instructions like INSERT_SUBREG do not have fixed register classes.
56  if (RegClass < 0)
57  return nullptr;
58 
59  // Otherwise just look it up normally.
60  return TRI->getRegClass(RegClass);
61 }
62 
63 /// insertNoop - Insert a noop into the instruction stream at the specified
64 /// point.
65 void TargetInstrInfo::insertNoop(MachineBasicBlock &MBB,
66  MachineBasicBlock::iterator MI) const {
67  llvm_unreachable("Target didn't implement insertNoop!");
68 }
69 
70 static bool isAsmComment(const char *Str, const MCAsmInfo &MAI) {
71  return strncmp(Str, MAI.getCommentString().data(),
72  MAI.getCommentString().size()) == 0;
73 }
74 
75 /// Measure the specified inline asm to determine an approximation of its
76 /// length.
77 /// Comments (which run till the next SeparatorString or newline) do not
78 /// count as an instruction.
79 /// Any other non-whitespace text is considered an instruction, with
80 /// multiple instructions separated by SeparatorString or newlines.
81 /// Variable-length instructions are not handled here; this function
82 /// may be overridden in the target code to do that.
83 /// We implement a special case of the .space directive which takes only a
84 /// single integer argument in base 10 that is the size in bytes. This is a
85 /// restricted form of the GAS directive in that we only interpret
86 /// simple--i.e. not a logical or arithmetic expression--size values without
87 /// the optional fill value. This is primarily used for creating arbitrary
88 /// sized inline asm blocks for testing purposes.
89 unsigned TargetInstrInfo::getInlineAsmLength(const char *Str,
90  const MCAsmInfo &MAI) const {
91  // Count the number of instructions in the asm.
92  bool AtInsnStart = true;
93  unsigned Length = 0;
94  for (; *Str; ++Str) {
95  if (*Str == '\n' || strncmp(Str, MAI.getSeparatorString(),
96  strlen(MAI.getSeparatorString())) == 0) {
97  AtInsnStart = true;
98  } else if (isAsmComment(Str, MAI)) {
99  // Stop counting as an instruction after a comment until the next
100  // separator.
101  AtInsnStart = false;
102  }
103 
104  if (AtInsnStart && !std::isspace(static_cast<unsigned char>(*Str))) {
105  unsigned AddLength = MAI.getMaxInstLength();
106  if (strncmp(Str, ".space", 6) == 0) {
107  char *EStr;
108  int SpaceSize;
109  SpaceSize = strtol(Str + 6, &EStr, 10);
110  SpaceSize = SpaceSize < 0 ? 0 : SpaceSize;
111  while (*EStr != '\n' && std::isspace(static_cast<unsigned char>(*EStr)))
112  ++EStr;
113  if (*EStr == '\0' || *EStr == '\n' ||
114  isAsmComment(EStr, MAI)) // Successfully parsed .space argument
115  AddLength = SpaceSize;
116  }
117  Length += AddLength;
118  AtInsnStart = false;
119  }
120  }
121 
122  return Length;
123 }
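A worked example of the counting rules above (added for illustration; the mnemonics and MAI values are hypothetical): with MAI.getMaxInstLength() == 4, a separator string of ";" and a comment string of "#", the fragment below measures to 24 bytes.

  // "addl %eax, %ebx ; movl $0, %ecx  # scratch\n"
  // ".space 16"
  //
  // Two ordinary instructions contribute MaxInstLength each (4 + 4), the
  // comment contributes nothing, and the .space directive contributes its
  // literal argument, 16 bytes, giving an estimated length of 24.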
124 
125 /// ReplaceTailWithBranchTo - Delete the instruction OldInst and everything
126 /// after it, replacing it with an unconditional branch to NewDest.
127 void
128 TargetInstrInfo::ReplaceTailWithBranchTo(MachineBasicBlock::iterator Tail,
129  MachineBasicBlock *NewDest) const {
130  MachineBasicBlock *MBB = Tail->getParent();
131 
132  // Remove all the old successors of MBB from the CFG.
133  while (!MBB->succ_empty())
134  MBB->removeSuccessor(MBB->succ_begin());
135 
136  // Save off the debug loc before erasing the instruction.
137  DebugLoc DL = Tail->getDebugLoc();
138 
139  // Remove all the dead instructions from the end of MBB.
140  MBB->erase(Tail, MBB->end());
141 
142  // If NewDest isn't immediately after MBB in layout, insert a branch to it.
143  if (!MBB->isLayoutSuccessor(NewDest))
144  insertBranch(*MBB, NewDest, nullptr, SmallVector<MachineOperand, 0>(), DL);
145  MBB->addSuccessor(NewDest);
146 }
147 
148 MachineInstr *TargetInstrInfo::commuteInstructionImpl(MachineInstr &MI,
149  bool NewMI, unsigned Idx1,
150  unsigned Idx2) const {
151  const MCInstrDesc &MCID = MI.getDesc();
152  bool HasDef = MCID.getNumDefs();
153  if (HasDef && !MI.getOperand(0).isReg())
154  // No idea how to commute this instruction. Target should implement its own.
155  return nullptr;
156 
157  unsigned CommutableOpIdx1 = Idx1; (void)CommutableOpIdx1;
158  unsigned CommutableOpIdx2 = Idx2; (void)CommutableOpIdx2;
159  assert(findCommutedOpIndices(MI, CommutableOpIdx1, CommutableOpIdx2) &&
160  CommutableOpIdx1 == Idx1 && CommutableOpIdx2 == Idx2 &&
161  "TargetInstrInfo::CommuteInstructionImpl(): not commutable operands.");
162  assert(MI.getOperand(Idx1).isReg() && MI.getOperand(Idx2).isReg() &&
163  "This only knows how to commute register operands so far");
164 
165  unsigned Reg0 = HasDef ? MI.getOperand(0).getReg() : 0;
166  unsigned Reg1 = MI.getOperand(Idx1).getReg();
167  unsigned Reg2 = MI.getOperand(Idx2).getReg();
168  unsigned SubReg0 = HasDef ? MI.getOperand(0).getSubReg() : 0;
169  unsigned SubReg1 = MI.getOperand(Idx1).getSubReg();
170  unsigned SubReg2 = MI.getOperand(Idx2).getSubReg();
171  bool Reg1IsKill = MI.getOperand(Idx1).isKill();
172  bool Reg2IsKill = MI.getOperand(Idx2).isKill();
173  bool Reg1IsUndef = MI.getOperand(Idx1).isUndef();
174  bool Reg2IsUndef = MI.getOperand(Idx2).isUndef();
175  bool Reg1IsInternal = MI.getOperand(Idx1).isInternalRead();
176  bool Reg2IsInternal = MI.getOperand(Idx2).isInternalRead();
177  // If destination is tied to either of the commuted source registers, then
178  // it must be updated.
179  if (HasDef && Reg0 == Reg1 &&
180  MI.getDesc().getOperandConstraint(Idx1, MCOI::TIED_TO) == 0) {
181  Reg2IsKill = false;
182  Reg0 = Reg2;
183  SubReg0 = SubReg2;
184  } else if (HasDef && Reg0 == Reg2 &&
185  MI.getDesc().getOperandConstraint(Idx2, MCOI::TIED_TO) == 0) {
186  Reg1IsKill = false;
187  Reg0 = Reg1;
188  SubReg0 = SubReg1;
189  }
190 
191  MachineInstr *CommutedMI = nullptr;
192  if (NewMI) {
193  // Create a new instruction.
194  MachineFunction &MF = *MI.getMF();
195  CommutedMI = MF.CloneMachineInstr(&MI);
196  } else {
197  CommutedMI = &MI;
198  }
199 
200  if (HasDef) {
201  CommutedMI->getOperand(0).setReg(Reg0);
202  CommutedMI->getOperand(0).setSubReg(SubReg0);
203  }
204  CommutedMI->getOperand(Idx2).setReg(Reg1);
205  CommutedMI->getOperand(Idx1).setReg(Reg2);
206  CommutedMI->getOperand(Idx2).setSubReg(SubReg1);
207  CommutedMI->getOperand(Idx1).setSubReg(SubReg2);
208  CommutedMI->getOperand(Idx2).setIsKill(Reg1IsKill);
209  CommutedMI->getOperand(Idx1).setIsKill(Reg2IsKill);
210  CommutedMI->getOperand(Idx2).setIsUndef(Reg1IsUndef);
211  CommutedMI->getOperand(Idx1).setIsUndef(Reg2IsUndef);
212  CommutedMI->getOperand(Idx2).setIsInternalRead(Reg1IsInternal);
213  CommutedMI->getOperand(Idx1).setIsInternalRead(Reg2IsInternal);
214  return CommutedMI;
215 }
216 
217 MachineInstr *TargetInstrInfo::commuteInstruction(MachineInstr &MI, bool NewMI,
218  unsigned OpIdx1,
219  unsigned OpIdx2) const {
220  // If OpIdx1 or OpIdx2 is not specified, then this method is free to choose
221  // any commutable operand, which is done in findCommutedOpIndices() method
222  // called below.
223  if ((OpIdx1 == CommuteAnyOperandIndex || OpIdx2 == CommuteAnyOperandIndex) &&
224  !findCommutedOpIndices(MI, OpIdx1, OpIdx2)) {
225  assert(MI.isCommutable() &&
226  "Precondition violation: MI must be commutable.");
227  return nullptr;
228  }
229  return commuteInstructionImpl(MI, NewMI, OpIdx1, OpIdx2);
230 }
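A minimal usage sketch for the entry point above (an added illustration, not part of the original file; TII and MI are assumed to be a TargetInstrInfo reference and a commutable two-source MachineInstr):

  // Let the target pick any legal pair of commutable operands and swap them
  // in place (NewMI == false); a nullptr result means the commute was refused.
  if (MachineInstr *Commuted = TII.commuteInstruction(
          MI, /*NewMI=*/false, TargetInstrInfo::CommuteAnyOperandIndex,
          TargetInstrInfo::CommuteAnyOperandIndex)) {
    // Commuted aliases &MI here; the register operands and their kill/undef/
    // internal-read flags have been exchanged by commuteInstructionImpl().
    (void)Commuted;
  }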
231 
232 bool TargetInstrInfo::fixCommutedOpIndices(unsigned &ResultIdx1,
233  unsigned &ResultIdx2,
234  unsigned CommutableOpIdx1,
235  unsigned CommutableOpIdx2) {
236  if (ResultIdx1 == CommuteAnyOperandIndex &&
237  ResultIdx2 == CommuteAnyOperandIndex) {
238  ResultIdx1 = CommutableOpIdx1;
239  ResultIdx2 = CommutableOpIdx2;
240  } else if (ResultIdx1 == CommuteAnyOperandIndex) {
241  if (ResultIdx2 == CommutableOpIdx1)
242  ResultIdx1 = CommutableOpIdx2;
243  else if (ResultIdx2 == CommutableOpIdx2)
244  ResultIdx1 = CommutableOpIdx1;
245  else
246  return false;
247  } else if (ResultIdx2 == CommuteAnyOperandIndex) {
248  if (ResultIdx1 == CommutableOpIdx1)
249  ResultIdx2 = CommutableOpIdx2;
250  else if (ResultIdx1 == CommutableOpIdx2)
251  ResultIdx2 = CommutableOpIdx1;
252  else
253  return false;
254  } else
255  // Check that the result operand indices match the given commutable
256  // operand indices.
257  return (ResultIdx1 == CommutableOpIdx1 && ResultIdx2 == CommutableOpIdx2) ||
258  (ResultIdx1 == CommutableOpIdx2 && ResultIdx2 == CommutableOpIdx1);
259 
260  return true;
261 }
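To make the resolution rules above concrete, suppose findCommutedOpIndices() reported CommutableOpIdx1 == 1 and CommutableOpIdx2 == 2 (an added example; the numbers are arbitrary):

  //   ResultIdx1, ResultIdx2 on entry                -> values on return
  //   CommuteAnyOperandIndex, CommuteAnyOperandIndex -> 1, 2  (both chosen)
  //   CommuteAnyOperandIndex, 2                      -> 1, 2  (pair completed)
  //   CommuteAnyOperandIndex, 1                      -> 2, 1  (pair completed)
  //   2, 1                                           -> 2, 1  (already valid)
  //   1, 3                                           -> false (3 not commutable)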
262 
263 bool TargetInstrInfo::findCommutedOpIndices(MachineInstr &MI,
264  unsigned &SrcOpIdx1,
265  unsigned &SrcOpIdx2) const {
266  assert(!MI.isBundle() &&
267  "TargetInstrInfo::findCommutedOpIndices() can't handle bundles");
268 
269  const MCInstrDesc &MCID = MI.getDesc();
270  if (!MCID.isCommutable())
271  return false;
272 
273  // This assumes v0 = op v1, v2 and commuting would swap v1 and v2. If this
274  // is not true, then the target must implement this.
275  unsigned CommutableOpIdx1 = MCID.getNumDefs();
276  unsigned CommutableOpIdx2 = CommutableOpIdx1 + 1;
277  if (!fixCommutedOpIndices(SrcOpIdx1, SrcOpIdx2,
278  CommutableOpIdx1, CommutableOpIdx2))
279  return false;
280 
281  if (!MI.getOperand(SrcOpIdx1).isReg() || !MI.getOperand(SrcOpIdx2).isReg())
282  // No idea.
283  return false;
284  return true;
285 }
286 
287 bool TargetInstrInfo::isUnpredicatedTerminator(const MachineInstr &MI) const {
288  if (!MI.isTerminator()) return false;
289 
290  // Conditional branch is a special case.
291  if (MI.isBranch() && !MI.isBarrier())
292  return true;
293  if (!MI.isPredicable())
294  return true;
295  return !isPredicated(MI);
296 }
297 
298 bool TargetInstrInfo::PredicateInstruction(
299  MachineInstr &MI, ArrayRef<MachineOperand> Pred) const {
300  bool MadeChange = false;
301 
302  assert(!MI.isBundle() &&
303  "TargetInstrInfo::PredicateInstruction() can't handle bundles");
304 
305  const MCInstrDesc &MCID = MI.getDesc();
306  if (!MI.isPredicable())
307  return false;
308 
309  for (unsigned j = 0, i = 0, e = MI.getNumOperands(); i != e; ++i) {
310  if (MCID.OpInfo[i].isPredicate()) {
311  MachineOperand &MO = MI.getOperand(i);
312  if (MO.isReg()) {
313  MO.setReg(Pred[j].getReg());
314  MadeChange = true;
315  } else if (MO.isImm()) {
316  MO.setImm(Pred[j].getImm());
317  MadeChange = true;
318  } else if (MO.isMBB()) {
319  MO.setMBB(Pred[j].getMBB());
320  MadeChange = true;
321  }
322  ++j;
323  }
324  }
325  return MadeChange;
326 }
327 
328 bool TargetInstrInfo::hasLoadFromStackSlot(const MachineInstr &MI,
329  const MachineMemOperand *&MMO,
330  int &FrameIndex) const {
331  for (MachineInstr::mmo_iterator o = MI.memoperands_begin(),
332  oe = MI.memoperands_end();
333  o != oe; ++o) {
334  if ((*o)->isLoad()) {
335  if (const FixedStackPseudoSourceValue *Value =
336  dyn_cast_or_null<FixedStackPseudoSourceValue>(
337  (*o)->getPseudoValue())) {
338  FrameIndex = Value->getFrameIndex();
339  MMO = *o;
340  return true;
341  }
342  }
343  }
344  return false;
345 }
346 
347 bool TargetInstrInfo::hasStoreToStackSlot(const MachineInstr &MI,
348  const MachineMemOperand *&MMO,
349  int &FrameIndex) const {
350  for (MachineInstr::mmo_iterator o = MI.memoperands_begin(),
351  oe = MI.memoperands_end();
352  o != oe; ++o) {
353  if ((*o)->isStore()) {
354  if (const FixedStackPseudoSourceValue *Value =
355  dyn_cast_or_null<FixedStackPseudoSourceValue>(
356  (*o)->getPseudoValue())) {
357  FrameIndex = Value->getFrameIndex();
358  MMO = *o;
359  return true;
360  }
361  }
362  }
363  return false;
364 }
365 
366 bool TargetInstrInfo::getStackSlotRange(const TargetRegisterClass *RC,
367  unsigned SubIdx, unsigned &Size,
368  unsigned &Offset,
369  const MachineFunction &MF) const {
370  const TargetRegisterInfo *TRI = MF.getSubtarget().getRegisterInfo();
371  if (!SubIdx) {
372  Size = TRI->getSpillSize(*RC);
373  Offset = 0;
374  return true;
375  }
376  unsigned BitSize = TRI->getSubRegIdxSize(SubIdx);
377  // Convert bit size to byte size to be consistent with
378  // MCRegisterClass::getSize().
379  if (BitSize % 8)
380  return false;
381 
382  int BitOffset = TRI->getSubRegIdxOffset(SubIdx);
383  if (BitOffset < 0 || BitOffset % 8)
384  return false;
385 
386  Size = BitSize /= 8;
387  Offset = (unsigned)BitOffset / 8;
388 
389  assert(TRI->getSpillSize(*RC) >= (Offset + Size) && "bad subregister range");
390 
391  if (!MF.getDataLayout().isLittleEndian()) {
392  Offset = TRI->getSpillSize(*RC) - (Offset + Size);
393  }
394  return true;
395 }
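A worked instance of the arithmetic above (added illustration; the sizes are made up): take a register class whose spill slot is 16 bytes and a sub-register index covering 64 bits starting at bit offset 64.

  //   BitSize   = 64  ->  Size   = 64 / 8 = 8
  //   BitOffset = 64  ->  Offset = 64 / 8 = 8     (little-endian layout)
  // On a big-endian target the offset is mirrored within the slot:
  //   Offset = SpillSize - (Offset + Size) = 16 - (8 + 8) = 0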
396 
397 void TargetInstrInfo::reMaterialize(MachineBasicBlock &MBB,
398  MachineBasicBlock::iterator I,
399  unsigned DestReg, unsigned SubIdx,
400  const MachineInstr &Orig,
401  const TargetRegisterInfo &TRI) const {
402  MachineInstr *MI = MBB.getParent()->CloneMachineInstr(&Orig);
403  MI->substituteRegister(MI->getOperand(0).getReg(), DestReg, SubIdx, TRI);
404  MBB.insert(I, MI);
405 }
406 
407 bool TargetInstrInfo::produceSameValue(const MachineInstr &MI0,
408  const MachineInstr &MI1,
409  const MachineRegisterInfo *MRI) const {
410  return MI0.isIdenticalTo(MI1, MachineInstr::IgnoreVRegDefs);
411 }
412 
413 MachineInstr &TargetInstrInfo::duplicate(MachineBasicBlock &MBB,
414  MachineBasicBlock::iterator InsertBefore, const MachineInstr &Orig) const {
415  assert(!Orig.isNotDuplicable() && "Instruction cannot be duplicated");
416  MachineFunction &MF = *MBB.getParent();
417  return MF.CloneMachineInstrBundle(MBB, InsertBefore, Orig);
418 }
419 
420 // If the COPY instruction in MI can be folded to a stack operation, return
421 // the register class to use.
422 static const TargetRegisterClass *canFoldCopy(const MachineInstr &MI,
423  unsigned FoldIdx) {
424  assert(MI.isCopy() && "MI must be a COPY instruction");
425  if (MI.getNumOperands() != 2)
426  return nullptr;
427  assert(FoldIdx < 2 && "FoldIdx refers to a nonexistent operand");
428 
429  const MachineOperand &FoldOp = MI.getOperand(FoldIdx);
430  const MachineOperand &LiveOp = MI.getOperand(1 - FoldIdx);
431 
432  if (FoldOp.getSubReg() || LiveOp.getSubReg())
433  return nullptr;
434 
435  unsigned FoldReg = FoldOp.getReg();
436  unsigned LiveReg = LiveOp.getReg();
437 
438  assert(TargetRegisterInfo::isVirtualRegister(FoldReg) &&
439  "Cannot fold physregs");
440 
441  const MachineRegisterInfo &MRI = MI.getMF()->getRegInfo();
442  const TargetRegisterClass *RC = MRI.getRegClass(FoldReg);
443 
444  if (TargetRegisterInfo::isPhysicalRegister(LiveReg))
445  return RC->contains(LiveOp.getReg()) ? RC : nullptr;
446 
447  if (RC->hasSubClassEq(MRI.getRegClass(LiveReg)))
448  return RC;
449 
450  // FIXME: Allow folding when register classes are memory compatible.
451  return nullptr;
452 }
453 
454 void TargetInstrInfo::getNoop(MCInst &NopInst) const {
455  llvm_unreachable("Not implemented");
456 }
457 
458 static MachineInstr *foldPatchpoint(MachineFunction &MF, MachineInstr &MI,
459  ArrayRef<unsigned> Ops, int FrameIndex,
460  const TargetInstrInfo &TII) {
461  unsigned StartIdx = 0;
462  switch (MI.getOpcode()) {
463  case TargetOpcode::STACKMAP: {
464  // StackMapLiveValues are foldable
465  StartIdx = StackMapOpers(&MI).getVarIdx();
466  break;
467  }
468  case TargetOpcode::PATCHPOINT: {
469  // For PatchPoint, the call args are not foldable (even if reported in the
470  // stackmap e.g. via anyregcc).
471  StartIdx = PatchPointOpers(&MI).getVarIdx();
472  break;
473  }
474  case TargetOpcode::STATEPOINT: {
475  // For statepoints, fold deopt and gc arguments, but not call arguments.
476  StartIdx = StatepointOpers(&MI).getVarIdx();
477  break;
478  }
479  default:
480  llvm_unreachable("unexpected stackmap opcode");
481  }
482 
483  // Return nullptr if any operands requested for folding are not foldable (not
484  // part of the stackmap's live values).
485  for (unsigned Op : Ops) {
486  if (Op < StartIdx)
487  return nullptr;
488  }
489 
490  MachineInstr *NewMI =
491  MF.CreateMachineInstr(TII.get(MI.getOpcode()), MI.getDebugLoc(), true);
492  MachineInstrBuilder MIB(MF, NewMI);
493 
494  // No need to fold the return value, metadata, or function arguments.
495  for (unsigned i = 0; i < StartIdx; ++i)
496  MIB.add(MI.getOperand(i));
497 
498  for (unsigned i = StartIdx; i < MI.getNumOperands(); ++i) {
499  MachineOperand &MO = MI.getOperand(i);
500  if (is_contained(Ops, i)) {
501  unsigned SpillSize;
502  unsigned SpillOffset;
503  // Compute the spill slot size and offset.
504  const TargetRegisterClass *RC =
505  MF.getRegInfo().getRegClass(MO.getReg());
506  bool Valid =
507  TII.getStackSlotRange(RC, MO.getSubReg(), SpillSize, SpillOffset, MF);
508  if (!Valid)
509  report_fatal_error("cannot spill patchpoint subregister operand");
510  MIB.addImm(StackMaps::IndirectMemRefOp);
511  MIB.addImm(SpillSize);
512  MIB.addFrameIndex(FrameIndex);
513  MIB.addImm(SpillOffset);
514  }
515  else
516  MIB.add(MO);
517  }
518  return NewMI;
519 }
520 
521 MachineInstr *TargetInstrInfo::foldMemoryOperand(MachineInstr &MI,
522  ArrayRef<unsigned> Ops, int FI,
523  LiveIntervals *LIS) const {
524  auto Flags = MachineMemOperand::MONone;
525  for (unsigned OpIdx : Ops)
526  Flags |= MI.getOperand(OpIdx).isDef() ? MachineMemOperand::MOStore
527  : MachineMemOperand::MOLoad;
528 
529  MachineBasicBlock *MBB = MI.getParent();
530  assert(MBB && "foldMemoryOperand needs an inserted instruction");
531  MachineFunction &MF = *MBB->getParent();
532 
533  // If we're not folding a load into a subreg, the size of the load is the
534  // size of the spill slot. But if we are, we need to figure out what the
535  // actual load size is.
536  int64_t MemSize = 0;
537  const MachineFrameInfo &MFI = MF.getFrameInfo();
538  const TargetRegisterInfo *TRI = MF.getSubtarget().getRegisterInfo();
539 
540  if (Flags & MachineMemOperand::MOStore) {
541  MemSize = MFI.getObjectSize(FI);
542  } else {
543  for (unsigned OpIdx : Ops) {
544  int64_t OpSize = MFI.getObjectSize(FI);
545 
546  if (auto SubReg = MI.getOperand(OpIdx).getSubReg()) {
547  unsigned SubRegSize = TRI->getSubRegIdxSize(SubReg);
548  if (SubRegSize > 0 && !(SubRegSize % 8))
549  OpSize = SubRegSize / 8;
550  }
551 
552  MemSize = std::max(MemSize, OpSize);
553  }
554  }
555 
556  assert(MemSize && "Did not expect a zero-sized stack slot");
557 
558  MachineInstr *NewMI = nullptr;
559 
560  if (MI.getOpcode() == TargetOpcode::STACKMAP ||
561  MI.getOpcode() == TargetOpcode::PATCHPOINT ||
562  MI.getOpcode() == TargetOpcode::STATEPOINT) {
563  // Fold stackmap/patchpoint.
564  NewMI = foldPatchpoint(MF, MI, Ops, FI, *this);
565  if (NewMI)
566  MBB->insert(MI, NewMI);
567  } else {
568  // Ask the target to do the actual folding.
569  NewMI = foldMemoryOperandImpl(MF, MI, Ops, MI, FI, LIS);
570  }
571 
572  if (NewMI) {
573  NewMI->setMemRefs(MI.memoperands_begin(), MI.memoperands_end());
574  // Add a memory operand, foldMemoryOperandImpl doesn't do that.
575  assert((!(Flags & MachineMemOperand::MOStore) ||
576  NewMI->mayStore()) &&
577  "Folded a def to a non-store!");
578  assert((!(Flags & MachineMemOperand::MOLoad) ||
579  NewMI->mayLoad()) &&
580  "Folded a use to a non-load!");
581  assert(MFI.getObjectOffset(FI) != -1);
582  MachineMemOperand *MMO = MF.getMachineMemOperand(
583  MachinePointerInfo::getFixedStack(MF, FI), Flags, MemSize,
584  MFI.getObjectAlignment(FI));
585  NewMI->addMemOperand(MF, MMO);
586 
587  return NewMI;
588  }
589 
590  // Straight COPY may fold as load/store.
591  if (!MI.isCopy() || Ops.size() != 1)
592  return nullptr;
593 
594  const TargetRegisterClass *RC = canFoldCopy(MI, Ops[0]);
595  if (!RC)
596  return nullptr;
597 
598  const MachineOperand &MO = MI.getOperand(1 - Ops[0]);
599  MachineBasicBlock::iterator Pos = MI;
600 
601  if (Flags == MachineMemOperand::MOStore)
602  storeRegToStackSlot(*MBB, Pos, MO.getReg(), MO.isKill(), FI, RC, TRI);
603  else
604  loadRegFromStackSlot(*MBB, Pos, MO.getReg(), FI, RC, TRI);
605  return &*--Pos;
606 }
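An illustrative sketch of the plain-COPY fall-back path above (added by the editor; the virtual register names and register class are hypothetical): folding the def operand of a COPY into frame index FI replaces the copy with a direct spill.

  //   %1:gr64 = COPY %0:gr64
  // foldMemoryOperand(Copy, /*Ops=*/{0}, FI) finds no target-specific fold,
  // reaches the code above, and because operand 0 is a def it emits
  //   storeRegToStackSlot(*MBB, Pos, /*SrcReg=*/%0, MO.isKill(), FI, RC, TRI)
  // i.e. the source register is stored straight into the stack slot; the
  // caller is then free to erase the original COPY.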
607 
608 MachineInstr *TargetInstrInfo::foldMemoryOperand(MachineInstr &MI,
609  ArrayRef<unsigned> Ops,
610  MachineInstr &LoadMI,
611  LiveIntervals *LIS) const {
612  assert(LoadMI.canFoldAsLoad() && "LoadMI isn't foldable!");
613 #ifndef NDEBUG
614  for (unsigned OpIdx : Ops)
615  assert(MI.getOperand(OpIdx).isUse() && "Folding load into def!");
616 #endif
617 
618  MachineBasicBlock &MBB = *MI.getParent();
619  MachineFunction &MF = *MBB.getParent();
620 
621  // Ask the target to do the actual folding.
622  MachineInstr *NewMI = nullptr;
623  int FrameIndex = 0;
624 
625  if ((MI.getOpcode() == TargetOpcode::STACKMAP ||
626  MI.getOpcode() == TargetOpcode::PATCHPOINT ||
627  MI.getOpcode() == TargetOpcode::STATEPOINT) &&
628  isLoadFromStackSlot(LoadMI, FrameIndex)) {
629  // Fold stackmap/patchpoint.
630  NewMI = foldPatchpoint(MF, MI, Ops, FrameIndex, *this);
631  if (NewMI)
632  NewMI = &*MBB.insert(MI, NewMI);
633  } else {
634  // Ask the target to do the actual folding.
635  NewMI = foldMemoryOperandImpl(MF, MI, Ops, MI, LoadMI, LIS);
636  }
637 
638  if (!NewMI)
639  return nullptr;
640 
641  // Copy the memoperands from the load to the folded instruction.
642  if (MI.memoperands_empty()) {
643  NewMI->setMemRefs(LoadMI.memoperands_begin(), LoadMI.memoperands_end());
644  } else {
645  // Handle the rare case of folding multiple loads.
646  NewMI->setMemRefs(MI.memoperands_begin(), MI.memoperands_end());
647  for (MachineInstr::mmo_iterator I = LoadMI.memoperands_begin(),
648  E = LoadMI.memoperands_end();
649  I != E; ++I) {
650  NewMI->addMemOperand(MF, *I);
651  }
652  }
653  return NewMI;
654 }
655 
656 bool TargetInstrInfo::hasReassociableOperands(
657  const MachineInstr &Inst, const MachineBasicBlock *MBB) const {
658  const MachineOperand &Op1 = Inst.getOperand(1);
659  const MachineOperand &Op2 = Inst.getOperand(2);
660  const MachineRegisterInfo &MRI = MBB->getParent()->getRegInfo();
661 
662  // We need virtual register definitions for the operands that we will
663  // reassociate.
664  MachineInstr *MI1 = nullptr;
665  MachineInstr *MI2 = nullptr;
666  if (Op1.isReg() && TargetRegisterInfo::isVirtualRegister(Op1.getReg()))
667  MI1 = MRI.getUniqueVRegDef(Op1.getReg());
668  if (Op2.isReg() && TargetRegisterInfo::isVirtualRegister(Op2.getReg()))
669  MI2 = MRI.getUniqueVRegDef(Op2.getReg());
670 
671  // And they need to be in the trace (otherwise, they won't have a depth).
672  return MI1 && MI2 && MI1->getParent() == MBB && MI2->getParent() == MBB;
673 }
674 
675 bool TargetInstrInfo::hasReassociableSibling(const MachineInstr &Inst,
676  bool &Commuted) const {
677  const MachineBasicBlock *MBB = Inst.getParent();
678  const MachineRegisterInfo &MRI = MBB->getParent()->getRegInfo();
679  MachineInstr *MI1 = MRI.getUniqueVRegDef(Inst.getOperand(1).getReg());
680  MachineInstr *MI2 = MRI.getUniqueVRegDef(Inst.getOperand(2).getReg());
681  unsigned AssocOpcode = Inst.getOpcode();
682 
683  // If only one operand has the same opcode and it's the second source operand,
684  // the operands must be commuted.
685  Commuted = MI1->getOpcode() != AssocOpcode && MI2->getOpcode() == AssocOpcode;
686  if (Commuted)
687  std::swap(MI1, MI2);
688 
689  // 1. The previous instruction must be the same type as Inst.
690  // 2. The previous instruction must have virtual register definitions for its
691  // operands in the same basic block as Inst.
692  // 3. The previous instruction's result must only be used by Inst.
693  return MI1->getOpcode() == AssocOpcode &&
694  hasReassociableOperands(*MI1, MBB) &&
695  MRI.hasOneNonDBGUse(MI1->getOperand(0).getReg());
696 }
697 
698 // 1. The operation must be associative and commutative.
699 // 2. The instruction must have virtual register definitions for its
700 // operands in the same basic block.
701 // 3. The instruction must have a reassociable sibling.
702 bool TargetInstrInfo::isReassociationCandidate(const MachineInstr &Inst,
703  bool &Commuted) const {
704  return isAssociativeAndCommutative(Inst) &&
705  hasReassociableOperands(Inst, Inst.getParent()) &&
706  hasReassociableSibling(Inst, Commuted);
707 }
708 
709 // The concept of the reassociation pass is that these operations can benefit
710 // from this kind of transformation:
711 //
712 // A = ? op ?
713 // B = A op X (Prev)
714 // C = B op Y (Root)
715 // -->
716 // A = ? op ?
717 // B = X op Y
718 // C = A op B
719 //
720 // breaking the dependency between A and B, allowing them to be executed in
721 // parallel (or back-to-back in a pipeline) instead of depending on each other.
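A concrete instance of the rewrite sketched above (added illustration; the ADD opcode and virtual register names are placeholders):

//   %a = ADD %x0, %x1        ; A = ? op ?
//   %b = ADD %a, %x2         ; B = A op X   (Prev)
//   %c = ADD %b, %x3         ; C = B op Y   (Root)
// becomes
//   %a = ADD %x0, %x1
//   %t = ADD %x2, %x3        ; X op Y into a fresh virtual register
//   %c = ADD %a, %t          ; C = A op (X op Y)
// so the two inner ADDs no longer form a serial chain and can issue in
// parallel.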
722 
723 // FIXME: This has the potential to be expensive (compile time) while not
724 // improving the code at all. Some ways to limit the overhead:
725 // 1. Track successful transforms; bail out if hit rate gets too low.
726 // 2. Only enable at -O3 or some other non-default optimization level.
727 // 3. Pre-screen pattern candidates here: if an operand of the previous
728 // instruction is known to not increase the critical path, then don't match
729 // that pattern.
730 bool TargetInstrInfo::getMachineCombinerPatterns(
731  MachineInstr &Root,
732  SmallVectorImpl<MachineCombinerPattern> &Patterns) const {
733  bool Commute;
734  if (isReassociationCandidate(Root, Commute)) {
735  // We found a sequence of instructions that may be suitable for a
736  // reassociation of operands to increase ILP. Specify each commutation
737  // possibility for the Prev instruction in the sequence and let the
738  // machine combiner decide if changing the operands is worthwhile.
739  if (Commute) {
740  Patterns.push_back(MachineCombinerPattern::REASSOC_AX_YB);
741  Patterns.push_back(MachineCombinerPattern::REASSOC_XA_YB);
742  } else {
743  Patterns.push_back(MachineCombinerPattern::REASSOC_AX_BY);
744  Patterns.push_back(MachineCombinerPattern::REASSOC_XA_BY);
745  }
746  return true;
747  }
748 
749  return false;
750 }
751 
752 /// Return true when a code sequence can improve loop throughput.
753 bool
754 TargetInstrInfo::isThroughputPattern(MachineCombinerPattern Pattern) const {
755  return false;
756 }
757 
758 /// Attempt the reassociation transformation to reduce critical path length.
759 /// See the above comments before getMachineCombinerPatterns().
760 void TargetInstrInfo::reassociateOps(
761  MachineInstr &Root, MachineInstr &Prev,
762  MachineCombinerPattern Pattern,
763  SmallVectorImpl<MachineInstr *> &InsInstrs,
764  SmallVectorImpl<MachineInstr *> &DelInstrs,
765  DenseMap<unsigned, unsigned> &InstrIdxForVirtReg) const {
766  MachineFunction *MF = Root.getMF();
767  MachineRegisterInfo &MRI = MF->getRegInfo();
768  const TargetInstrInfo *TII = MF->getSubtarget().getInstrInfo();
769  const TargetRegisterInfo *TRI = MF->getSubtarget().getRegisterInfo();
770  const TargetRegisterClass *RC = Root.getRegClassConstraint(0, TII, TRI);
771 
772  // This array encodes the operand index for each parameter because the
773  // operands may be commuted. Each row corresponds to a pattern value,
774  // and each column specifies the index of A, B, X, Y.
775  unsigned OpIdx[4][4] = {
776  { 1, 1, 2, 2 },
777  { 1, 2, 2, 1 },
778  { 2, 1, 1, 2 },
779  { 2, 2, 1, 1 }
780  };
781 
782  int Row;
783  switch (Pattern) {
784  case MachineCombinerPattern::REASSOC_AX_BY: Row = 0; break;
785  case MachineCombinerPattern::REASSOC_AX_YB: Row = 1; break;
786  case MachineCombinerPattern::REASSOC_XA_BY: Row = 2; break;
787  case MachineCombinerPattern::REASSOC_XA_YB: Row = 3; break;
788  default: llvm_unreachable("unexpected MachineCombinerPattern");
789  }
790 
791  MachineOperand &OpA = Prev.getOperand(OpIdx[Row][0]);
792  MachineOperand &OpB = Root.getOperand(OpIdx[Row][1]);
793  MachineOperand &OpX = Prev.getOperand(OpIdx[Row][2]);
794  MachineOperand &OpY = Root.getOperand(OpIdx[Row][3]);
795  MachineOperand &OpC = Root.getOperand(0);
796 
797  unsigned RegA = OpA.getReg();
798  unsigned RegB = OpB.getReg();
799  unsigned RegX = OpX.getReg();
800  unsigned RegY = OpY.getReg();
801  unsigned RegC = OpC.getReg();
802 
803  if (TargetRegisterInfo::isVirtualRegister(RegA))
804  MRI.constrainRegClass(RegA, RC);
805  if (TargetRegisterInfo::isVirtualRegister(RegB))
806  MRI.constrainRegClass(RegB, RC);
807  if (TargetRegisterInfo::isVirtualRegister(RegX))
808  MRI.constrainRegClass(RegX, RC);
809  if (TargetRegisterInfo::isVirtualRegister(RegY))
810  MRI.constrainRegClass(RegY, RC);
811  if (TargetRegisterInfo::isVirtualRegister(RegC))
812  MRI.constrainRegClass(RegC, RC);
813 
814  // Create a new virtual register for the result of (X op Y) instead of
815  // recycling RegB because the MachineCombiner's computation of the critical
816  // path requires a new register definition rather than an existing one.
817  unsigned NewVR = MRI.createVirtualRegister(RC);
818  InstrIdxForVirtReg.insert(std::make_pair(NewVR, 0));
819 
820  unsigned Opcode = Root.getOpcode();
821  bool KillA = OpA.isKill();
822  bool KillX = OpX.isKill();
823  bool KillY = OpY.isKill();
824 
825  // Create new instructions for insertion.
826  MachineInstrBuilder MIB1 =
827  BuildMI(*MF, Prev.getDebugLoc(), TII->get(Opcode), NewVR)
828  .addReg(RegX, getKillRegState(KillX))
829  .addReg(RegY, getKillRegState(KillY));
830  MachineInstrBuilder MIB2 =
831  BuildMI(*MF, Root.getDebugLoc(), TII->get(Opcode), RegC)
832  .addReg(RegA, getKillRegState(KillA))
833  .addReg(NewVR, getKillRegState(true));
834 
835  setSpecialOperandAttr(Root, Prev, *MIB1, *MIB2);
836 
837  // Record new instructions for insertion and old instructions for deletion.
838  InsInstrs.push_back(MIB1);
839  InsInstrs.push_back(MIB2);
840  DelInstrs.push_back(&Prev);
841  DelInstrs.push_back(&Root);
842 }
843 
844 void TargetInstrInfo::genAlternativeCodeSequence(
845  MachineInstr &Root, MachineCombinerPattern Pattern,
846  SmallVectorImpl<MachineInstr *> &InsInstrs,
847  SmallVectorImpl<MachineInstr *> &DelInstrs,
848  DenseMap<unsigned, unsigned> &InstIdxForVirtReg) const {
849  MachineRegisterInfo &MRI = Root.getMF()->getRegInfo();
850 
851  // Select the previous instruction in the sequence based on the input pattern.
852  MachineInstr *Prev = nullptr;
853  switch (Pattern) {
854  case MachineCombinerPattern::REASSOC_AX_BY:
855  case MachineCombinerPattern::REASSOC_XA_BY:
856  Prev = MRI.getUniqueVRegDef(Root.getOperand(1).getReg());
857  break;
858  case MachineCombinerPattern::REASSOC_AX_YB:
859  case MachineCombinerPattern::REASSOC_XA_YB:
860  Prev = MRI.getUniqueVRegDef(Root.getOperand(2).getReg());
861  break;
862  default:
863  break;
864  }
865 
866  assert(Prev && "Unknown pattern for machine combiner");
867 
868  reassociateOps(Root, *Prev, Pattern, InsInstrs, DelInstrs, InstIdxForVirtReg);
869 }
870 
871 bool TargetInstrInfo::isReallyTriviallyReMaterializableGeneric(
872  const MachineInstr &MI, AliasAnalysis *AA) const {
873  const MachineFunction &MF = *MI.getMF();
874  const MachineRegisterInfo &MRI = MF.getRegInfo();
875 
876  // Remat clients assume operand 0 is the defined register.
877  if (!MI.getNumOperands() || !MI.getOperand(0).isReg())
878  return false;
879  unsigned DefReg = MI.getOperand(0).getReg();
880 
881  // A sub-register definition can only be rematerialized if the instruction
882  // doesn't read the other parts of the register. Otherwise it is really a
883  // read-modify-write operation on the full virtual register which cannot be
884  // moved safely.
885  if (TargetRegisterInfo::isVirtualRegister(DefReg) &&
886  MI.getOperand(0).getSubReg() && MI.readsVirtualRegister(DefReg))
887  return false;
888 
889  // A load from a fixed stack slot can be rematerialized. This may be
890  // redundant with subsequent checks, but it's target-independent,
891  // simple, and a common case.
892  int FrameIdx = 0;
893  if (isLoadFromStackSlot(MI, FrameIdx) &&
894  MF.getFrameInfo().isImmutableObjectIndex(FrameIdx))
895  return true;
896 
897  // Avoid instructions obviously unsafe for remat.
898  if (MI.isNotDuplicable() || MI.mayStore() || MI.hasUnmodeledSideEffects())
899  return false;
900 
901  // Don't remat inline asm. We have no idea how expensive it is
902  // even if it's side effect free.
903  if (MI.isInlineAsm())
904  return false;
905 
906  // Avoid instructions which load from potentially varying memory.
907  if (MI.mayLoad() && !MI.isDereferenceableInvariantLoad(AA))
908  return false;
909 
910  // If any of the registers accessed are non-constant, conservatively assume
911  // the instruction is not rematerializable.
912  for (unsigned i = 0, e = MI.getNumOperands(); i != e; ++i) {
913  const MachineOperand &MO = MI.getOperand(i);
914  if (!MO.isReg()) continue;
915  unsigned Reg = MO.getReg();
916  if (Reg == 0)
917  continue;
918 
919  // Check for a well-behaved physical register.
920  if (TargetRegisterInfo::isPhysicalRegister(Reg)) {
921  if (MO.isUse()) {
922  // If the physreg has no defs anywhere, it's just an ambient register
923  // and we can freely move its uses. Alternatively, if it's allocatable,
924  // it could get allocated to something with a def during allocation.
925  if (!MRI.isConstantPhysReg(Reg))
926  return false;
927  } else {
928  // A physreg def. We can't remat it.
929  return false;
930  }
931  continue;
932  }
933 
934  // Only allow one virtual-register def. There may be multiple defs of the
935  // same virtual register, though.
936  if (MO.isDef() && Reg != DefReg)
937  return false;
938 
939  // Don't allow any virtual-register uses. Rematting an instruction with
940  // virtual register uses would lengthen the live ranges of the uses, which
941  // is not necessarily a good idea, certainly not "trivial".
942  if (MO.isUse())
943  return false;
944  }
945 
946  // Everything checked out.
947  return true;
948 }
949 
950 int TargetInstrInfo::getSPAdjust(const MachineInstr &MI) const {
951  const MachineFunction *MF = MI.getMF();
952  const TargetFrameLowering *TFI = MF->getSubtarget().getFrameLowering();
953  bool StackGrowsDown =
954  TFI->getStackGrowthDirection() == TargetFrameLowering::StackGrowsDown;
955 
956  unsigned FrameSetupOpcode = getCallFrameSetupOpcode();
957  unsigned FrameDestroyOpcode = getCallFrameDestroyOpcode();
958 
959  if (!isFrameInstr(MI))
960  return 0;
961 
962  int SPAdj = TFI->alignSPAdjust(getFrameSize(MI));
963 
964  if ((!StackGrowsDown && MI.getOpcode() == FrameSetupOpcode) ||
965  (StackGrowsDown && MI.getOpcode() == FrameDestroyOpcode))
966  SPAdj = -SPAdj;
967 
968  return SPAdj;
969 }
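A short example of the sign convention above (added; the 16-byte frame size is illustrative and assumes alignSPAdjust() leaves it unchanged):

  // Stack grows down, call-frame-setup pseudo of size 16   -> getSPAdjust() == +16
  // Stack grows down, matching call-frame-destroy pseudo   -> getSPAdjust() == -16
  // On a target whose stack grows up, the two signs are flipped.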
970 
971 /// isSchedulingBoundary - Test if the given instruction should be
972 /// considered a scheduling boundary. This primarily includes labels
973 /// and terminators.
974 bool TargetInstrInfo::isSchedulingBoundary(const MachineInstr &MI,
975  const MachineBasicBlock *MBB,
976  const MachineFunction &MF) const {
977  // Terminators and labels can't be scheduled around.
978  if (MI.isTerminator() || MI.isPosition())
979  return true;
980 
981  // Don't attempt to schedule around any instruction that defines
982  // a stack-oriented pointer, as it's unlikely to be profitable. This
983  // saves compile time, because it doesn't require every single
984  // stack slot reference to depend on the instruction that does the
985  // modification.
986  const TargetLowering &TLI = *MF.getSubtarget().getTargetLowering();
987  const TargetRegisterInfo *TRI = MF.getSubtarget().getRegisterInfo();
988  return MI.modifiesRegister(TLI.getStackPointerRegisterToSaveRestore(), TRI);
989 }
990 
991 // Provide a global flag for disabling the PreRA hazard recognizer that targets
992 // may choose to honor.
993 bool TargetInstrInfo::usePreRAHazardRecognizer() const {
994  return !DisableHazardRecognizer;
995 }
996 
997 // Default implementation of CreateTargetRAHazardRecognizer.
998 ScheduleHazardRecognizer *TargetInstrInfo::
999 CreateTargetHazardRecognizer(const TargetSubtargetInfo *STI,
1000  const ScheduleDAG *DAG) const {
1001  // Dummy hazard recognizer allows all instructions to issue.
1002  return new ScheduleHazardRecognizer();
1003 }
1004 
1005 // Default implementation of CreateTargetMIHazardRecognizer.
1006 ScheduleHazardRecognizer *TargetInstrInfo::
1007 CreateTargetMIHazardRecognizer(const InstrItineraryData *II,
1008  const ScheduleDAG *DAG) const {
1009  return (ScheduleHazardRecognizer *)
1010  new ScoreboardHazardRecognizer(II, DAG, "misched");
1011 }
1012 
1013 // Default implementation of CreateTargetPostRAHazardRecognizer.
1014 ScheduleHazardRecognizer *TargetInstrInfo::
1015 CreateTargetPostRAHazardRecognizer(const InstrItineraryData *II,
1016  const ScheduleDAG *DAG) const {
1017  return (ScheduleHazardRecognizer *)
1018  new ScoreboardHazardRecognizer(II, DAG, "post-RA-sched");
1019 }
1020 
1021 //===----------------------------------------------------------------------===//
1022 // SelectionDAG latency interface.
1023 //===----------------------------------------------------------------------===//
1024 
1025 int
1026 TargetInstrInfo::getOperandLatency(const InstrItineraryData *ItinData,
1027  SDNode *DefNode, unsigned DefIdx,
1028  SDNode *UseNode, unsigned UseIdx) const {
1029  if (!ItinData || ItinData->isEmpty())
1030  return -1;
1031 
1032  if (!DefNode->isMachineOpcode())
1033  return -1;
1034 
1035  unsigned DefClass = get(DefNode->getMachineOpcode()).getSchedClass();
1036  if (!UseNode->isMachineOpcode())
1037  return ItinData->getOperandCycle(DefClass, DefIdx);
1038  unsigned UseClass = get(UseNode->getMachineOpcode()).getSchedClass();
1039  return ItinData->getOperandLatency(DefClass, DefIdx, UseClass, UseIdx);
1040 }
1041 
1042 int TargetInstrInfo::getInstrLatency(const InstrItineraryData *ItinData,
1043  SDNode *N) const {
1044  if (!ItinData || ItinData->isEmpty())
1045  return 1;
1046 
1047  if (!N->isMachineOpcode())
1048  return 1;
1049 
1050  return ItinData->getStageLatency(get(N->getMachineOpcode()).getSchedClass());
1051 }
1052 
1053 //===----------------------------------------------------------------------===//
1054 // MachineInstr latency interface.
1055 //===----------------------------------------------------------------------===//
1056 
1057 unsigned TargetInstrInfo::getNumMicroOps(const InstrItineraryData *ItinData,
1058  const MachineInstr &MI) const {
1059  if (!ItinData || ItinData->isEmpty())
1060  return 1;
1061 
1062  unsigned Class = MI.getDesc().getSchedClass();
1063  int UOps = ItinData->Itineraries[Class].NumMicroOps;
1064  if (UOps >= 0)
1065  return UOps;
1066 
1067  // The # of u-ops is dynamically determined. The specific target should
1068  // override this function to return the right number.
1069  return 1;
1070 }
1071 
1072 /// Return the default expected latency for a def based on its opcode.
1073 unsigned TargetInstrInfo::defaultDefLatency(const MCSchedModel &SchedModel,
1074  const MachineInstr &DefMI) const {
1075  if (DefMI.isTransient())
1076  return 0;
1077  if (DefMI.mayLoad())
1078  return SchedModel.LoadLatency;
1079  if (isHighLatencyDef(DefMI.getOpcode()))
1080  return SchedModel.HighLatency;
1081  return 1;
1082 }
1083 
1084 unsigned TargetInstrInfo::getPredicationCost(const MachineInstr &) const {
1085  return 0;
1086 }
1087 
1088 unsigned TargetInstrInfo::getInstrLatency(const InstrItineraryData *ItinData,
1089  const MachineInstr &MI,
1090  unsigned *PredCost) const {
1091  // Default to one cycle for no itinerary. However, an "empty" itinerary may
1092  // still have a MinLatency property, which getStageLatency checks.
1093  if (!ItinData)
1094  return MI.mayLoad() ? 2 : 1;
1095 
1096  return ItinData->getStageLatency(MI.getDesc().getSchedClass());
1097 }
1098 
1099 bool TargetInstrInfo::hasLowDefLatency(const TargetSchedModel &SchedModel,
1100  const MachineInstr &DefMI,
1101  unsigned DefIdx) const {
1102  const InstrItineraryData *ItinData = SchedModel.getInstrItineraries();
1103  if (!ItinData || ItinData->isEmpty())
1104  return false;
1105 
1106  unsigned DefClass = DefMI.getDesc().getSchedClass();
1107  int DefCycle = ItinData->getOperandCycle(DefClass, DefIdx);
1108  return (DefCycle != -1 && DefCycle <= 1);
1109 }
1110 
1111 /// Both DefMI and UseMI must be valid. By default, call directly to the
1112 /// itinerary. This may be overridden by the target.
1113 unsigned TargetInstrInfo::getOperandLatency(const InstrItineraryData *ItinData,
1114  const MachineInstr &DefMI,
1115  unsigned DefIdx,
1116  const MachineInstr &UseMI,
1117  unsigned UseIdx) const {
1118  unsigned DefClass = DefMI.getDesc().getSchedClass();
1119  unsigned UseClass = UseMI.getDesc().getSchedClass();
1120  return ItinData->getOperandLatency(DefClass, DefIdx, UseClass, UseIdx);
1121 }
1122 
1123 /// If we can determine the operand latency from the def only, without itinerary
1124 /// lookup, do so. Otherwise return -1.
1125 int TargetInstrInfo::computeDefOperandLatency(
1126  const InstrItineraryData *ItinData, const MachineInstr &DefMI) const {
1127 
1128  // Let the target hook getInstrLatency handle missing itineraries.
1129  if (!ItinData)
1130  return getInstrLatency(ItinData, DefMI);
1131 
1132  if (ItinData->isEmpty())
1133  return defaultDefLatency(ItinData->SchedModel, DefMI);
1134 
1135  // ...operand lookup required
1136  return -1;
1137 }
1138 
1139 bool TargetInstrInfo::getRegSequenceInputs(
1140  const MachineInstr &MI, unsigned DefIdx,
1141  SmallVectorImpl<RegSubRegPairAndIdx> &InputRegs) const {
1142  assert((MI.isRegSequence() ||
1143  MI.isRegSequenceLike()) && "Instruction does not have the proper type");
1144 
1145  if (!MI.isRegSequence())
1146  return getRegSequenceLikeInputs(MI, DefIdx, InputRegs);
1147 
1148  // We are looking at:
1149  // Def = REG_SEQUENCE v0, sub0, v1, sub1, ...
1150  assert(DefIdx == 0 && "REG_SEQUENCE only has one def");
1151  for (unsigned OpIdx = 1, EndOpIdx = MI.getNumOperands(); OpIdx != EndOpIdx;
1152  OpIdx += 2) {
1153  const MachineOperand &MOReg = MI.getOperand(OpIdx);
1154  const MachineOperand &MOSubIdx = MI.getOperand(OpIdx + 1);
1155  assert(MOSubIdx.isImm() &&
1156  "One of the subindex of the reg_sequence is not an immediate");
1157  // Record Reg:SubReg, SubIdx.
1158  InputRegs.push_back(RegSubRegPairAndIdx(MOReg.getReg(), MOReg.getSubReg(),
1159  (unsigned)MOSubIdx.getImm()));
1160  }
1161  return true;
1162 }
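For illustration (added; the register numbers and sub-register index names are hypothetical), a REG_SEQUENCE is decomposed by the routine above as follows:

  //   %5 = REG_SEQUENCE %1, %subreg.sub0, %2, %subreg.sub1
  // getRegSequenceInputs(MI, /*DefIdx=*/0, InputRegs) appends
  //   { Reg: %1, SubReg: 0, SubIdx: sub0 }
  //   { Reg: %2, SubReg: 0, SubIdx: sub1 }
  // i.e. one RegSubRegPairAndIdx per (register, index) operand pair, starting
  // at operand 1.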
1163 
1164 bool TargetInstrInfo::getExtractSubregInputs(
1165  const MachineInstr &MI, unsigned DefIdx,
1166  RegSubRegPairAndIdx &InputReg) const {
1167  assert((MI.isExtractSubreg() ||
1168  MI.isExtractSubregLike()) && "Instruction does not have the proper type");
1169 
1170  if (!MI.isExtractSubreg())
1171  return getExtractSubregLikeInputs(MI, DefIdx, InputReg);
1172 
1173  // We are looking at:
1174  // Def = EXTRACT_SUBREG v0.sub1, sub0.
1175  assert(DefIdx == 0 && "EXTRACT_SUBREG only has one def");
1176  const MachineOperand &MOReg = MI.getOperand(1);
1177  const MachineOperand &MOSubIdx = MI.getOperand(2);
1178  assert(MOSubIdx.isImm() &&
1179  "The subindex of the extract_subreg is not an immediate");
1180 
1181  InputReg.Reg = MOReg.getReg();
1182  InputReg.SubReg = MOReg.getSubReg();
1183  InputReg.SubIdx = (unsigned)MOSubIdx.getImm();
1184  return true;
1185 }
1186 
1187 bool TargetInstrInfo::getInsertSubregInputs(
1188  const MachineInstr &MI, unsigned DefIdx,
1189  RegSubRegPair &BaseReg, RegSubRegPairAndIdx &InsertedReg) const {
1190  assert((MI.isInsertSubreg() ||
1191  MI.isInsertSubregLike()) && "Instruction does not have the proper type");
1192 
1193  if (!MI.isInsertSubreg())
1194  return getInsertSubregLikeInputs(MI, DefIdx, BaseReg, InsertedReg);
1195 
1196  // We are looking at:
1197  // Def = INSERT_SUBREG v0, v1, sub0.
1198  assert(DefIdx == 0 && "INSERT_SUBREG only has one def");
1199  const MachineOperand &MOBaseReg = MI.getOperand(1);
1200  const MachineOperand &MOInsertedReg = MI.getOperand(2);
1201  const MachineOperand &MOSubIdx = MI.getOperand(3);
1202  assert(MOSubIdx.isImm() &&
1203  "One of the subindex of the reg_sequence is not an immediate");
1204  BaseReg.Reg = MOBaseReg.getReg();
1205  BaseReg.SubReg = MOBaseReg.getSubReg();
1206 
1207  InsertedReg.Reg = MOInsertedReg.getReg();
1208  InsertedReg.SubReg = MOInsertedReg.getSubReg();
1209  InsertedReg.SubIdx = (unsigned)MOSubIdx.getImm();
1210  return true;
1211 }
const MachineInstrBuilder & add(const MachineOperand &MO) const
virtual void genAlternativeCodeSequence(MachineInstr &Root, MachineCombinerPattern Pattern, SmallVectorImpl< MachineInstr *> &InsInstrs, SmallVectorImpl< MachineInstr *> &DelInstrs, DenseMap< unsigned, unsigned > &InstrIdxForVirtReg) const
When getMachineCombinerPatterns() finds patterns, this function generates the instructions that could...
bool modifiesRegister(unsigned Reg, const TargetRegisterInfo *TRI) const
Return true if the MachineInstr modifies (fully define or partially define) the specified register...
Definition: MachineInstr.h:965
GCNRegPressure max(const GCNRegPressure &P1, const GCNRegPressure &P2)
bool contains(unsigned Reg) const
Return true if the specified register is included in this register class.
const TargetRegisterClass * getRegClass(unsigned Reg) const
Return the register class of the specified virtual register.
const MachineFunction * getMF() const
Return the function that contains the basic block that this instruction belongs to.
LLVM_ATTRIBUTE_NORETURN void report_fatal_error(Error Err, bool gen_crash_diag=true)
Report a serious error, calling any installed error handler.
Definition: Error.cpp:115
Compute iterated dominance frontiers using a linear time algorithm.
Definition: AllocatorList.h:24
bool isLookupPtrRegClass() const
Set if this operand is a pointer value and it requires a callback to look up its register class...
Definition: MCInstrDesc.h:90
static MachineInstr * foldPatchpoint(MachineFunction &MF, MachineInstr &MI, ArrayRef< unsigned > Ops, int FrameIndex, const TargetInstrInfo &TII)
virtual const TargetRegisterInfo * getRegisterInfo() const
getRegisterInfo - If register information is available, return it.
unsigned createVirtualRegister(const TargetRegisterClass *RegClass)
createVirtualRegister - Create and return a new virtual register in the function with the specified r...
virtual bool isThroughputPattern(MachineCombinerPattern Pattern) const
Return true when a code sequence can improve throughput.
bool isExtractSubregLike(QueryType Type=IgnoreBundle) const
Return true if this instruction behaves the same way as the generic EXTRACT_SUBREG instructions...
Definition: MachineInstr.h:601
const DebugLoc & getDebugLoc() const
Returns the debug location id of this MachineInstr.
Definition: MachineInstr.h:268
Describe properties that are true of each instruction in the target description file.
Definition: MCInstrDesc.h:163
unsigned getReg() const
getReg - Returns the register number.
LLVM_NODISCARD LLVM_ATTRIBUTE_ALWAYS_INLINE size_t size() const
size - Get the string size.
Definition: StringRef.h:138
void setIsUndef(bool Val=true)
static bool isVirtualRegister(unsigned Reg)
Return true if the specified register number is in the virtual register namespace.
virtual MachineInstr & duplicate(MachineBasicBlock &MBB, MachineBasicBlock::iterator InsertBefore, const MachineInstr &Orig) const
Clones instruction or the whole instruction bundle Orig and insert into MBB before InsertBefore...
MachineInstr * commuteInstruction(MachineInstr &MI, bool NewMI=false, unsigned OpIdx1=CommuteAnyOperandIndex, unsigned OpIdx2=CommuteAnyOperandIndex) const
This method commutes the operands of the given machine instruction MI.
unsigned getSubReg() const
virtual bool hasReassociableOperands(const MachineInstr &Inst, const MachineBasicBlock *MBB) const
Return true when Inst has reassociable operands in the same MBB.
bool isInlineAsm() const
Definition: MachineInstr.h:832
virtual const TargetLowering * getTargetLowering() const
bool isPredicable(QueryType Type=AllInBundle) const
Return true if this instruction has a predicate operand that controls execution.
Definition: MachineInstr.h:512
virtual unsigned insertBranch(MachineBasicBlock &MBB, MachineBasicBlock *TBB, MachineBasicBlock *FBB, ArrayRef< MachineOperand > Cond, const DebugLoc &DL, int *BytesAdded=nullptr) const
Insert branch code into the end of the specified MachineBasicBlock.
bool isRegSequence() const
Definition: MachineInstr.h:849
static cl::opt< bool > DisableHazardRecognizer("disable-sched-hazard", cl::Hidden, cl::init(false), cl::desc("Disable hazard detection during preRA scheduling"))
bool isTransient() const
Return true if this is a transient instruction that is either very likely to be eliminated during reg...
Definition: MachineInstr.h:900
virtual bool getExtractSubregLikeInputs(const MachineInstr &MI, unsigned DefIdx, RegSubRegPairAndIdx &InputReg) const
Target-dependent implementation of getExtractSubregInputs.
A debug info location.
Definition: DebugLoc.h:34
MachineInstr * foldMemoryOperand(MachineInstr &MI, ArrayRef< unsigned > Ops, int FrameIndex, LiveIntervals *LIS=nullptr) const
Attempt to fold a load or store of the specified stack slot into the specified machine instruction fo...
unsigned getCallFrameDestroyOpcode() const
unsigned defaultDefLatency(const MCSchedModel &SchedModel, const MachineInstr &DefMI) const
Return the default expected latency for a def based on its opcode.
bool isImm() const
isImm - Tests if this is a MO_Immediate operand.
MachineInstr * CreateMachineInstr(const MCInstrDesc &MCID, const DebugLoc &DL, bool NoImp=false)
CreateMachineInstr - Allocate a new MachineInstr.
unsigned getSpillSize(const TargetRegisterClass &RC) const
Return the size in bytes of the stack slot allocated to hold a spilled copy of a register from class ...
LLVM_NODISCARD LLVM_ATTRIBUTE_ALWAYS_INLINE const char * data() const
data - Get a pointer to the start of the string (which may not be null terminated).
Definition: StringRef.h:128
bool isInternalRead() const
virtual bool isHighLatencyDef(int opc) const
Return true if this opcode has high latency to its result.
const TargetRegisterClass * getRegClass(unsigned i) const
Returns the register class associated with the enumeration value.
virtual bool isUnpredicatedTerminator(const MachineInstr &MI) const
Returns true if the instruction is a terminator instruction that has not been predicated.
int NumMicroOps
of micro-ops, -1 means it&#39;s variable
std::pair< iterator, bool > insert(const std::pair< KeyT, ValueT > &KV)
Definition: DenseMap.h:191
A description of a memory reference used in the backend.
virtual void reMaterialize(MachineBasicBlock &MBB, MachineBasicBlock::iterator MI, unsigned DestReg, unsigned SubIdx, const MachineInstr &Orig, const TargetRegisterInfo &TRI) const
Re-issue the specified &#39;original&#39; instruction at the specific location targeting a new destination re...
instr_iterator erase(instr_iterator I)
Remove an instruction from the instruction list and delete it.
unsigned getNumOperands() const
Return the number of declared MachineOperands for this MachineInstruction.
Definition: MCInstrDesc.h:210
Provide an instruction scheduling machine model to CodeGen passes.
const HexagonInstrInfo * TII
unsigned getVarIdx() const
Get starting index of non call related arguments (calling convention, statepoint flags, vm state and gc state).
Definition: StackMaps.h:173
const TargetRegisterClass * getRegClassConstraint(unsigned OpIdx, const TargetInstrInfo *TII, const TargetRegisterInfo *TRI) const
Compute the static register class constraint for operand OpIdx.
unsigned getNumOperands() const
Access to explicit operands of the instruction.
Definition: MachineInstr.h:293
const InstrItinerary * Itineraries
Array of itineraries selected.
This class consists of common code factored out of the SmallVector class to reduce code duplication b...
Definition: APFloat.h:42
unsigned SubReg
MachineInstr & CloneMachineInstrBundle(MachineBasicBlock &MBB, MachineBasicBlock::iterator InsertBefore, const MachineInstr &Orig)
Clones instruction or the whole instruction bundle Orig and insert into MBB before InsertBefore...
int64_t getObjectOffset(int ObjectIdx) const
Return the assigned stack offset of the specified object from the incoming stack pointer.
Reg
All possible values of the reg field in the ModR/M byte.
bool isTerminator(QueryType Type=AnyInBundle) const
Returns true if this instruction part of the terminator for a basic block.
Definition: MachineInstr.h:474
The MachineFrameInfo class represents an abstract stack frame until prolog/epilog code is inserted...
virtual bool hasLowDefLatency(const TargetSchedModel &SchedModel, const MachineInstr &DefMI, unsigned DefIdx) const
Compute operand latency of a def of &#39;Reg&#39;.
unsigned getOpcode() const
Returns the opcode of this MachineInstr.
Definition: MachineInstr.h:290
const InstrItineraryData * getInstrItineraries() const
MachineMemOperand * getMachineMemOperand(MachinePointerInfo PtrInfo, MachineMemOperand::Flags f, uint64_t s, unsigned base_alignment, const AAMDNodes &AAInfo=AAMDNodes(), const MDNode *Ranges=nullptr, SyncScope::ID SSID=SyncScope::System, AtomicOrdering Ordering=AtomicOrdering::NotAtomic, AtomicOrdering FailureOrdering=AtomicOrdering::NotAtomic)
getMachineMemOperand - Allocate a new MachineMemOperand.
This class defines information used to lower LLVM code to legal SelectionDAG operators that the targe...
bool isImmutableObjectIndex(int ObjectIdx) const
Returns true if the specified index corresponds to an immutable object.
const MCInstrDesc & getDesc() const
Returns the target instruction descriptor of this MachineInstr.
Definition: MachineInstr.h:287
virtual MachineInstr * foldMemoryOperandImpl(MachineFunction &MF, MachineInstr &MI, ArrayRef< unsigned > Ops, MachineBasicBlock::iterator InsertPt, int FrameIndex, LiveIntervals *LIS=nullptr) const
Target-dependent implementation for foldMemoryOperand.
bool isBundle() const
Definition: MachineInstr.h:853
virtual unsigned getInstrLatency(const InstrItineraryData *ItinData, const MachineInstr &MI, unsigned *PredCost=nullptr) const
Compute the instruction latency of a given instruction.
const TargetRegisterClass * getRegClass(const MCInstrDesc &TID, unsigned OpNum, const TargetRegisterInfo *TRI, const MachineFunction &MF) const
Given a machine instruction descriptor, returns the register class constraint for OpNum...
int alignSPAdjust(int SPAdj) const
alignSPAdjust - This method aligns the stack adjustment to the correct alignment. ...
static bool isAsmComment(const char *Str, const MCAsmInfo &MAI)
virtual bool getMachineCombinerPatterns(MachineInstr &Root, SmallVectorImpl< MachineCombinerPattern > &Patterns) const
Return true when there is potentially a faster code sequence for an instruction chain ending in Root...
ArrayRef - Represent a constant reference to an array (0 or more elements consecutively in memory)...
Definition: APInt.h:33
bool isLittleEndian() const
Layout endianness...
Definition: DataLayout.h:216
Itinerary data supplied by a subtarget to be used by a target.
virtual const TargetInstrInfo * getInstrInfo() const
void addMemOperand(MachineFunction &MF, MachineMemOperand *MO)
Add a MachineMemOperand to the machine instruction.
instr_iterator insert(instr_iterator I, MachineInstr *M)
Insert MI into the instruction list before I, possibly inside a bundle.
int64_t getObjectSize(int ObjectIdx) const
Return the size of the specified object.
unsigned getStageLatency(unsigned ItinClassIndx) const
Return the total stage latency of the given class.
bool isDereferenceableInvariantLoad(AliasAnalysis *AA) const
Return true if this load instruction never traps and points to a memory location whose value doesn&#39;t ...
const TargetRegisterClass * constrainRegClass(unsigned Reg, const TargetRegisterClass *RC, unsigned MinNumRegs=0)
constrainRegClass - Constrain the register class of the specified virtual register to be a common sub...
bool isBranch(QueryType Type=AnyInBundle) const
Returns true if this is a conditional, unconditional, or indirect branch.
Definition: MachineInstr.h:482
Instances of this class represent a single low-level machine instruction.
Definition: MCInst.h:159
const DataLayout & getDataLayout() const
Return the DataLayout attached to the Module associated to this MF.
BasicBlockListType::iterator iterator
unsigned getKillRegState(bool B)
TargetInstrInfo - Interface to description of machine instruction set.
virtual void getNoop(MCInst &NopInst) const
Return the noop instruction to use for a noop.
This class is intended to be used as a base class for asm properties and features specific to the tar...
Definition: MCAsmInfo.h:56
bool usePreRAHazardRecognizer() const
Provide a global flag for disabling the PreRA hazard recognizer that targets may choose to honor...
static const unsigned CommuteAnyOperandIndex
MachineInstrBuilder BuildMI(MachineFunction &MF, const DebugLoc &DL, const MCInstrDesc &MCID)
Builder interface. Specify how to create the initial instruction itself.
unsigned getSchedClass() const
Return the scheduling class for this instruction.
Definition: MCInstrDesc.h:565
unsigned getObjectAlignment(int ObjectIdx) const
Return the alignment of the specified stack object.
bool mayStore(QueryType Type=AnyInBundle) const
Return true if this instruction could possibly modify memory.
Definition: MachineInstr.h:639
virtual MachineInstr * commuteInstructionImpl(MachineInstr &MI, bool NewMI, unsigned OpIdx1, unsigned OpIdx2) const
This method commutes the operands of the given machine instruction MI.
virtual void ReplaceTailWithBranchTo(MachineBasicBlock::iterator Tail, MachineBasicBlock *NewDest) const
Delete the instruction OldInst and everything after it, replacing it with an unconditional branch to ...
initializer< Ty > init(const Ty &Val)
Definition: CommandLine.h:406
unsigned LoadLatency
Definition: MCSchedule.h:168
* if(!EatIfPresent(lltok::kw_thread_local)) return false
ParseOptionalThreadLocal := /*empty.
unsigned const MachineRegisterInfo * MRI
virtual unsigned getPredicationCost(const MachineInstr &MI) const
HazardRecognizer - This determines whether or not an instruction can be issued this cycle...
virtual bool getInsertSubregLikeInputs(const MachineInstr &MI, unsigned DefIdx, RegSubRegPair &BaseReg, RegSubRegPairAndIdx &InsertedReg) const
Target-dependent implementation of getInsertSubregInputs.
const TargetSubtargetInfo & getSubtarget() const
getSubtarget - Return the subtarget for which this machine code is being compiled.
bool readsVirtualRegister(unsigned Reg) const
Return true if the MachineInstr reads the specified virtual register.
Definition: MachineInstr.h:935
MachineFrameInfo & getFrameInfo()
getFrameInfo - Return the frame info object for the current function.
int getOperandLatency(unsigned DefClass, unsigned DefIdx, unsigned UseClass, unsigned UseIdx) const
Compute and return the use operand latency of a given itinerary class and operand index if the value ...
MachineInstrBuilder & UseMI
static GCRegistry::Add< CoreCLRGC > E("coreclr", "CoreCLR-compatible GC")
const char * getSeparatorString() const
Definition: MCAsmInfo.h:464
virtual unsigned getNumMicroOps(const InstrItineraryData *ItinData, const MachineInstr &MI) const
Return the number of u-operations the given machine instruction will be decoded to on the target cpu...
StackDirection getStackGrowthDirection() const
getStackGrowthDirection - Return the direction the stack grows
void setMBB(MachineBasicBlock *MBB)
unsigned getCallFrameSetupOpcode() const
These methods return the opcode of the frame setup/destroy instructions if they exist (-1 otherwise)...
bool isReassociationCandidate(const MachineInstr &Inst, bool &Commuted) const
Return true if the input Inst is part of a chain of dependent ops that are suitable for reassociatio...
StringRef getCommentString() const
Definition: MCAsmInfo.h:470
virtual void setSpecialOperandAttr(MachineInstr &OldMI1, MachineInstr &OldMI2, MachineInstr &NewMI1, MachineInstr &NewMI2) const
This is an architecture-specific helper function of reassociateOps.
unsigned getVarIdx() const
Get the operand index of the variable list of non-argument operands.
Definition: StackMaps.h:128
void setImm(int64_t immVal)
void setIsInternalRead(bool Val=true)
MI-level patchpoint operands.
Definition: StackMaps.h:77
virtual unsigned getInlineAsmLength(const char *Str, const MCAsmInfo &MAI) const
Measure the specified inline asm to determine an approximation of its length.
const MachineInstrBuilder & addFrameIndex(int Idx) const
MachineCombinerPattern
These are instruction patterns matched by the machine combiner pass.
virtual bool getStackSlotRange(const TargetRegisterClass *RC, unsigned SubIdx, unsigned &Size, unsigned &Offset, const MachineFunction &MF) const
Compute the size in bytes and offset within a stack slot of a spilled register or subregister...
bool isCopy() const
Definition: MachineInstr.h:857
virtual bool PredicateInstruction(MachineInstr &MI, ArrayRef< MachineOperand > Pred) const
Convert the instruction into a predicated instruction.
bool isInsertSubregLike(QueryType Type=IgnoreBundle) const
Return true if this instruction behaves the same way as the generic INSERT_SUBREG instructions...
Definition: MachineInstr.h:615
unsigned getSubRegIdxOffset(unsigned Idx) const
Get the offset of the bit range covered by a sub-register index.
Information about a live register.
TargetRegisterInfo base class - We assume that the target defines a static array of TargetRegisterDes...
virtual bool isPredicated(const MachineInstr &MI) const
Returns true if the instruction is already predicated.
bool isMachineOpcode() const
Test if this node has a post-isel opcode, directly corresponding to a MachineInstr opcode...
#define llvm_unreachable(msg)
Marks that the current location is not supposed to be reachable.
bool isConstantPhysReg(unsigned PhysReg) const
Returns true if PhysReg is unallocatable and constant throughout the function.
bool hasSubClassEq(const TargetRegisterClass *RC) const
Returns true if RC is a sub-class of or equal to this class.
void setIsKill(bool Val=true)
The memory access writes data.
int getOperandConstraint(unsigned OpNum, MCOI::OperandConstraint Constraint) const
Returns the value of the specific constraint if it is set.
Definition: MCInstrDesc.h:187
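For example, the common TIED_TO query could look like this (Desc and UseOpIdx assumed in scope; sketch only):
int TiedDefIdx = Desc.getOperandConstraint(UseOpIdx, llvm::MCOI::TIED_TO);
if (TiedDefIdx != -1) {
  // Operand UseOpIdx must be assigned the same register as operand TiedDefIdx.
}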
virtual bool isSchedulingBoundary(const MachineInstr &MI, const MachineBasicBlock *MBB, const MachineFunction &MF) const
Test if the given instruction should be considered a scheduling boundary.
Iterator for intrusive lists based on ilist_node.
void addSuccessor(MachineBasicBlock *Succ, BranchProbability Prob=BranchProbability::getUnknown())
Add Succ as a successor of this MachineBasicBlock.
void substituteRegister(unsigned FromReg, unsigned ToReg, unsigned SubIdx, const TargetRegisterInfo &RegInfo)
Replace all occurrences of FromReg with ToReg:SubIdx, properly composing subreg indices where necessa...
virtual bool hasLoadFromStackSlot(const MachineInstr &MI, const MachineMemOperand *&MMO, int &FrameIndex) const
If the specified machine instruction has a load from a stack slot, return true along with the FrameIn...
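Typical use, sketched with TII and MI assumed to be in scope:
const llvm::MachineMemOperand *MMO = nullptr;
int FrameIdx = 0;
if (TII->hasLoadFromStackSlot(MI, MMO, FrameIdx)) {
  // MI reads stack slot FrameIdx; MMO describes the size, alignment, and
  // ordering of the access.
}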
mmo_iterator memoperands_begin() const
Access to memory operands of the instruction.
Definition: MachineInstr.h:389
void reassociateOps(MachineInstr &Root, MachineInstr &Prev, MachineCombinerPattern Pattern, SmallVectorImpl< MachineInstr *> &InsInstrs, SmallVectorImpl< MachineInstr *> &DelInstrs, DenseMap< unsigned, unsigned > &InstrIdxForVirtReg) const
Attempt to reassociate Root and Prev according to Pattern to reduce critical path length...
MachineInstr * CloneMachineInstr(const MachineInstr *Orig)
Create a new MachineInstr which is a copy of Orig, identical in all ways except the instruction has n...
MachineOperand class - Representation of each machine instruction operand.
This is a 'vector' (really, a variable-sized array), optimized for the case when the array is small...
Definition: SmallVector.h:864
bool isInsertSubreg() const
Definition: MachineInstr.h:841
A pair composed of a register and a sub-register index.
unsigned getVarIdx() const
Get the operand index of the variable list of non-argument operands.
Definition: StackMaps.h:57
bool isFrameInstr(const MachineInstr &I) const
Returns true if the argument is a frame pseudo instruction.
Information about stack frame layout on the target.
unsigned HighLatency
Definition: MCSchedule.h:175
unsigned getNumDefs() const
Return the number of MachineOperands that are register definitions.
Definition: MCInstrDesc.h:225
Represents one node in the SelectionDAG.
int64_t getFrameSize(const MachineInstr &I) const
Returns size of the frame associated with the given frame instruction.
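A small sketch of how the frame-pseudo queries combine when scanning a call sequence (TII, MI, and a running SPAdj counter assumed to be in scope; illustrative):
if (TII->isFrameInstr(MI)) {
  // getSPAdjust() accounts for the frame size, the stack growth direction,
  // and whether MI is the setup or the destroy pseudo.
  SPAdj += TII->getSPAdjust(MI);
}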
int64_t getImm() const
bool getRegSequenceInputs(const MachineInstr &MI, unsigned DefIdx, SmallVectorImpl< RegSubRegPairAndIdx > &InputRegs) const
Build the equivalent inputs of a REG_SEQUENCE for the given MI and DefIdx.
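Sketch of decomposing a REG_SEQUENCE-like def (TII and MI assumed in scope; DefIdx 0 is the usual single def):
llvm::SmallVector<llvm::TargetInstrInfo::RegSubRegPairAndIdx, 8> Inputs;
if (TII->getRegSequenceInputs(MI, /*DefIdx=*/0, Inputs)) {
  for (const auto &In : Inputs) {
    // In.Reg (possibly qualified by In.SubReg) is inserted at sub-register
    // index In.SubIdx of the defined register.
  }
}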
MachineInstr * getUniqueVRegDef(unsigned Reg) const
getUniqueVRegDef - Return the unique machine instr that defines the specified virtual register or nul...
virtual bool findCommutedOpIndices(MachineInstr &MI, unsigned &SrcOpIdx1, unsigned &SrcOpIdx2) const
Returns true iff the routine could find two commutable operands in the given machine instruction...
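A sketch of the usual pairing with commuteInstruction (TII and MI assumed to be in scope):
unsigned OpIdx1 = llvm::TargetInstrInfo::CommuteAnyOperandIndex;
unsigned OpIdx2 = llvm::TargetInstrInfo::CommuteAnyOperandIndex;
if (TII->findCommutedOpIndices(MI, OpIdx1, OpIdx2)) {
  // Commute the two reported operands in place (NewMI = false).
  TII->commuteInstruction(MI, /*NewMI=*/false, OpIdx1, OpIdx2);
}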
void swap(llvm::BitVector &LHS, llvm::BitVector &RHS)
Implement std::swap in terms of BitVector swap.
Definition: BitVector.h:923
unsigned getMaxInstLength() const
Definition: MCAsmInfo.h:461
virtual bool hasStoreToStackSlot(const MachineInstr &MI, const MachineMemOperand *&MMO, int &FrameIndex) const
If the specified machine instruction has a store to a stack slot, return true along with the FrameInd...
virtual void storeRegToStackSlot(MachineBasicBlock &MBB, MachineBasicBlock::iterator MI, unsigned SrcReg, bool isKill, int FrameIndex, const TargetRegisterClass *RC, const TargetRegisterInfo *TRI) const
Store the specified register of the given register class to the specified stack frame index...
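A hedged spill sketch; MF, MBB, InsertPt, Reg, and TII are assumed to be in scope, and the spill slot is created ad hoc for illustration:
llvm::MachineRegisterInfo &MRI = MF.getRegInfo();
const llvm::TargetRegisterClass *RC = MRI.getRegClass(Reg);
const llvm::TargetRegisterInfo *TRI = MF.getSubtarget().getRegisterInfo();
int FI = MF.getFrameInfo().CreateSpillStackObject(TRI->getSpillSize(*RC),
                                                  TRI->getSpillAlignment(*RC));
TII->storeRegToStackSlot(MBB, InsertPt, Reg, /*isKill=*/true, FI, RC, TRI);
// The matching reload would call loadRegFromStackSlot with the same FI/RC/TRI.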
bool isEmpty() const
Returns true if there are no itineraries.
const MachineBasicBlock * getParent() const
Definition: MachineInstr.h:139
MachineRegisterInfo - Keep track of information for virtual and physical registers, including vreg register classes, use/def chains for registers, etc.
The memory access reads data.
TargetSubtargetInfo - Generic base class for all target subtargets.
static MachinePointerInfo getFixedStack(MachineFunction &MF, int FI, int64_t Offset=0)
Return a MachinePointerInfo record that refers to the specified FrameIndex.
virtual void insertNoop(MachineBasicBlock &MBB, MachineBasicBlock::iterator MI) const
Insert a noop into the instruction stream at the specified point.
bool isRegSequenceLike(QueryType Type=IgnoreBundle) const
Return true if this instruction behaves the same way as the generic REG_SEQUENCE instructions.
Definition: MachineInstr.h:586
Representation of each machine instruction.
Definition: MachineInstr.h:59
static bool isPhysicalRegister(unsigned Reg)
Return true if the specified register number is in the physical register namespace.
const MachineFunction * getParent() const
Return the MachineFunction containing this basic block.
static bool fixCommutedOpIndices(unsigned &ResultIdx1, unsigned &ResultIdx2, unsigned CommutableOpIdx1, unsigned CommutableOpIdx2)
Assigns the (CommutableOpIdx1, CommutableOpIdx2) pair of commutable operand indices to (ResultIdx1...
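Targets typically call this protected helper from their findCommutedOpIndices override. A hypothetical target (names invented here) whose commutable sources sit at operand indices 1 and 2 might write:
bool MyTargetInstrInfo::findCommutedOpIndices(llvm::MachineInstr &MI,
                                              unsigned &SrcOpIdx1,
                                              unsigned &SrcOpIdx2) const {
  // Respect any index the caller already fixed; otherwise report 1 and 2.
  return fixCommutedOpIndices(SrcOpIdx1, SrcOpIdx2, 1, 2);
}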
const MachineInstrBuilder & addImm(int64_t Val) const
Add a new immediate operand.
MCSchedModel SchedModel
Basic machine properties.
virtual ScheduleHazardRecognizer * CreateTargetHazardRecognizer(const TargetSubtargetInfo *STI, const ScheduleDAG *DAG) const
Allocate and return a hazard recognizer to use for this target when scheduling the machine instructio...
MachineRegisterInfo & getRegInfo()
getRegInfo - Return information about the registers currently in use.
MI-level stackmap operands.
Definition: StackMaps.h:36
virtual unsigned isLoadFromStackSlot(const MachineInstr &MI, int &FrameIndex) const
If the specified machine instruction is a direct load from a stack slot, return the virtual or physic...
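Sketch of the usual call (TII and MI assumed in scope); the returned register is 0 when MI is not a simple reload:
int FrameIdx;
if (unsigned DestReg = TII->isLoadFromStackSlot(MI, FrameIdx)) {
  // MI loads DestReg from stack slot FrameIdx with no side effects.
}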
int16_t RegClass
This specifies the register class enumeration of the operand if the operand is a register.
Definition: MCInstrDesc.h:76
bool canFoldAsLoad(QueryType Type=IgnoreBundle) const
Return true for instructions that can be folded as memory operands in other instructions.
Definition: MachineInstr.h:572
const MCInstrDesc & get(unsigned Opcode) const
Return the machine instruction descriptor that corresponds to the specified instruction opcode...
Definition: MCInstrInfo.h:45
void setReg(unsigned Reg)
Change the register this operand corresponds to.
void setSubReg(unsigned subReg)
virtual const TargetFrameLowering * getFrameLowering() const
virtual bool produceSameValue(const MachineInstr &MI0, const MachineInstr &MI1, const MachineRegisterInfo *MRI=nullptr) const
Return true if two machine instructions would produce identical values.
MI-level Statepoint operands.
Definition: StackMaps.h:155
bool hasOneNonDBGUse(unsigned RegNo) const
hasOneNonDBGUse - Return true if there is exactly one non-Debug instruction using the specified regis...
void removeSuccessor(MachineBasicBlock *Succ, bool NormalizeSuccProbs=false)
Remove successor from the successors list of this MachineBasicBlock.
bool isReg() const
isReg - Tests if this is a MO_Register operand.
bool mayLoad(QueryType Type=AnyInBundle) const
Return true if this instruction could possibly read memory.
Definition: MachineInstr.h:626
bool memoperands_empty() const
Return true if we don't have any memory operands which describe the memory access done by this instruction.
Definition: MachineInstr.h:394
bool isIdenticalTo(const MachineInstr &Other, MICheckType Check=CheckDefs) const
Return true if this instruction is identical to Other.
LLVM Value Representation.
Definition: Value.h:73
bool hasReassociableSibling(const MachineInstr &Inst, bool &Commuted) const
Return true when Inst has reassociable sibling.
unsigned getMachineOpcode() const
This may only be called if isMachineOpcode returns true.
virtual ScheduleHazardRecognizer * CreateTargetMIHazardRecognizer(const InstrItineraryData *, const ScheduleDAG *DAG) const
Allocate and return a hazard recognizer to use for this target when scheduling the machine instructio...
bool isPosition() const
Definition: MachineInstr.h:814
unsigned getStackPointerRegisterToSaveRestore() const
If a physical register, this specifies the register that llvm.savestack/llvm.restorestack should save...
virtual bool isAssociativeAndCommutative(const MachineInstr &Inst) const
Return true when Inst is both associative and commutative.
const MCOperandInfo * OpInfo
Definition: MCInstrDesc.h:174
int getOperandCycle(unsigned ItinClassIndx, unsigned OperandIdx) const
Return the cycle for the given class and operand.
unsigned getSubRegIdxSize(unsigned Idx) const
Get the size of the bit range covered by a sub-register index.
bool hasUnmodeledSideEffects() const
Return true if this instruction has side effects that are not modeled by mayLoad / mayStore...
bool isBarrier(QueryType Type=AnyInBundle) const
Returns true if the specified instruction stops control flow from executing the instruction immediate...
Definition: MachineInstr.h:465
bool isMBB() const
isMBB - Tests if this is a MO_MachineBasicBlock operand.
A specialized PseudoSourceValue for holding FixedStack values, which must include a frame index...
virtual bool getRegSequenceLikeInputs(const MachineInstr &MI, unsigned DefIdx, SmallVectorImpl< RegSubRegPairAndIdx > &InputRegs) const
Target-dependent implementation of getRegSequenceInputs.
bool getExtractSubregInputs(const MachineInstr &MI, unsigned DefIdx, RegSubRegPairAndIdx &InputReg) const
Build the equivalent inputs of a EXTRACT_SUBREG for the given MI and DefIdx.
const MachineOperand & getOperand(unsigned i) const
Definition: MachineInstr.h:295
static const TargetRegisterClass * canFoldCopy(const MachineInstr &MI, unsigned FoldIdx)
Machine model for scheduling, bundling, and heuristics.
Definition: MCSchedule.h:136
bool getInsertSubregInputs(const MachineInstr &MI, unsigned DefIdx, RegSubRegPair &BaseReg, RegSubRegPairAndIdx &InsertedReg) const
Build the equivalent inputs of a INSERT_SUBREG for the given MI and DefIdx.
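Sketch of decomposing an INSERT_SUBREG-like def (TII and MI assumed to be in scope):
llvm::TargetInstrInfo::RegSubRegPair Base;
llvm::TargetInstrInfo::RegSubRegPairAndIdx Inserted;
if (TII->getInsertSubregInputs(MI, /*DefIdx=*/0, Base, Inserted)) {
  // Def = INSERT_SUBREG Base.Reg:Base.SubReg,
  //                     Inserted.Reg:Inserted.SubReg, Inserted.SubIdx
}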
bool isExtractSubreg() const
Definition: MachineInstr.h:865
virtual ScheduleHazardRecognizer * CreateTargetPostRAHazardRecognizer(const InstrItineraryData *, const ScheduleDAG *DAG) const
Allocate and return a hazard recognizer to use for this target when scheduling the machine instructio...
int computeDefOperandLatency(const InstrItineraryData *ItinData, const MachineInstr &DefMI) const
If we can determine the operand latency from the def only, without itinerary lookup, do so.
virtual int getOperandLatency(const InstrItineraryData *ItinData, SDNode *DefNode, unsigned DefIdx, SDNode *UseNode, unsigned UseIdx) const
void setMemRefs(mmo_iterator NewMemRefs, mmo_iterator NewMemRefsEnd)
Assign this MachineInstr's memory reference descriptor list.
virtual void loadRegFromStackSlot(MachineBasicBlock &MBB, MachineBasicBlock::iterator MI, unsigned DestReg, int FrameIndex, const TargetRegisterClass *RC, const TargetRegisterInfo *TRI) const
Load the specified register of the given register class from the specified stack frame index...
bool isCommutable(QueryType Type=IgnoreBundle) const
Return true if this may be a 2- or 3-address instruction (of the form "X = op Y, Z, ..."), which produces the same result if Y and Z are exchanged.
Definition: MachineInstr.h:667
virtual int getSPAdjust(const MachineInstr &MI) const
Returns the actual stack pointer adjustment made by an instruction as part of a call sequence...
bool isNotDuplicable(QueryType Type=AnyInBundle) const
Return true if this instruction cannot be safely duplicated.
Definition: MachineInstr.h:542
This file describes how to lower LLVM code to machine code.
mmo_iterator memoperands_end() const
Definition: MachineInstr.h:390
virtual const TargetRegisterClass * getPointerRegClass(const MachineFunction &MF, unsigned Kind=0) const
Returns a TargetRegisterClass used for pointer values.
A pair composed of a pair of a register and a sub-register index, and another sub-register index...
bool is_contained(R &&Range, const E &Element)
Wrapper function around std::find to detect if an element exists in a container.
Definition: STLExtras.h:821