//===-- TargetInstrInfo.cpp - Target Instruction Information -------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file implements the TargetInstrInfo class.
//
//===----------------------------------------------------------------------===//

#include "llvm/CodeGen/StackMaps.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/MC/MCAsmInfo.h"
#include <cctype>

using namespace llvm;

static cl::opt<bool> DisableHazardRecognizer(
    "disable-sched-hazard", cl::Hidden, cl::init(false),
    cl::desc("Disable hazard detection during preRA scheduling"));

TargetInstrInfo::~TargetInstrInfo() {
}

const TargetRegisterClass*
TargetInstrInfo::getRegClass(const MCInstrDesc &MCID, unsigned OpNum,
                             const TargetRegisterInfo *TRI,
                             const MachineFunction &MF) const {
  if (OpNum >= MCID.getNumOperands())
    return nullptr;

  short RegClass = MCID.OpInfo[OpNum].RegClass;
  if (MCID.OpInfo[OpNum].isLookupPtrRegClass())
    return TRI->getPointerRegClass(MF, RegClass);

  // Instructions like INSERT_SUBREG do not have fixed register classes.
  if (RegClass < 0)
    return nullptr;

  // Otherwise just look it up normally.
  return TRI->getRegClass(RegClass);
}

/// insertNoop - Insert a noop into the instruction stream at the specified
/// point.
void TargetInstrInfo::insertNoop(MachineBasicBlock &MBB,
                                 MachineBasicBlock::iterator MI) const {
  llvm_unreachable("Target didn't implement insertNoop!");
}

static bool isAsmComment(const char *Str, const MCAsmInfo &MAI) {
  return strncmp(Str, MAI.getCommentString().data(),
                 MAI.getCommentString().size()) == 0;
}

/// Measure the specified inline asm to determine an approximation of its
/// length.
/// Comments (which run till the next SeparatorString or newline) do not
/// count as an instruction.
/// Any other non-whitespace text is considered an instruction, with
/// multiple instructions separated by SeparatorString or newlines.
/// Variable-length instructions are not handled here; this function
/// may be overridden in the target code to do that.
/// We implement a special case of the .space directive which takes only a
/// single integer argument in base 10 that is the size in bytes. This is a
/// restricted form of the GAS directive in that we only interpret
/// simple--i.e. not a logical or arithmetic expression--size values without
/// the optional fill value. This is primarily used for creating arbitrary
/// sized inline asm blocks for testing purposes.
unsigned TargetInstrInfo::getInlineAsmLength(const char *Str,
                                             const MCAsmInfo &MAI) const {
  // Count the number of instructions in the asm.
  bool AtInsnStart = true;
  unsigned Length = 0;
  for (; *Str; ++Str) {
    if (*Str == '\n' || strncmp(Str, MAI.getSeparatorString(),
                                strlen(MAI.getSeparatorString())) == 0) {
      AtInsnStart = true;
    } else if (isAsmComment(Str, MAI)) {
      // Stop counting as an instruction after a comment until the next
      // separator.
      AtInsnStart = false;
    }

    if (AtInsnStart && !std::isspace(static_cast<unsigned char>(*Str))) {
      unsigned AddLength = MAI.getMaxInstLength();
      if (strncmp(Str, ".space", 6) == 0) {
        char *EStr;
        int SpaceSize;
        SpaceSize = strtol(Str + 6, &EStr, 10);
        SpaceSize = SpaceSize < 0 ? 0 : SpaceSize;
        while (*EStr != '\n' && std::isspace(static_cast<unsigned char>(*EStr)))
          ++EStr;
        if (*EStr == '\0' || *EStr == '\n' ||
            isAsmComment(EStr, MAI)) // Successfully parsed .space argument
          AddLength = SpaceSize;
      }
      Length += AddLength;
      AtInsnStart = false;
    }
  }

  return Length;
}
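
// Illustrative example (not part of the original file): assuming a target
// whose MCAsmInfo reports ";" as the separator string, "#" as the comment
// string, and a maximum instruction length of 4 bytes, the counting above
// behaves like this (TII and MAI stand for a TargetInstrInfo and MCAsmInfo
// instance):
//
//   const char *Asm = "mov r0, r1; add r0, r0, r2 # tmp\n.space 16";
//   unsigned Len = TII->getInlineAsmLength(Asm, *MAI);
//   // Two ordinary instructions contribute 4 bytes each, the comment is
//   // skipped, and ".space 16" contributes exactly 16 bytes, so Len == 24.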

/// ReplaceTailWithBranchTo - Delete the instruction OldInst and everything
/// after it, replacing it with an unconditional branch to NewDest.
void
TargetInstrInfo::ReplaceTailWithBranchTo(MachineBasicBlock::iterator Tail,
                                         MachineBasicBlock *NewDest) const {
  MachineBasicBlock *MBB = Tail->getParent();

  // Remove all the old successors of MBB from the CFG.
  while (!MBB->succ_empty())
    MBB->removeSuccessor(MBB->succ_begin());

  // Save off the debug loc before erasing the instruction.
  DebugLoc DL = Tail->getDebugLoc();

  // Remove all the dead instructions from the end of MBB.
  MBB->erase(Tail, MBB->end());

  // If NewDest isn't immediately after MBB in the layout, insert a branch to it.
  if (!MBB->isLayoutSuccessor(NewDest))
    insertBranch(*MBB, NewDest, nullptr, SmallVector<MachineOperand, 0>(), DL);
  MBB->addSuccessor(NewDest);
}

MachineInstr *TargetInstrInfo::commuteInstructionImpl(MachineInstr &MI,
                                                      bool NewMI, unsigned Idx1,
                                                      unsigned Idx2) const {
  const MCInstrDesc &MCID = MI.getDesc();
  bool HasDef = MCID.getNumDefs();
  if (HasDef && !MI.getOperand(0).isReg())
    // No idea how to commute this instruction. Target should implement its own.
    return nullptr;

  unsigned CommutableOpIdx1 = Idx1; (void)CommutableOpIdx1;
  unsigned CommutableOpIdx2 = Idx2; (void)CommutableOpIdx2;
  assert(findCommutedOpIndices(MI, CommutableOpIdx1, CommutableOpIdx2) &&
         CommutableOpIdx1 == Idx1 && CommutableOpIdx2 == Idx2 &&
         "TargetInstrInfo::CommuteInstructionImpl(): not commutable operands.");
  assert(MI.getOperand(Idx1).isReg() && MI.getOperand(Idx2).isReg() &&
         "This only knows how to commute register operands so far");

  unsigned Reg0 = HasDef ? MI.getOperand(0).getReg() : 0;
  unsigned Reg1 = MI.getOperand(Idx1).getReg();
  unsigned Reg2 = MI.getOperand(Idx2).getReg();
  unsigned SubReg0 = HasDef ? MI.getOperand(0).getSubReg() : 0;
  unsigned SubReg1 = MI.getOperand(Idx1).getSubReg();
  unsigned SubReg2 = MI.getOperand(Idx2).getSubReg();
  bool Reg1IsKill = MI.getOperand(Idx1).isKill();
  bool Reg2IsKill = MI.getOperand(Idx2).isKill();
  bool Reg1IsUndef = MI.getOperand(Idx1).isUndef();
  bool Reg2IsUndef = MI.getOperand(Idx2).isUndef();
  bool Reg1IsInternal = MI.getOperand(Idx1).isInternalRead();
  bool Reg2IsInternal = MI.getOperand(Idx2).isInternalRead();
  // Avoid calling isRenamable for virtual registers since we assert that
  // renamable property is only queried/set for physical registers.
  bool Reg1IsRenamable = TargetRegisterInfo::isPhysicalRegister(Reg1)
                             ? MI.getOperand(Idx1).isRenamable()
                             : false;
  bool Reg2IsRenamable = TargetRegisterInfo::isPhysicalRegister(Reg2)
                             ? MI.getOperand(Idx2).isRenamable()
                             : false;
  // If the destination is tied to either of the commuted source registers, then
  // it must be updated.
  if (HasDef && Reg0 == Reg1 &&
      MI.getDesc().getOperandConstraint(Idx1, MCOI::TIED_TO) == 0) {
    Reg2IsKill = false;
    Reg0 = Reg2;
    SubReg0 = SubReg2;
  } else if (HasDef && Reg0 == Reg2 &&
             MI.getDesc().getOperandConstraint(Idx2, MCOI::TIED_TO) == 0) {
    Reg1IsKill = false;
    Reg0 = Reg1;
    SubReg0 = SubReg1;
  }

  MachineInstr *CommutedMI = nullptr;
  if (NewMI) {
    // Create a new instruction.
    MachineFunction &MF = *MI.getMF();
    CommutedMI = MF.CloneMachineInstr(&MI);
  } else {
    CommutedMI = &MI;
  }

  if (HasDef) {
    CommutedMI->getOperand(0).setReg(Reg0);
    CommutedMI->getOperand(0).setSubReg(SubReg0);
  }
  CommutedMI->getOperand(Idx2).setReg(Reg1);
  CommutedMI->getOperand(Idx1).setReg(Reg2);
  CommutedMI->getOperand(Idx2).setSubReg(SubReg1);
  CommutedMI->getOperand(Idx1).setSubReg(SubReg2);
  CommutedMI->getOperand(Idx2).setIsKill(Reg1IsKill);
  CommutedMI->getOperand(Idx1).setIsKill(Reg2IsKill);
  CommutedMI->getOperand(Idx2).setIsUndef(Reg1IsUndef);
  CommutedMI->getOperand(Idx1).setIsUndef(Reg2IsUndef);
  CommutedMI->getOperand(Idx2).setIsInternalRead(Reg1IsInternal);
  CommutedMI->getOperand(Idx1).setIsInternalRead(Reg2IsInternal);
  // Avoid calling setIsRenamable for virtual registers since we assert that
  // renamable property is only queried/set for physical registers.
  if (TargetRegisterInfo::isPhysicalRegister(Reg1))
    CommutedMI->getOperand(Idx2).setIsRenamable(Reg1IsRenamable);
  if (TargetRegisterInfo::isPhysicalRegister(Reg2))
    CommutedMI->getOperand(Idx1).setIsRenamable(Reg2IsRenamable);
  return CommutedMI;
}
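
// Illustrative sketch (not part of the original file): a typical caller does
// not pick explicit indices but lets findCommutedOpIndices() choose them via
// the CommuteAnyOperandIndex wildcard; MI here stands for any commutable
// instruction such as "%2 = ADD %0, %1".
//
//   if (MachineInstr *NewMI = TII->commuteInstruction(MI)) {
//     // NewMI is MI itself (NewMI == &MI) with the two source operands and
//     // their kill/undef/internal-read flags swapped; pass NewMI=true to
//     // commuteInstruction to get a freshly cloned instruction instead.
//   }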

MachineInstr *TargetInstrInfo::commuteInstruction(MachineInstr &MI, bool NewMI,
                                                  unsigned OpIdx1,
                                                  unsigned OpIdx2) const {
  // If OpIdx1 or OpIdx2 is not specified, then this method is free to choose
  // any commutable operand, which is done in the findCommutedOpIndices() method
  // called below.
  if ((OpIdx1 == CommuteAnyOperandIndex || OpIdx2 == CommuteAnyOperandIndex) &&
      !findCommutedOpIndices(MI, OpIdx1, OpIdx2)) {
    assert(MI.isCommutable() &&
           "Precondition violation: MI must be commutable.");
    return nullptr;
  }
  return commuteInstructionImpl(MI, NewMI, OpIdx1, OpIdx2);
}

bool TargetInstrInfo::fixCommutedOpIndices(unsigned &ResultIdx1,
                                           unsigned &ResultIdx2,
                                           unsigned CommutableOpIdx1,
                                           unsigned CommutableOpIdx2) {
  if (ResultIdx1 == CommuteAnyOperandIndex &&
      ResultIdx2 == CommuteAnyOperandIndex) {
    ResultIdx1 = CommutableOpIdx1;
    ResultIdx2 = CommutableOpIdx2;
  } else if (ResultIdx1 == CommuteAnyOperandIndex) {
    if (ResultIdx2 == CommutableOpIdx1)
      ResultIdx1 = CommutableOpIdx2;
    else if (ResultIdx2 == CommutableOpIdx2)
      ResultIdx1 = CommutableOpIdx1;
    else
      return false;
  } else if (ResultIdx2 == CommuteAnyOperandIndex) {
    if (ResultIdx1 == CommutableOpIdx1)
      ResultIdx2 = CommutableOpIdx2;
    else if (ResultIdx1 == CommutableOpIdx2)
      ResultIdx2 = CommutableOpIdx1;
    else
      return false;
  } else
    // Check that the result operand indices match the given commutable
    // operand indices.
    return (ResultIdx1 == CommutableOpIdx1 && ResultIdx2 == CommutableOpIdx2) ||
           (ResultIdx1 == CommutableOpIdx2 && ResultIdx2 == CommutableOpIdx1);

  return true;
}
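
// Illustrative example (not part of the original file) of how the helper
// above resolves the CommuteAnyOperandIndex wildcard; the concrete numbers
// are made up:
//
//   unsigned Idx1 = TargetInstrInfo::CommuteAnyOperandIndex; // "don't care"
//   unsigned Idx2 = 2;                                       // fixed by caller
//   bool OK = fixCommutedOpIndices(Idx1, Idx2, /*CommutableOpIdx1=*/1,
//                                  /*CommutableOpIdx2=*/2);
//   // OK is true and Idx1 becomes 1: the wildcard is bound to the commutable
//   // operand that the fixed index did not already claim.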

bool TargetInstrInfo::findCommutedOpIndices(MachineInstr &MI,
                                            unsigned &SrcOpIdx1,
                                            unsigned &SrcOpIdx2) const {
  assert(!MI.isBundle() &&
         "TargetInstrInfo::findCommutedOpIndices() can't handle bundles");

  const MCInstrDesc &MCID = MI.getDesc();
  if (!MCID.isCommutable())
    return false;

  // This assumes v0 = op v1, v2 and commuting would swap v1 and v2. If this
  // is not true, then the target must implement this.
  unsigned CommutableOpIdx1 = MCID.getNumDefs();
  unsigned CommutableOpIdx2 = CommutableOpIdx1 + 1;
  if (!fixCommutedOpIndices(SrcOpIdx1, SrcOpIdx2,
                            CommutableOpIdx1, CommutableOpIdx2))
    return false;

  if (!MI.getOperand(SrcOpIdx1).isReg() || !MI.getOperand(SrcOpIdx2).isReg())
    // No idea.
    return false;
  return true;
}

bool TargetInstrInfo::isUnpredicatedTerminator(const MachineInstr &MI) const {
  if (!MI.isTerminator()) return false;

  // Conditional branch is a special case.
  if (MI.isBranch() && !MI.isBarrier())
    return true;
  if (!MI.isPredicable())
    return true;
  return !isPredicated(MI);
}

bool TargetInstrInfo::PredicateInstruction(
    MachineInstr &MI, ArrayRef<MachineOperand> Pred) const {
  bool MadeChange = false;

  assert(!MI.isBundle() &&
         "TargetInstrInfo::PredicateInstruction() can't handle bundles");

  const MCInstrDesc &MCID = MI.getDesc();
  if (!MI.isPredicable())
    return false;

  for (unsigned j = 0, i = 0, e = MI.getNumOperands(); i != e; ++i) {
    if (MCID.OpInfo[i].isPredicate()) {
      MachineOperand &MO = MI.getOperand(i);
      if (MO.isReg()) {
        MO.setReg(Pred[j].getReg());
        MadeChange = true;
      } else if (MO.isImm()) {
        MO.setImm(Pred[j].getImm());
        MadeChange = true;
      } else if (MO.isMBB()) {
        MO.setMBB(Pred[j].getMBB());
        MadeChange = true;
      }
      ++j;
    }
  }
  return MadeChange;
}

bool TargetInstrInfo::hasLoadFromStackSlot(const MachineInstr &MI,
                                           const MachineMemOperand *&MMO,
                                           int &FrameIndex) const {
  for (MachineInstr::mmo_iterator o = MI.memoperands_begin(),
                                  oe = MI.memoperands_end();
       o != oe; ++o) {
    if ((*o)->isLoad()) {
      if (const FixedStackPseudoSourceValue *Value =
              dyn_cast_or_null<FixedStackPseudoSourceValue>(
                  (*o)->getPseudoValue())) {
        FrameIndex = Value->getFrameIndex();
        MMO = *o;
        return true;
      }
    }
  }
  return false;
}

bool TargetInstrInfo::hasStoreToStackSlot(const MachineInstr &MI,
                                          const MachineMemOperand *&MMO,
                                          int &FrameIndex) const {
  for (MachineInstr::mmo_iterator o = MI.memoperands_begin(),
                                  oe = MI.memoperands_end();
       o != oe; ++o) {
    if ((*o)->isStore()) {
      if (const FixedStackPseudoSourceValue *Value =
              dyn_cast_or_null<FixedStackPseudoSourceValue>(
                  (*o)->getPseudoValue())) {
        FrameIndex = Value->getFrameIndex();
        MMO = *o;
        return true;
      }
    }
  }
  return false;
}

bool TargetInstrInfo::getStackSlotRange(const TargetRegisterClass *RC,
                                        unsigned SubIdx, unsigned &Size,
                                        unsigned &Offset,
                                        const MachineFunction &MF) const {
  const TargetRegisterInfo *TRI = MF.getSubtarget().getRegisterInfo();
  if (!SubIdx) {
    Size = TRI->getSpillSize(*RC);
    Offset = 0;
    return true;
  }
  unsigned BitSize = TRI->getSubRegIdxSize(SubIdx);
  // Convert bit size to byte size to be consistent with
  // MCRegisterClass::getSize().
  if (BitSize % 8)
    return false;

  int BitOffset = TRI->getSubRegIdxOffset(SubIdx);
  if (BitOffset < 0 || BitOffset % 8)
    return false;

  Size = BitSize /= 8;
  Offset = (unsigned)BitOffset / 8;

  assert(TRI->getSpillSize(*RC) >= (Offset + Size) && "bad subregister range");

  if (!MF.getDataLayout().isLittleEndian()) {
    Offset = TRI->getSpillSize(*RC) - (Offset + Size);
  }
  return true;
}
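
// Worked example (not part of the original file): suppose RC spills to a
// 16-byte slot and SubIdx names a 32-bit sub-register that starts at bit 64
// of the full register. Then BitSize = 32 and BitOffset = 64, so the function
// reports Size = 4 and Offset = 8 on a little-endian target; on a big-endian
// target the offset is flipped to 16 - (8 + 4) = 4. The register class and
// sub-register index are hypothetical.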

void TargetInstrInfo::reMaterialize(MachineBasicBlock &MBB,
                                    MachineBasicBlock::iterator I,
                                    unsigned DestReg, unsigned SubIdx,
                                    const MachineInstr &Orig,
                                    const TargetRegisterInfo &TRI) const {
  MachineInstr *MI = MBB.getParent()->CloneMachineInstr(&Orig);
  MI->substituteRegister(MI->getOperand(0).getReg(), DestReg, SubIdx, TRI);
  MBB.insert(I, MI);
}

bool TargetInstrInfo::produceSameValue(const MachineInstr &MI0,
                                       const MachineInstr &MI1,
                                       const MachineRegisterInfo *MRI) const {
  return MI0.isIdenticalTo(MI1, MachineInstr::IgnoreVRegDefs);
}

MachineInstr &TargetInstrInfo::duplicate(MachineBasicBlock &MBB,
    MachineBasicBlock::iterator InsertBefore, const MachineInstr &Orig) const {
  assert(!Orig.isNotDuplicable() && "Instruction cannot be duplicated");
  MachineFunction &MF = *MBB.getParent();
  return MF.CloneMachineInstrBundle(MBB, InsertBefore, Orig);
}

// If the COPY instruction in MI can be folded to a stack operation, return
// the register class to use.
static const TargetRegisterClass *canFoldCopy(const MachineInstr &MI,
                                              unsigned FoldIdx) {
  assert(MI.isCopy() && "MI must be a COPY instruction");
  if (MI.getNumOperands() != 2)
    return nullptr;
  assert(FoldIdx < 2 && "FoldIdx refers to a nonexistent operand");

  const MachineOperand &FoldOp = MI.getOperand(FoldIdx);
  const MachineOperand &LiveOp = MI.getOperand(1 - FoldIdx);

  if (FoldOp.getSubReg() || LiveOp.getSubReg())
    return nullptr;

  unsigned FoldReg = FoldOp.getReg();
  unsigned LiveReg = LiveOp.getReg();

  assert(TargetRegisterInfo::isVirtualRegister(FoldReg) &&
         "Cannot fold physregs");

  const MachineRegisterInfo &MRI = MI.getMF()->getRegInfo();
  const TargetRegisterClass *RC = MRI.getRegClass(FoldReg);

  if (TargetRegisterInfo::isPhysicalRegister(LiveReg))
    return RC->contains(LiveOp.getReg()) ? RC : nullptr;

  if (RC->hasSubClassEq(MRI.getRegClass(LiveReg)))
    return RC;

  // FIXME: Allow folding when register classes are memory compatible.
  return nullptr;
}

void TargetInstrInfo::getNoop(MCInst &NopInst) const {
  llvm_unreachable("Not implemented");
}

static MachineInstr *foldPatchpoint(MachineFunction &MF, MachineInstr &MI,
                                    ArrayRef<unsigned> Ops, int FrameIndex,
                                    const TargetInstrInfo &TII) {
  unsigned StartIdx = 0;
  switch (MI.getOpcode()) {
  case TargetOpcode::STACKMAP: {
    // StackMapLiveValues are foldable
    StartIdx = StackMapOpers(&MI).getVarIdx();
    break;
  }
  case TargetOpcode::PATCHPOINT: {
    // For PatchPoint, the call args are not foldable (even if reported in the
    // stackmap e.g. via anyregcc).
    StartIdx = PatchPointOpers(&MI).getVarIdx();
    break;
  }
  case TargetOpcode::STATEPOINT: {
    // For statepoints, fold deopt and gc arguments, but not call arguments.
    StartIdx = StatepointOpers(&MI).getVarIdx();
    break;
  }
  default:
    llvm_unreachable("unexpected stackmap opcode");
  }

  // Return nullptr if any operand requested for folding is not foldable (not
  // part of the stackmap's live values).
  for (unsigned Op : Ops) {
    if (Op < StartIdx)
      return nullptr;
  }

  MachineInstr *NewMI =
      MF.CreateMachineInstr(TII.get(MI.getOpcode()), MI.getDebugLoc(), true);
  MachineInstrBuilder MIB(MF, NewMI);

  // No need to fold the return value, the metadata, and the function arguments.
  for (unsigned i = 0; i < StartIdx; ++i)
    MIB.add(MI.getOperand(i));

  for (unsigned i = StartIdx; i < MI.getNumOperands(); ++i) {
    MachineOperand &MO = MI.getOperand(i);
    if (is_contained(Ops, i)) {
      unsigned SpillSize;
      unsigned SpillOffset;
      // Compute the spill slot size and offset.
      const TargetRegisterClass *RC =
          MF.getRegInfo().getRegClass(MO.getReg());
      bool Valid =
          TII.getStackSlotRange(RC, MO.getSubReg(), SpillSize, SpillOffset, MF);
      if (!Valid)
        report_fatal_error("cannot spill patchpoint subregister operand");
      MIB.addImm(StackMaps::IndirectMemRefOp);
      MIB.addImm(SpillSize);
      MIB.addFrameIndex(FrameIndex);
      MIB.addImm(SpillOffset);
    }
    else
      MIB.add(MO);
  }
  return NewMI;
}

MachineInstr *TargetInstrInfo::foldMemoryOperand(MachineInstr &MI,
                                                 ArrayRef<unsigned> Ops, int FI,
                                                 LiveIntervals *LIS) const {
  auto Flags = MachineMemOperand::MONone;
  for (unsigned OpIdx : Ops)
    Flags |= MI.getOperand(OpIdx).isDef() ? MachineMemOperand::MOStore
                                          : MachineMemOperand::MOLoad;

  MachineBasicBlock *MBB = MI.getParent();
  assert(MBB && "foldMemoryOperand needs an inserted instruction");
  MachineFunction &MF = *MBB->getParent();

  // If we're not folding a load into a subreg, the size of the load is the
  // size of the spill slot. But if we are, we need to figure out what the
  // actual load size is.
  int64_t MemSize = 0;
  const MachineFrameInfo &MFI = MF.getFrameInfo();
  const TargetRegisterInfo *TRI = MF.getSubtarget().getRegisterInfo();

  if (Flags & MachineMemOperand::MOStore) {
    MemSize = MFI.getObjectSize(FI);
  } else {
    for (unsigned OpIdx : Ops) {
      int64_t OpSize = MFI.getObjectSize(FI);

      if (auto SubReg = MI.getOperand(OpIdx).getSubReg()) {
        unsigned SubRegSize = TRI->getSubRegIdxSize(SubReg);
        if (SubRegSize > 0 && !(SubRegSize % 8))
          OpSize = SubRegSize / 8;
      }

      MemSize = std::max(MemSize, OpSize);
    }
  }

  assert(MemSize && "Did not expect a zero-sized stack slot");

  MachineInstr *NewMI = nullptr;

  if (MI.getOpcode() == TargetOpcode::STACKMAP ||
      MI.getOpcode() == TargetOpcode::PATCHPOINT ||
      MI.getOpcode() == TargetOpcode::STATEPOINT) {
    // Fold stackmap/patchpoint.
    NewMI = foldPatchpoint(MF, MI, Ops, FI, *this);
    if (NewMI)
      MBB->insert(MI, NewMI);
  } else {
    // Ask the target to do the actual folding.
    NewMI = foldMemoryOperandImpl(MF, MI, Ops, MI, FI, LIS);
  }

  if (NewMI) {
    NewMI->setMemRefs(MI.memoperands_begin(), MI.memoperands_end());
    // Add a memory operand, foldMemoryOperandImpl doesn't do that.
    assert((!(Flags & MachineMemOperand::MOStore) ||
            NewMI->mayStore()) &&
           "Folded a def to a non-store!");
    assert((!(Flags & MachineMemOperand::MOLoad) ||
            NewMI->mayLoad()) &&
           "Folded a use to a non-load!");
    assert(MFI.getObjectOffset(FI) != -1);
    MachineMemOperand *MMO = MF.getMachineMemOperand(
        MachinePointerInfo::getFixedStack(MF, FI), Flags, MemSize,
        MFI.getObjectAlignment(FI));
    NewMI->addMemOperand(MF, MMO);

    return NewMI;
  }

  // Straight COPY may fold as load/store.
  if (!MI.isCopy() || Ops.size() != 1)
    return nullptr;

  const TargetRegisterClass *RC = canFoldCopy(MI, Ops[0]);
  if (!RC)
    return nullptr;

  const MachineOperand &MO = MI.getOperand(1 - Ops[0]);
  MachineBasicBlock::iterator Pos = MI;

  if (Flags == MachineMemOperand::MOStore)
    storeRegToStackSlot(*MBB, Pos, MO.getReg(), MO.isKill(), FI, RC, TRI);
  else
    loadRegFromStackSlot(*MBB, Pos, MO.getReg(), FI, RC, TRI);
  return &*--Pos;
}
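
// Illustrative sketch (not part of the original file): a spilling client
// would typically try to fold the spill slot directly into the instruction
// before falling back to a separate load or store; FoldOps, OpIdx, FI and LIS
// are assumed to come from the caller's own analysis.
//
//   SmallVector<unsigned, 2> FoldOps = {OpIdx};   // operand(s) to replace
//   if (MachineInstr *Folded = TII->foldMemoryOperand(MI, FoldOps, FI, LIS)) {
//     // The folded instruction has already been inserted and carries a frame
//     // memory operand; the original MI can now be erased by the caller.
//     MI.eraseFromParent();
//   }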

MachineInstr *TargetInstrInfo::foldMemoryOperand(MachineInstr &MI,
                                                 ArrayRef<unsigned> Ops,
                                                 MachineInstr &LoadMI,
                                                 LiveIntervals *LIS) const {
  assert(LoadMI.canFoldAsLoad() && "LoadMI isn't foldable!");
#ifndef NDEBUG
  for (unsigned OpIdx : Ops)
    assert(MI.getOperand(OpIdx).isUse() && "Folding load into def!");
#endif

  MachineBasicBlock &MBB = *MI.getParent();
  MachineFunction &MF = *MBB.getParent();

  // Ask the target to do the actual folding.
  MachineInstr *NewMI = nullptr;
  int FrameIndex = 0;

  if ((MI.getOpcode() == TargetOpcode::STACKMAP ||
       MI.getOpcode() == TargetOpcode::PATCHPOINT ||
       MI.getOpcode() == TargetOpcode::STATEPOINT) &&
      isLoadFromStackSlot(LoadMI, FrameIndex)) {
    // Fold stackmap/patchpoint.
    NewMI = foldPatchpoint(MF, MI, Ops, FrameIndex, *this);
    if (NewMI)
      NewMI = &*MBB.insert(MI, NewMI);
  } else {
    // Ask the target to do the actual folding.
    NewMI = foldMemoryOperandImpl(MF, MI, Ops, MI, LoadMI, LIS);
  }

  if (!NewMI)
    return nullptr;

  // Copy the memoperands from the load to the folded instruction.
  if (MI.memoperands_empty()) {
    NewMI->setMemRefs(LoadMI.memoperands_begin(), LoadMI.memoperands_end());
  } else {
    // Handle the rare case of folding multiple loads.
    NewMI->setMemRefs(MI.memoperands_begin(), MI.memoperands_end());
    for (MachineInstr::mmo_iterator I = LoadMI.memoperands_begin(),
                                    E = LoadMI.memoperands_end();
         I != E; ++I) {
      NewMI->addMemOperand(MF, *I);
    }
  }
  return NewMI;
}

bool TargetInstrInfo::hasReassociableOperands(
    const MachineInstr &Inst, const MachineBasicBlock *MBB) const {
  const MachineOperand &Op1 = Inst.getOperand(1);
  const MachineOperand &Op2 = Inst.getOperand(2);
  const MachineRegisterInfo &MRI = MBB->getParent()->getRegInfo();

  // We need virtual register definitions for the operands that we will
  // reassociate.
  MachineInstr *MI1 = nullptr;
  MachineInstr *MI2 = nullptr;
  if (Op1.isReg() && TargetRegisterInfo::isVirtualRegister(Op1.getReg()))
    MI1 = MRI.getUniqueVRegDef(Op1.getReg());
  if (Op2.isReg() && TargetRegisterInfo::isVirtualRegister(Op2.getReg()))
    MI2 = MRI.getUniqueVRegDef(Op2.getReg());

  // And they need to be in the trace (otherwise, they won't have a depth).
  return MI1 && MI2 && MI1->getParent() == MBB && MI2->getParent() == MBB;
}

bool TargetInstrInfo::hasReassociableSibling(const MachineInstr &Inst,
                                             bool &Commuted) const {
  const MachineBasicBlock *MBB = Inst.getParent();
  const MachineRegisterInfo &MRI = MBB->getParent()->getRegInfo();
  MachineInstr *MI1 = MRI.getUniqueVRegDef(Inst.getOperand(1).getReg());
  MachineInstr *MI2 = MRI.getUniqueVRegDef(Inst.getOperand(2).getReg());
  unsigned AssocOpcode = Inst.getOpcode();

  // If only one operand has the same opcode and it's the second source operand,
  // the operands must be commuted.
  Commuted = MI1->getOpcode() != AssocOpcode && MI2->getOpcode() == AssocOpcode;
  if (Commuted)
    std::swap(MI1, MI2);

  // 1. The previous instruction must be the same type as Inst.
  // 2. The previous instruction must have virtual register definitions for its
  //    operands in the same basic block as Inst.
  // 3. The previous instruction's result must only be used by Inst.
  return MI1->getOpcode() == AssocOpcode &&
         hasReassociableOperands(*MI1, MBB) &&
         MRI.hasOneNonDBGUse(MI1->getOperand(0).getReg());
}

// 1. The operation must be associative and commutative.
// 2. The instruction must have virtual register definitions for its
//    operands in the same basic block.
// 3. The instruction must have a reassociable sibling.
bool TargetInstrInfo::isReassociationCandidate(const MachineInstr &Inst,
                                               bool &Commuted) const {
  return isAssociativeAndCommutative(Inst) &&
         hasReassociableOperands(Inst, Inst.getParent()) &&
         hasReassociableSibling(Inst, Commuted);
}

// The concept of the reassociation pass is that these operations can benefit
// from this kind of transformation:
//
// A = ? op ?
// B = A op X (Prev)
// C = B op Y (Root)
// -->
// A = ? op ?
// B = X op Y
// C = A op B
//
// breaking the dependency between A and B, allowing them to be executed in
// parallel (or back-to-back in a pipeline) instead of depending on each other.

// FIXME: This has the potential to be expensive (compile time) while not
// improving the code at all. Some ways to limit the overhead:
// 1. Track successful transforms; bail out if hit rate gets too low.
// 2. Only enable at -O3 or some other non-default optimization level.
// 3. Pre-screen pattern candidates here: if an operand of the previous
//    instruction is known to not increase the critical path, then don't match
//    that pattern.
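
// A concrete instance of the transformation described above (illustrative
// example, not part of the original file, using made-up virtual registers
// and an ADD that is assumed to be associative and commutative on the target):
//
//   %b = ADD %a, %x        ; Prev
//   %c = ADD %b, %y        ; Root
// -->
//   %t = ADD %x, %y        ; new instruction, independent of %a's def chain
//   %c = ADD %a, %t
//
// The two-deep chain a -> b -> c becomes two adds whose inputs are ready
// earlier, shortening the critical path by one ADD latency when %x and %y
// are available before %a.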
bool TargetInstrInfo::getMachineCombinerPatterns(
    MachineInstr &Root,
    SmallVectorImpl<MachineCombinerPattern> &Patterns) const {
  bool Commute;
  if (isReassociationCandidate(Root, Commute)) {
    // We found a sequence of instructions that may be suitable for a
    // reassociation of operands to increase ILP. Specify each commutation
    // possibility for the Prev instruction in the sequence and let the
    // machine combiner decide if changing the operands is worthwhile.
    if (Commute) {
      Patterns.push_back(MachineCombinerPattern::REASSOC_AX_YB);
      Patterns.push_back(MachineCombinerPattern::REASSOC_XA_YB);
    } else {
      Patterns.push_back(MachineCombinerPattern::REASSOC_AX_BY);
      Patterns.push_back(MachineCombinerPattern::REASSOC_XA_BY);
    }
    return true;
  }

  return false;
}

/// Return true when a code sequence can improve loop throughput.
bool
TargetInstrInfo::isThroughputPattern(MachineCombinerPattern Pattern) const {
  return false;
}

/// Attempt the reassociation transformation to reduce critical path length.
/// See the above comments before getMachineCombinerPatterns().
void TargetInstrInfo::reassociateOps(
    MachineInstr &Root, MachineInstr &Prev,
    MachineCombinerPattern Pattern,
    SmallVectorImpl<MachineInstr *> &InsInstrs,
    SmallVectorImpl<MachineInstr *> &DelInstrs,
    DenseMap<unsigned, unsigned> &InstrIdxForVirtReg) const {
  MachineFunction *MF = Root.getMF();
  MachineRegisterInfo &MRI = MF->getRegInfo();
  const TargetInstrInfo *TII = MF->getSubtarget().getInstrInfo();
  const TargetRegisterInfo *TRI = MF->getSubtarget().getRegisterInfo();
  const TargetRegisterClass *RC = Root.getRegClassConstraint(0, TII, TRI);

  // This array encodes the operand index for each parameter because the
  // operands may be commuted. Each row corresponds to a pattern value,
  // and each column specifies the index of A, B, X, Y.
  unsigned OpIdx[4][4] = {
    { 1, 1, 2, 2 },
    { 1, 2, 2, 1 },
    { 2, 1, 1, 2 },
    { 2, 2, 1, 1 }
  };

  int Row;
  switch (Pattern) {
  case MachineCombinerPattern::REASSOC_AX_BY: Row = 0; break;
  case MachineCombinerPattern::REASSOC_AX_YB: Row = 1; break;
  case MachineCombinerPattern::REASSOC_XA_BY: Row = 2; break;
  case MachineCombinerPattern::REASSOC_XA_YB: Row = 3; break;
  default: llvm_unreachable("unexpected MachineCombinerPattern");
  }

  MachineOperand &OpA = Prev.getOperand(OpIdx[Row][0]);
  MachineOperand &OpB = Root.getOperand(OpIdx[Row][1]);
  MachineOperand &OpX = Prev.getOperand(OpIdx[Row][2]);
  MachineOperand &OpY = Root.getOperand(OpIdx[Row][3]);
  MachineOperand &OpC = Root.getOperand(0);

  unsigned RegA = OpA.getReg();
  unsigned RegB = OpB.getReg();
  unsigned RegX = OpX.getReg();
  unsigned RegY = OpY.getReg();
  unsigned RegC = OpC.getReg();

  if (TargetRegisterInfo::isVirtualRegister(RegA))
    MRI.constrainRegClass(RegA, RC);
  if (TargetRegisterInfo::isVirtualRegister(RegB))
    MRI.constrainRegClass(RegB, RC);
  if (TargetRegisterInfo::isVirtualRegister(RegX))
    MRI.constrainRegClass(RegX, RC);
  if (TargetRegisterInfo::isVirtualRegister(RegY))
    MRI.constrainRegClass(RegY, RC);
  if (TargetRegisterInfo::isVirtualRegister(RegC))
    MRI.constrainRegClass(RegC, RC);

  // Create a new virtual register for the result of (X op Y) instead of
  // recycling RegB because the MachineCombiner's computation of the critical
  // path requires a new register definition rather than an existing one.
  unsigned NewVR = MRI.createVirtualRegister(RC);
  InstrIdxForVirtReg.insert(std::make_pair(NewVR, 0));

  unsigned Opcode = Root.getOpcode();
  bool KillA = OpA.isKill();
  bool KillX = OpX.isKill();
  bool KillY = OpY.isKill();

  // Create new instructions for insertion.
  MachineInstrBuilder MIB1 =
      BuildMI(*MF, Prev.getDebugLoc(), TII->get(Opcode), NewVR)
          .addReg(RegX, getKillRegState(KillX))
          .addReg(RegY, getKillRegState(KillY));
  MachineInstrBuilder MIB2 =
      BuildMI(*MF, Root.getDebugLoc(), TII->get(Opcode), RegC)
          .addReg(RegA, getKillRegState(KillA))
          .addReg(NewVR, getKillRegState(true));

  setSpecialOperandAttr(Root, Prev, *MIB1, *MIB2);

  // Record new instructions for insertion and old instructions for deletion.
  InsInstrs.push_back(MIB1);
  InsInstrs.push_back(MIB2);
  DelInstrs.push_back(&Prev);
  DelInstrs.push_back(&Root);
}
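
// Illustrative mapping (not part of the original source): for
// MachineCombinerPattern::REASSOC_AX_BY (row 0 of OpIdx above), with
//   Prev: %b = ADD %a, %x     and     Root: %c = ADD %b, %y
// the code builds
//   MIB1: %t = ADD %x, %y     (NewVR corresponds to %t)
//   MIB2: %c = ADD %a, %t
// and records Prev/Root for deletion, matching the transformation sketched
// before getMachineCombinerPatterns(). The register names are made up.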

void TargetInstrInfo::genAlternativeCodeSequence(
    MachineInstr &Root, MachineCombinerPattern Pattern,
    SmallVectorImpl<MachineInstr *> &InsInstrs,
    SmallVectorImpl<MachineInstr *> &DelInstrs,
    DenseMap<unsigned, unsigned> &InstIdxForVirtReg) const {
  MachineRegisterInfo &MRI = Root.getMF()->getRegInfo();

  // Select the previous instruction in the sequence based on the input pattern.
  MachineInstr *Prev = nullptr;
  switch (Pattern) {
  case MachineCombinerPattern::REASSOC_AX_BY:
  case MachineCombinerPattern::REASSOC_XA_BY:
    Prev = MRI.getUniqueVRegDef(Root.getOperand(1).getReg());
    break;
  case MachineCombinerPattern::REASSOC_AX_YB:
  case MachineCombinerPattern::REASSOC_XA_YB:
    Prev = MRI.getUniqueVRegDef(Root.getOperand(2).getReg());
    break;
  default:
    break;
  }

  assert(Prev && "Unknown pattern for machine combiner");

  reassociateOps(Root, *Prev, Pattern, InsInstrs, DelInstrs, InstIdxForVirtReg);
}

bool TargetInstrInfo::isReallyTriviallyReMaterializableGeneric(
    const MachineInstr &MI, AliasAnalysis *AA) const {
  const MachineFunction &MF = *MI.getMF();
  const MachineRegisterInfo &MRI = MF.getRegInfo();

  // Remat clients assume operand 0 is the defined register.
  if (!MI.getNumOperands() || !MI.getOperand(0).isReg())
    return false;
  unsigned DefReg = MI.getOperand(0).getReg();

  // A sub-register definition can only be rematerialized if the instruction
  // doesn't read the other parts of the register. Otherwise it is really a
  // read-modify-write operation on the full virtual register which cannot be
  // moved safely.
  if (TargetRegisterInfo::isVirtualRegister(DefReg) &&
      MI.getOperand(0).getSubReg() && MI.readsVirtualRegister(DefReg))
    return false;

  // A load from a fixed stack slot can be rematerialized. This may be
  // redundant with subsequent checks, but it's target-independent,
  // simple, and a common case.
  int FrameIdx = 0;
  if (isLoadFromStackSlot(MI, FrameIdx) &&
      MF.getFrameInfo().isImmutableObjectIndex(FrameIdx))
    return true;

  // Avoid instructions obviously unsafe for remat.
  if (MI.isNotDuplicable() || MI.mayStore() || MI.hasUnmodeledSideEffects())
    return false;

  // Don't remat inline asm. We have no idea how expensive it is
  // even if it's side effect free.
  if (MI.isInlineAsm())
    return false;

  // Avoid instructions which load from potentially varying memory.
  if (MI.mayLoad() && !MI.isDereferenceableInvariantLoad(AA))
    return false;

  // If any of the registers accessed are non-constant, conservatively assume
  // the instruction is not rematerializable.
  for (unsigned i = 0, e = MI.getNumOperands(); i != e; ++i) {
    const MachineOperand &MO = MI.getOperand(i);
    if (!MO.isReg()) continue;
    unsigned Reg = MO.getReg();
    if (Reg == 0)
      continue;

    // Check for a well-behaved physical register.
    if (TargetRegisterInfo::isPhysicalRegister(Reg)) {
      if (MO.isUse()) {
        // If the physreg has no defs anywhere, it's just an ambient register
        // and we can freely move its uses. Alternatively, if it's allocatable,
        // it could get allocated to something with a def during allocation.
        if (!MRI.isConstantPhysReg(Reg))
          return false;
      } else {
        // A physreg def. We can't remat it.
        return false;
      }
      continue;
    }

    // Only allow one virtual-register def. There may be multiple defs of the
    // same virtual register, though.
    if (MO.isDef() && Reg != DefReg)
      return false;

    // Don't allow any virtual-register uses. Rematting an instruction with
    // virtual register uses would lengthen the live ranges of the uses, which
    // is not necessarily a good idea, certainly not "trivial".
    if (MO.isUse())
      return false;
  }

  // Everything checked out.
  return true;
}

int TargetInstrInfo::getSPAdjust(const MachineInstr &MI) const {
  const MachineFunction *MF = MI.getMF();
  const TargetFrameLowering *TFI = MF->getSubtarget().getFrameLowering();
  bool StackGrowsDown =
      TFI->getStackGrowthDirection() == TargetFrameLowering::StackGrowsDown;

  unsigned FrameSetupOpcode = getCallFrameSetupOpcode();
  unsigned FrameDestroyOpcode = getCallFrameDestroyOpcode();

  if (!isFrameInstr(MI))
    return 0;

  int SPAdj = TFI->alignSPAdjust(getFrameSize(MI));

  if ((!StackGrowsDown && MI.getOpcode() == FrameSetupOpcode) ||
      (StackGrowsDown && MI.getOpcode() == FrameDestroyOpcode))
    SPAdj = -SPAdj;

  return SPAdj;
}
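
// Worked example (not part of the original file): on a target whose stack
// grows down, a call-frame-setup pseudo with a frame size of 32 bytes yields
// SPAdj = +32 after alignment, and the matching call-frame-destroy pseudo
// yields -32; on a stack-grows-up target the signs are reversed, which is
// exactly the flip performed by the branch above.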

/// isSchedulingBoundary - Test if the given instruction should be
/// considered a scheduling boundary. This primarily includes labels
/// and terminators.
bool TargetInstrInfo::isSchedulingBoundary(const MachineInstr &MI,
                                           const MachineBasicBlock *MBB,
                                           const MachineFunction &MF) const {
  // Terminators and labels can't be scheduled around.
  if (MI.isTerminator() || MI.isPosition())
    return true;

  // Don't attempt to schedule around any instruction that defines
  // a stack-oriented pointer, as it's unlikely to be profitable. This
  // saves compile time, because it doesn't require every single
  // stack slot reference to depend on the instruction that does the
  // modification.
  const TargetLowering &TLI = *MF.getSubtarget().getTargetLowering();
  const TargetRegisterInfo *TRI = MF.getSubtarget().getRegisterInfo();
  return MI.modifiesRegister(TLI.getStackPointerRegisterToCallFrameSetup(), TRI);
}

// Provide a global flag for disabling the PreRA hazard recognizer that targets
// may choose to honor.
bool TargetInstrInfo::usePreRAHazardRecognizer() const {
  return !DisableHazardRecognizer;
}

// Default implementation of CreateTargetRAHazardRecognizer.
ScheduleHazardRecognizer *TargetInstrInfo::
CreateTargetHazardRecognizer(const TargetSubtargetInfo *STI,
                             const ScheduleDAG *DAG) const {
  // Dummy hazard recognizer allows all instructions to issue.
  return new ScheduleHazardRecognizer();
}

// Default implementation of CreateTargetMIHazardRecognizer.
ScheduleHazardRecognizer *TargetInstrInfo::
CreateTargetMIHazardRecognizer(const InstrItineraryData *II,
                               const ScheduleDAG *DAG) const {
  return (ScheduleHazardRecognizer *)
      new ScoreboardHazardRecognizer(II, DAG, "misched");
}

// Default implementation of CreateTargetPostRAHazardRecognizer.
ScheduleHazardRecognizer *TargetInstrInfo::
CreateTargetPostRAHazardRecognizer(const InstrItineraryData *II,
                                   const ScheduleDAG *DAG) const {
  return (ScheduleHazardRecognizer *)
      new ScoreboardHazardRecognizer(II, DAG, "post-RA-sched");
}

//===----------------------------------------------------------------------===//
//  SelectionDAG latency interface.
//===----------------------------------------------------------------------===//

int
TargetInstrInfo::getOperandLatency(const InstrItineraryData *ItinData,
                                   SDNode *DefNode, unsigned DefIdx,
                                   SDNode *UseNode, unsigned UseIdx) const {
  if (!ItinData || ItinData->isEmpty())
    return -1;

  if (!DefNode->isMachineOpcode())
    return -1;

  unsigned DefClass = get(DefNode->getMachineOpcode()).getSchedClass();
  if (!UseNode->isMachineOpcode())
    return ItinData->getOperandCycle(DefClass, DefIdx);
  unsigned UseClass = get(UseNode->getMachineOpcode()).getSchedClass();
  return ItinData->getOperandLatency(DefClass, DefIdx, UseClass, UseIdx);
}

int TargetInstrInfo::getInstrLatency(const InstrItineraryData *ItinData,
                                     SDNode *N) const {
  if (!ItinData || ItinData->isEmpty())
    return 1;

  if (!N->isMachineOpcode())
    return 1;

  return ItinData->getStageLatency(get(N->getMachineOpcode()).getSchedClass());
}

//===----------------------------------------------------------------------===//
//  MachineInstr latency interface.
//===----------------------------------------------------------------------===//

unsigned TargetInstrInfo::getNumMicroOps(const InstrItineraryData *ItinData,
                                         const MachineInstr &MI) const {
  if (!ItinData || ItinData->isEmpty())
    return 1;

  unsigned Class = MI.getDesc().getSchedClass();
  int UOps = ItinData->Itineraries[Class].NumMicroOps;
  if (UOps >= 0)
    return UOps;

  // The # of u-ops is dynamically determined. The specific target should
  // override this function to return the right number.
  return 1;
}

/// Return the default expected latency for a def based on its opcode.
unsigned TargetInstrInfo::defaultDefLatency(const MCSchedModel &SchedModel,
                                            const MachineInstr &DefMI) const {
  if (DefMI.isTransient())
    return 0;
  if (DefMI.mayLoad())
    return SchedModel.LoadLatency;
  if (isHighLatencyDef(DefMI.getOpcode()))
    return SchedModel.HighLatency;
  return 1;
}

unsigned TargetInstrInfo::getPredicationCost(const MachineInstr &MI) const {
  return 0;
}

unsigned TargetInstrInfo::getInstrLatency(const InstrItineraryData *ItinData,
                                          const MachineInstr &MI,
                                          unsigned *PredCost) const {
  // Default to one cycle for no itinerary. However, an "empty" itinerary may
  // still have a MinLatency property, which getStageLatency checks.
  if (!ItinData)
    return MI.mayLoad() ? 2 : 1;

  return ItinData->getStageLatency(MI.getDesc().getSchedClass());
}

bool TargetInstrInfo::hasLowDefLatency(const TargetSchedModel &SchedModel,
                                       const MachineInstr &DefMI,
                                       unsigned DefIdx) const {
  const InstrItineraryData *ItinData = SchedModel.getInstrItineraries();
  if (!ItinData || ItinData->isEmpty())
    return false;

  unsigned DefClass = DefMI.getDesc().getSchedClass();
  int DefCycle = ItinData->getOperandCycle(DefClass, DefIdx);
  return (DefCycle != -1 && DefCycle <= 1);
}

/// Both DefMI and UseMI must be valid. By default, call directly to the
/// itinerary. This may be overridden by the target.
unsigned TargetInstrInfo::getOperandLatency(const InstrItineraryData *ItinData,
                                            const MachineInstr &DefMI,
                                            unsigned DefIdx,
                                            const MachineInstr &UseMI,
                                            unsigned UseIdx) const {
  unsigned DefClass = DefMI.getDesc().getSchedClass();
  unsigned UseClass = UseMI.getDesc().getSchedClass();
  return ItinData->getOperandLatency(DefClass, DefIdx, UseClass, UseIdx);
}

/// If we can determine the operand latency from the def only, without itinerary
/// lookup, do so. Otherwise return -1.
int TargetInstrInfo::computeDefOperandLatency(
    const InstrItineraryData *ItinData, const MachineInstr &DefMI) const {

  // Let the target hook getInstrLatency handle missing itineraries.
  if (!ItinData)
    return getInstrLatency(ItinData, DefMI);

  if (ItinData->isEmpty())
    return defaultDefLatency(ItinData->SchedModel, DefMI);

  // ...operand lookup required
  return -1;
}

bool TargetInstrInfo::getRegSequenceInputs(
    const MachineInstr &MI, unsigned DefIdx,
    SmallVectorImpl<RegSubRegPairAndIdx> &InputRegs) const {
  assert((MI.isRegSequence() ||
          MI.isRegSequenceLike()) && "Instruction does not have the proper type");

  if (!MI.isRegSequence())
    return getRegSequenceLikeInputs(MI, DefIdx, InputRegs);

  // We are looking at:
  // Def = REG_SEQUENCE v0, sub0, v1, sub1, ...
  assert(DefIdx == 0 && "REG_SEQUENCE only has one def");
  for (unsigned OpIdx = 1, EndOpIdx = MI.getNumOperands(); OpIdx != EndOpIdx;
       OpIdx += 2) {
    const MachineOperand &MOReg = MI.getOperand(OpIdx);
    if (MOReg.isUndef())
      continue;
    const MachineOperand &MOSubIdx = MI.getOperand(OpIdx + 1);
    assert(MOSubIdx.isImm() &&
           "One of the subindices of the reg_sequence is not an immediate");
    // Record Reg:SubReg, SubIdx.
    InputRegs.push_back(RegSubRegPairAndIdx(MOReg.getReg(), MOReg.getSubReg(),
                                            (unsigned)MOSubIdx.getImm()));
  }
  return true;
}
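
// Illustrative example (not part of the original file): for
//
//   %d = REG_SEQUENCE %v0, %subreg.sub0, %v1, %subreg.sub1
//
// the loop above records two entries in InputRegs, conceptually
// (%v0, no-subreg, sub0) and (%v1, no-subreg, sub1); undef inputs are simply
// skipped. The register and sub-register names are made up.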

bool TargetInstrInfo::getExtractSubregInputs(
    const MachineInstr &MI, unsigned DefIdx,
    RegSubRegPairAndIdx &InputReg) const {
  assert((MI.isExtractSubreg() ||
          MI.isExtractSubregLike()) && "Instruction does not have the proper type");

  if (!MI.isExtractSubreg())
    return getExtractSubregLikeInputs(MI, DefIdx, InputReg);

  // We are looking at:
  // Def = EXTRACT_SUBREG v0.sub1, sub0.
  assert(DefIdx == 0 && "EXTRACT_SUBREG only has one def");
  const MachineOperand &MOReg = MI.getOperand(1);
  if (MOReg.isUndef())
    return false;
  const MachineOperand &MOSubIdx = MI.getOperand(2);
  assert(MOSubIdx.isImm() &&
         "The subindex of the extract_subreg is not an immediate");

  InputReg.Reg = MOReg.getReg();
  InputReg.SubReg = MOReg.getSubReg();
  InputReg.SubIdx = (unsigned)MOSubIdx.getImm();
  return true;
}

bool TargetInstrInfo::getInsertSubregInputs(
    const MachineInstr &MI, unsigned DefIdx,
    RegSubRegPair &BaseReg, RegSubRegPairAndIdx &InsertedReg) const {
  assert((MI.isInsertSubreg() ||
          MI.isInsertSubregLike()) && "Instruction does not have the proper type");

  if (!MI.isInsertSubreg())
    return getInsertSubregLikeInputs(MI, DefIdx, BaseReg, InsertedReg);

  // We are looking at:
  // Def = INSERT_SUBREG v0, v1, sub0.
  assert(DefIdx == 0 && "INSERT_SUBREG only has one def");
  const MachineOperand &MOBaseReg = MI.getOperand(1);
  const MachineOperand &MOInsertedReg = MI.getOperand(2);
  if (MOInsertedReg.isUndef())
    return false;
  const MachineOperand &MOSubIdx = MI.getOperand(3);
  assert(MOSubIdx.isImm() &&
         "One of the subindices of the insert_subreg is not an immediate");
  BaseReg.Reg = MOBaseReg.getReg();
  BaseReg.SubReg = MOBaseReg.getSubReg();

  InsertedReg.Reg = MOInsertedReg.getReg();
  InsertedReg.SubReg = MOInsertedReg.getSubReg();
  InsertedReg.SubIdx = (unsigned)MOSubIdx.getImm();
  return true;
}
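
// Illustrative example (not part of the original file): for
//
//   %d = INSERT_SUBREG %base, %val, %subreg.sub0
//
// BaseReg receives (%base, no-subreg) and InsertedReg receives
// (%val, no-subreg, sub0); as with the undef check above, an undef inserted
// value makes the query fail. The register names are made up.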
const MachineInstrBuilder & add(const MachineOperand &MO) const
virtual void genAlternativeCodeSequence(MachineInstr &Root, MachineCombinerPattern Pattern, SmallVectorImpl< MachineInstr *> &InsInstrs, SmallVectorImpl< MachineInstr *> &DelInstrs, DenseMap< unsigned, unsigned > &InstrIdxForVirtReg) const
When getMachineCombinerPatterns() finds patterns, this function generates the instructions that could...
bool modifiesRegister(unsigned Reg, const TargetRegisterInfo *TRI) const
Return true if the MachineInstr modifies (fully define or partially define) the specified register...
GCNRegPressure max(const GCNRegPressure &P1, const GCNRegPressure &P2)
bool contains(unsigned Reg) const
Return true if the specified register is included in this register class.
const TargetRegisterClass * getRegClass(unsigned Reg) const
Return the register class of the specified virtual register.
const MachineFunction * getMF() const
Return the function that contains the basic block that this instruction belongs to.
LLVM_ATTRIBUTE_NORETURN void report_fatal_error(Error Err, bool gen_crash_diag=true)
Report a serious error, calling any installed error handler.
Definition: Error.cpp:115
Compute iterated dominance frontiers using a linear time algorithm.
Definition: AllocatorList.h:24
bool isLookupPtrRegClass() const
Set if this operand is a pointer value and it requires a callback to look up its register class...
Definition: MCInstrDesc.h:87
static MachineInstr * foldPatchpoint(MachineFunction &MF, MachineInstr &MI, ArrayRef< unsigned > Ops, int FrameIndex, const TargetInstrInfo &TII)
virtual const TargetRegisterInfo * getRegisterInfo() const
getRegisterInfo - If register information is available, return it.
virtual bool isThroughputPattern(MachineCombinerPattern Pattern) const
Return true when a code sequence can improve throughput.
bool isExtractSubregLike(QueryType Type=IgnoreBundle) const
Return true if this instruction behaves the same way as the generic EXTRACT_SUBREG instructions...
Definition: MachineInstr.h:634
const DebugLoc & getDebugLoc() const
Returns the debug location id of this MachineInstr.
Definition: MachineInstr.h:285
Describe properties that are true of each instruction in the target description file.
Definition: MCInstrDesc.h:161
unsigned getReg() const
getReg - Returns the register number.
LLVM_NODISCARD LLVM_ATTRIBUTE_ALWAYS_INLINE size_t size() const
size - Get the string size.
Definition: StringRef.h:138
void setIsUndef(bool Val=true)
static bool isVirtualRegister(unsigned Reg)
Return true if the specified register number is in the virtual register namespace.
unsigned Reg
virtual MachineInstr & duplicate(MachineBasicBlock &MBB, MachineBasicBlock::iterator InsertBefore, const MachineInstr &Orig) const
Clones instruction or the whole instruction bundle Orig and insert into MBB before InsertBefore...
MachineInstr * commuteInstruction(MachineInstr &MI, bool NewMI=false, unsigned OpIdx1=CommuteAnyOperandIndex, unsigned OpIdx2=CommuteAnyOperandIndex) const
This method commutes the operands of the given machine instruction MI.
unsigned getSubReg() const
virtual bool hasReassociableOperands(const MachineInstr &Inst, const MachineBasicBlock *MBB) const
Return true when Inst has reassociable operands in the same MBB.
bool isInlineAsm() const
Definition: MachineInstr.h:867
virtual const TargetLowering * getTargetLowering() const
bool isPredicable(QueryType Type=AllInBundle) const
Return true if this instruction has a predicate operand that controls execution.
Definition: MachineInstr.h:539
virtual unsigned insertBranch(MachineBasicBlock &MBB, MachineBasicBlock *TBB, MachineBasicBlock *FBB, ArrayRef< MachineOperand > Cond, const DebugLoc &DL, int *BytesAdded=nullptr) const
Insert branch code into the end of the specified MachineBasicBlock.
bool isRegSequence() const
Definition: MachineInstr.h:884
static cl::opt< bool > DisableHazardRecognizer("disable-sched-hazard", cl::Hidden, cl::init(false), cl::desc("Disable hazard detection during preRA scheduling"))
bool isTransient() const
Return true if this is a transient instruction that is either very likely to be eliminated during reg...
Definition: MachineInstr.h:938
virtual bool getExtractSubregLikeInputs(const MachineInstr &MI, unsigned DefIdx, RegSubRegPairAndIdx &InputReg) const
Target-dependent implementation of getExtractSubregInputs.
unsigned const TargetRegisterInfo * TRI
A debug info location.
Definition: DebugLoc.h:34
MachineInstr * foldMemoryOperand(MachineInstr &MI, ArrayRef< unsigned > Ops, int FrameIndex, LiveIntervals *LIS=nullptr) const
Attempt to fold a load or store of the specified stack slot into the specified machine instruction fo...
unsigned getCallFrameDestroyOpcode() const
unsigned defaultDefLatency(const MCSchedModel &SchedModel, const MachineInstr &DefMI) const
Return the default expected latency for a def based on its opcode.
bool isImm() const
isImm - Tests if this is a MO_Immediate operand.
MachineInstr * CreateMachineInstr(const MCInstrDesc &MCID, const DebugLoc &DL, bool NoImp=false)
CreateMachineInstr - Allocate a new MachineInstr.
void setIsRenamable(bool Val=true)
unsigned getSpillSize(const TargetRegisterClass &RC) const
Return the size in bytes of the stack slot allocated to hold a spilled copy of a register from class ...
LLVM_NODISCARD LLVM_ATTRIBUTE_ALWAYS_INLINE const char * data() const
data - Get a pointer to the start of the string (which may not be null terminated).
Definition: StringRef.h:128
bool isInternalRead() const
virtual bool isHighLatencyDef(int opc) const
Return true if this opcode has high latency to its result.
const TargetRegisterClass * getRegClass(unsigned i) const
Returns the register class associated with the enumeration value.
virtual bool isUnpredicatedTerminator(const MachineInstr &MI) const
Returns true if the instruction is a terminator instruction that has not been predicated.
std::pair< iterator, bool > insert(const std::pair< KeyT, ValueT > &KV)
Definition: DenseMap.h:191
A description of a memory reference used in the backend.
virtual void reMaterialize(MachineBasicBlock &MBB, MachineBasicBlock::iterator MI, unsigned DestReg, unsigned SubIdx, const MachineInstr &Orig, const TargetRegisterInfo &TRI) const
Re-issue the specified &#39;original&#39; instruction at the specific location targeting a new destination re...
instr_iterator erase(instr_iterator I)
Remove an instruction from the instruction list and delete it.
unsigned getNumOperands() const
Return the number of declared MachineOperands for this MachineInstruction.
Definition: MCInstrDesc.h:208
Provide an instruction scheduling machine model to CodeGen passes.
const HexagonInstrInfo * TII
unsigned getVarIdx() const
Get starting index of non call related arguments (calling convention, statepoint flags, vm state and gc state).
Definition: StackMaps.h:173
const TargetRegisterClass * getRegClassConstraint(unsigned OpIdx, const TargetInstrInfo *TII, const TargetRegisterInfo *TRI) const
Compute the static register class constraint for operand OpIdx.
unsigned getNumOperands() const
Access to explicit operands of the instruction.
Definition: MachineInstr.h:314
const InstrItinerary * Itineraries
Array of itineraries selected.
This class consists of common code factored out of the SmallVector class to reduce code duplication b...
Definition: APFloat.h:42
unsigned SubReg
MachineInstr & CloneMachineInstrBundle(MachineBasicBlock &MBB, MachineBasicBlock::iterator InsertBefore, const MachineInstr &Orig)
Clones instruction or the whole instruction bundle Orig and insert into MBB before InsertBefore...
int64_t getObjectOffset(int ObjectIdx) const
Return the assigned stack offset of the specified object from the incoming stack pointer.
bool isTerminator(QueryType Type=AnyInBundle) const
Returns true if this instruction part of the terminator for a basic block.
Definition: MachineInstr.h:501
The MachineFrameInfo class represents an abstract stack frame until prolog/epilog code is inserted...
virtual bool hasLowDefLatency(const TargetSchedModel &SchedModel, const MachineInstr &DefMI, unsigned DefIdx) const
Compute operand latency of a def of &#39;Reg&#39;.
unsigned getOpcode() const
Returns the opcode of this MachineInstr.
Definition: MachineInstr.h:311
const InstrItineraryData * getInstrItineraries() const
MachineMemOperand * getMachineMemOperand(MachinePointerInfo PtrInfo, MachineMemOperand::Flags f, uint64_t s, unsigned base_alignment, const AAMDNodes &AAInfo=AAMDNodes(), const MDNode *Ranges=nullptr, SyncScope::ID SSID=SyncScope::System, AtomicOrdering Ordering=AtomicOrdering::NotAtomic, AtomicOrdering FailureOrdering=AtomicOrdering::NotAtomic)
getMachineMemOperand - Allocate a new MachineMemOperand.
This class defines information used to lower LLVM code to legal SelectionDAG operators that the targe...
bool isImmutableObjectIndex(int ObjectIdx) const
Returns true if the specified index corresponds to an immutable object.
const MCInstrDesc & getDesc() const
Returns the target instruction descriptor of this MachineInstr.
Definition: MachineInstr.h:308
virtual MachineInstr * foldMemoryOperandImpl(MachineFunction &MF, MachineInstr &MI, ArrayRef< unsigned > Ops, MachineBasicBlock::iterator InsertPt, int FrameIndex, LiveIntervals *LIS=nullptr) const
Target-dependent implementation for foldMemoryOperand.
bool isBundle() const
Definition: MachineInstr.h:888
virtual unsigned getInstrLatency(const InstrItineraryData *ItinData, const MachineInstr &MI, unsigned *PredCost=nullptr) const
Compute the instruction latency of a given instruction.
const TargetRegisterClass * getRegClass(const MCInstrDesc &TID, unsigned OpNum, const TargetRegisterInfo *TRI, const MachineFunction &MF) const
Given a machine instruction descriptor, returns the register class constraint for OpNum...
int alignSPAdjust(int SPAdj) const
alignSPAdjust - This method aligns the stack adjustment to the correct alignment. ...
static bool isAsmComment(const char *Str, const MCAsmInfo &MAI)
virtual bool getMachineCombinerPatterns(MachineInstr &Root, SmallVectorImpl< MachineCombinerPattern > &Patterns) const
Return true when there is potentially a faster code sequence for an instruction chain ending in Root...
ArrayRef - Represent a constant reference to an array (0 or more elements consecutively in memory)...
Definition: APInt.h:33
bool isLittleEndian() const
Layout endianness...
Definition: DataLayout.h:221
Itinerary data supplied by a subtarget to be used by a target.
virtual const TargetInstrInfo * getInstrInfo() const
void addMemOperand(MachineFunction &MF, MachineMemOperand *MO)
Add a MachineMemOperand to the machine instruction.
instr_iterator insert(instr_iterator I, MachineInstr *M)
Insert MI into the instruction list before I, possibly inside a bundle.
TargetInstrInfo::RegSubRegPairAndIdx RegSubRegPairAndIdx
int64_t getObjectSize(int ObjectIdx) const
Return the size of the specified object.
unsigned getStageLatency(unsigned ItinClassIndx) const
Return the total stage latency of the given class.
bool isDereferenceableInvariantLoad(AliasAnalysis *AA) const
Return true if this load instruction never traps and points to a memory location whose value doesn&#39;t ...
const TargetRegisterClass * constrainRegClass(unsigned Reg, const TargetRegisterClass *RC, unsigned MinNumRegs=0)
constrainRegClass - Constrain the register class of the specified virtual register to be a common sub...
bool isBranch(QueryType Type=AnyInBundle) const
Returns true if this is a conditional, unconditional, or indirect branch.
Definition: MachineInstr.h:509
Instances of this class represent a single low-level machine instruction.
Definition: MCInst.h:161
const DataLayout & getDataLayout() const
Return the DataLayout attached to the Module associated to this MF.
BasicBlockListType::iterator iterator
unsigned getKillRegState(bool B)
TargetInstrInfo - Interface to description of machine instruction set.
virtual void getNoop(MCInst &NopInst) const
Return the noop instruction to use for a noop.
This class is intended to be used as a base class for asm properties and features specific to the tar...
Definition: MCAsmInfo.h:56
bool usePreRAHazardRecognizer() const
Provide a global flag for disabling the PreRA hazard recognizer that targets may choose to honor...
static const unsigned CommuteAnyOperandIndex
MachineInstrBuilder BuildMI(MachineFunction &MF, const DebugLoc &DL, const MCInstrDesc &MCID)
Builder interface. Specify how to create the initial instruction itself.
unsigned getSchedClass() const
Return the scheduling class for this instruction.
Definition: MCInstrDesc.h:566
unsigned getObjectAlignment(int ObjectIdx) const
Return the alignment of the specified stack object.
bool mayStore(QueryType Type=AnyInBundle) const
Return true if this instruction could possibly modify memory.
Definition: MachineInstr.h:672
virtual MachineInstr * commuteInstructionImpl(MachineInstr &MI, bool NewMI, unsigned OpIdx1, unsigned OpIdx2) const
This method commutes the operands of the given machine instruction MI.
virtual void ReplaceTailWithBranchTo(MachineBasicBlock::iterator Tail, MachineBasicBlock *NewDest) const
Delete the instruction OldInst and everything after it, replacing it with an unconditional branch to ...
initializer< Ty > init(const Ty &Val)
Definition: CommandLine.h:410
unsigned LoadLatency
Definition: MCSchedule.h:288
* if(!EatIfPresent(lltok::kw_thread_local)) return false
ParseOptionalThreadLocal := /*empty.
unsigned const MachineRegisterInfo * MRI
virtual unsigned getPredicationCost(const MachineInstr &MI) const
HazardRecognizer - This determines whether or not an instruction can be issued this cycle...
virtual bool getInsertSubregLikeInputs(const MachineInstr &MI, unsigned DefIdx, RegSubRegPair &BaseReg, RegSubRegPairAndIdx &InsertedReg) const
Target-dependent implementation of getInsertSubregInputs.
const TargetSubtargetInfo & getSubtarget() const
getSubtarget - Return the subtarget for which this machine code is being compiled.
bool readsVirtualRegister(unsigned Reg) const
Return true if the MachineInstr reads the specified virtual register.
Definition: MachineInstr.h:973
MachineFrameInfo & getFrameInfo()
getFrameInfo - Return the frame info object for the current function.
int getOperandLatency(unsigned DefClass, unsigned DefIdx, unsigned UseClass, unsigned UseIdx) const
Compute and return the use operand latency of a given itinerary class and operand index if the value ...
MachineInstrBuilder & UseMI
static GCRegistry::Add< CoreCLRGC > E("coreclr", "CoreCLR-compatible GC")
const char * getSeparatorString() const
Definition: MCAsmInfo.h:475
virtual unsigned getNumMicroOps(const InstrItineraryData *ItinData, const MachineInstr &MI) const
Return the number of u-operations the given machine instruction will be decoded to on the target cpu...
StackDirection getStackGrowthDirection() const
getStackGrowthDirection - Return the direction the stack grows
int16_t NumMicroOps
of micro-ops, -1 means it&#39;s variable
void setMBB(MachineBasicBlock *MBB)
unsigned getCallFrameSetupOpcode() const
These methods return the opcode of the frame setup/destroy instructions if they exist (-1 otherwise)...
bool isReassociationCandidate(const MachineInstr &Inst, bool &Commuted) const
Return true if the input Inst is part of a chain of dependent ops that are suitable for reassociatio...
StringRef getCommentString() const
Definition: MCAsmInfo.h:481
virtual void setSpecialOperandAttr(MachineInstr &OldMI1, MachineInstr &OldMI2, MachineInstr &NewMI1, MachineInstr &NewMI2) const
This is an architecture-specific helper function of reassociateOps.
unsigned getVarIdx() const
Get the operand index of the variable list of non-argument operands.
Definition: StackMaps.h:128
void setImm(int64_t immVal)
void setIsInternalRead(bool Val=true)
MI-level patchpoint operands.
Definition: StackMaps.h:77
virtual unsigned getInlineAsmLength(const char *Str, const MCAsmInfo &MAI) const
Measure the specified inline asm to determine an approximation of its length.
const MachineInstrBuilder & addFrameIndex(int Idx) const
MachineCombinerPattern
These are instruction patterns matched by the machine combiner pass.
virtual bool getStackSlotRange(const TargetRegisterClass *RC, unsigned SubIdx, unsigned &Size, unsigned &Offset, const MachineFunction &MF) const
Compute the size in bytes and offset within a stack slot of a spilled register or subregister...
bool isCopy() const
Definition: MachineInstr.h:892
virtual bool PredicateInstruction(MachineInstr &MI, ArrayRef< MachineOperand > Pred) const
Convert the instruction into a predicated instruction.
bool isInsertSubregLike(QueryType Type=IgnoreBundle) const
Return true if this instruction behaves the same way as the generic INSERT_SUBREG instructions...
Definition: MachineInstr.h:648
unsigned getSubRegIdxOffset(unsigned Idx) const
Get the offset of the bit range covered by a sub-register index.
TargetRegisterInfo base class - We assume that the target defines a static array of TargetRegisterDes...
virtual bool isPredicated(const MachineInstr &MI) const
Returns true if the instruction is already predicated.
bool isMachineOpcode() const
Test if this node has a post-isel opcode, directly corresponding to a MachineInstr opcode...
#define llvm_unreachable(msg)
Marks that the current location is not supposed to be reachable.
bool isConstantPhysReg(unsigned PhysReg) const
Returns true if PhysReg is unallocatable and constant throughout the function.
bool hasSubClassEq(const TargetRegisterClass *RC) const
Returns true if RC is a sub-class of or equal to this class.
void setIsKill(bool Val=true)
The memory access writes data.
int getOperandConstraint(unsigned OpNum, MCOI::OperandConstraint Constraint) const
Returns the value of the specific constraint if it is set.
Definition: MCInstrDesc.h:185
virtual bool isSchedulingBoundary(const MachineInstr &MI, const MachineBasicBlock *MBB, const MachineFunction &MF) const
Test if the given instruction should be considered a scheduling boundary.
Iterator for intrusive lists based on ilist_node.
void addSuccessor(MachineBasicBlock *Succ, BranchProbability Prob=BranchProbability::getUnknown())
Add Succ as a successor of this MachineBasicBlock.
void substituteRegister(unsigned FromReg, unsigned ToReg, unsigned SubIdx, const TargetRegisterInfo &RegInfo)
Replace all occurrences of FromReg with ToReg:SubIdx, properly composing subreg indices where necessa...
virtual bool hasLoadFromStackSlot(const MachineInstr &MI, const MachineMemOperand *&MMO, int &FrameIndex) const
If the specified machine instruction has a load from a stack slot, return true along with the FrameIn...
mmo_iterator memoperands_begin() const
Access to memory operands of the instruction.
Definition: MachineInstr.h:416
void reassociateOps(MachineInstr &Root, MachineInstr &Prev, MachineCombinerPattern Pattern, SmallVectorImpl< MachineInstr *> &InsInstrs, SmallVectorImpl< MachineInstr *> &DelInstrs, DenseMap< unsigned, unsigned > &InstrIdxForVirtReg) const
Attempt to reassociate Root and Prev according to Pattern to reduce critical path length...
MachineInstr * CloneMachineInstr(const MachineInstr *Orig)
Create a new MachineInstr which is a copy of Orig, identical in all ways except the instruction has n...
MachineOperand class - Representation of each machine instruction operand.
This is a 'vector' (really, a variable-sized array), optimized for the case when the array is small...
Definition: SmallVector.h:861
bool isInsertSubreg() const
Definition: MachineInstr.h:876
A pair composed of a register and a sub-register index.
MachineInstrBuilder MachineInstrBuilder & DefMI
unsigned getVarIdx() const
Get the operand index of the variable list of non-argument operands.
Definition: StackMaps.h:57
bool isFrameInstr(const MachineInstr &I) const
Returns true if the argument is a frame pseudo instruction.
Information about stack frame layout on the target.
unsigned HighLatency
Definition: MCSchedule.h:295
unsigned getNumDefs() const
Return the number of MachineOperands that are register definitions.
Definition: MCInstrDesc.h:223
Represents one node in the SelectionDAG.
int64_t getFrameSize(const MachineInstr &I) const
Returns the size of the frame associated with the given frame instruction.
int64_t getImm() const
bool getRegSequenceInputs(const MachineInstr &MI, unsigned DefIdx, SmallVectorImpl< RegSubRegPairAndIdx > &InputRegs) const
Build the equivalent inputs of a REG_SEQUENCE for the given MI and DefIdx.
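A sketch of how the returned inputs are commonly walked; MI is assumed to be REG_SEQUENCE-like and TII the current TargetInstrInfo (illustrative, not code from this file):
  SmallVector<TargetInstrInfo::RegSubRegPairAndIdx, 8> Inputs;
  if (TII->getRegSequenceInputs(MI, /*DefIdx=*/0, Inputs))
    for (const TargetInstrInfo::RegSubRegPairAndIdx &In : Inputs) {
      // In.Reg (with optional In.SubReg) is inserted at sub-register
      // index In.SubIdx of the defined register.
      (void)In;
    }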
MachineInstr * getUniqueVRegDef(unsigned Reg) const
getUniqueVRegDef - Return the unique machine instr that defines the specified virtual register or nul...
virtual bool findCommutedOpIndices(MachineInstr &MI, unsigned &SrcOpIdx1, unsigned &SrcOpIdx2) const
Returns true iff the routine could find two commutable operands in the given machine instruction...
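A sketch of the usual pairing with TargetInstrInfo::commuteInstruction, whose brief appears at the top of this list (MI and TII are assumed to be in scope):
  unsigned Idx1 = TargetInstrInfo::CommuteAnyOperandIndex;
  unsigned Idx2 = TargetInstrInfo::CommuteAnyOperandIndex;
  if (TII->findCommutedOpIndices(MI, Idx1, Idx2)) {
    // Commute in place (NewMI == false); returns nullptr on failure.
    MachineInstr *Commuted =
        TII->commuteInstruction(MI, /*NewMI=*/false, Idx1, Idx2);
    (void)Commuted;
  }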
void swap(llvm::BitVector &LHS, llvm::BitVector &RHS)
Implement std::swap in terms of BitVector swap.
Definition: BitVector.h:924
unsigned getMaxInstLength() const
Definition: MCAsmInfo.h:472
virtual bool hasStoreToStackSlot(const MachineInstr &MI, const MachineMemOperand *&MMO, int &FrameIndex) const
If the specified machine instruction has a store to a stack slot, return true along with the FrameInd...
static unsigned getReg(const void *D, unsigned RC, unsigned RegNo)
bool isRenamable() const
isRenamable - Returns true if this register may be renamed, i.e.
virtual void storeRegToStackSlot(MachineBasicBlock &MBB, MachineBasicBlock::iterator MI, unsigned SrcReg, bool isKill, int FrameIndex, const TargetRegisterClass *RC, const TargetRegisterInfo *TRI) const
Store the specified register of the given register class to the specified stack frame index...
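A hedged spill/reload sketch (not from this file), assuming a basic block MBB, an insertion point InsertPt, a register Reg of class RC, an existing frame index FI, and the usual TII/TRI pointers:
  // Spill: the register is killed by the store.
  TII->storeRegToStackSlot(MBB, InsertPt, Reg, /*isKill=*/true, FI, RC, TRI);
  // Reload before the next use (see loadRegFromStackSlot below).
  TII->loadRegFromStackSlot(MBB, InsertPt, Reg, FI, RC, TRI);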
bool isEmpty() const
Returns true if there are no itineraries.
const MachineBasicBlock * getParent() const
Definition: MachineInstr.h:156
MachineRegisterInfo - Keep track of information for virtual and physical registers, including vreg register classes, use/def chains for registers, etc.
The memory access reads data.
TargetSubtargetInfo - Generic base class for all target subtargets.
static MachinePointerInfo getFixedStack(MachineFunction &MF, int FI, int64_t Offset=0)
Return a MachinePointerInfo record that refers to the specified FrameIndex.
virtual void insertNoop(MachineBasicBlock &MBB, MachineBasicBlock::iterator MI) const
Insert a noop into the instruction stream at the specified point.
bool isRegSequenceLike(QueryType Type=IgnoreBundle) const
Return true if this instruction behaves the same way as the generic REG_SEQUENCE instructions.
Definition: MachineInstr.h:619
Representation of each machine instruction.
Definition: MachineInstr.h:60
static bool isPhysicalRegister(unsigned Reg)
Return true if the specified register number is in the physical register namespace.
const MachineFunction * getParent() const
Return the MachineFunction containing this basic block.
static bool fixCommutedOpIndices(unsigned &ResultIdx1, unsigned &ResultIdx2, unsigned CommutableOpIdx1, unsigned CommutableOpIdx2)
Assigns the (CommutableOpIdx1, CommutableOpIdx2) pair of commutable operand indices to (ResultIdx1...
const MachineInstrBuilder & addImm(int64_t Val) const
Add a new immediate operand.
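For context, a typical MachineInstrBuilder chain; the opcode and operands here are placeholders for illustration only:
  // MyTarget::LOAD_FI, DL, DestReg and FI are hypothetical.
  BuildMI(MBB, InsertPt, DL, TII->get(MyTarget::LOAD_FI), DestReg)
      .addFrameIndex(FI)
      .addImm(0);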
MCSchedModel SchedModel
Basic machine properties.
virtual ScheduleHazardRecognizer * CreateTargetHazardRecognizer(const TargetSubtargetInfo *STI, const ScheduleDAG *DAG) const
Allocate and return a hazard recognizer to use for this target when scheduling the machine instructio...
MachineRegisterInfo & getRegInfo()
getRegInfo - Return information about the registers currently in use.
MI-level stackmap operands.
Definition: StackMaps.h:36
virtual unsigned isLoadFromStackSlot(const MachineInstr &MI, int &FrameIndex) const
If the specified machine instruction is a direct load from a stack slot, return the virtual or physic...
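A small sketch of how passes typically test for a stack reload with this hook (MI and TII assumed):
  int FrameIndex = 0;
  if (unsigned Reg = TII->isLoadFromStackSlot(MI, FrameIndex)) {
    // MI reloads Reg directly from the stack slot FrameIndex.
    (void)Reg;
  }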
int16_t RegClass
This specifies the register class enumeration of the operand if the operand is a register.
Definition: MCInstrDesc.h:73
bool canFoldAsLoad(QueryType Type=IgnoreBundle) const
Return true for instructions that can be folded as memory operands in other instructions.
Definition: MachineInstr.h:605
const MCInstrDesc & get(unsigned Opcode) const
Return the machine instruction descriptor that corresponds to the specified instruction opcode...
Definition: MCInstrInfo.h:45
void setReg(unsigned Reg)
Change the register this operand corresponds to.
#define I(x, y, z)
Definition: MD5.cpp:58
#define N
void setSubReg(unsigned subReg)
virtual const TargetFrameLowering * getFrameLowering() const
virtual bool produceSameValue(const MachineInstr &MI0, const MachineInstr &MI1, const MachineRegisterInfo *MRI=nullptr) const
Return true if two machine instructions would produce identical values.
MI-level Statepoint operands.
Definition: StackMaps.h:155
bool hasOneNonDBGUse(unsigned RegNo) const
hasOneNonDBGUse - Return true if there is exactly one non-Debug instruction using the specified regis...
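These two MachineRegisterInfo queries are often combined when looking for folding candidates; a sketch assuming MRI and a virtual register Reg:
  if (MachineInstr *Def = MRI.getUniqueVRegDef(Reg))
    if (MRI.hasOneNonDBGUse(Reg)) {
      // Reg has a single defining instruction and a single non-debug use,
      // so Def may be safe to fold or sink.
      (void)Def;
    }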
void removeSuccessor(MachineBasicBlock *Succ, bool NormalizeSuccProbs=false)
Remove successor from the successors list of this MachineBasicBlock.
bool isReg() const
isReg - Tests if this is a MO_Register operand.
bool mayLoad(QueryType Type=AnyInBundle) const
Return true if this instruction could possibly read memory.
Definition: MachineInstr.h:659
bool memoperands_empty() const
Return true if we don't have any memory operands which describe the memory access done by this instr...
Definition: MachineInstr.h:421
bool isIdenticalTo(const MachineInstr &Other, MICheckType Check=CheckDefs) const
Return true if this instruction is identical to Other.
LLVM Value Representation.
Definition: Value.h:73
bool hasReassociableSibling(const MachineInstr &Inst, bool &Commuted) const
Return true when Inst has reassociable sibling.
unsigned getMachineOpcode() const
This may only be called if isMachineOpcode returns true.
constexpr char Size[]
Key for Kernel::Arg::Metadata::mSize.
virtual ScheduleHazardRecognizer * CreateTargetMIHazardRecognizer(const InstrItineraryData *, const ScheduleDAG *DAG) const
Allocate and return a hazard recognizer to use for this target when scheduling the machine instructio...
bool isPosition() const
Definition: MachineInstr.h:847
unsigned getStackPointerRegisterToSaveRestore() const
If a physical register, this specifies the register that llvm.savestack/llvm.restorestack should save...
virtual bool isAssociativeAndCommutative(const MachineInstr &Inst) const
Return true when Inst is both associative and commutative.
const MCOperandInfo * OpInfo
Definition: MCInstrDesc.h:172
int getOperandCycle(unsigned ItinClassIndx, unsigned OperandIdx) const
Return the cycle for the given class and operand.
unsigned getSubRegIdxSize(unsigned Idx) const
Get the size of the bit range covered by a sub-register index.
bool hasUnmodeledSideEffects() const
Return true if this instruction has side effects that are not modeled by mayLoad / mayStore...
bool isBarrier(QueryType Type=AnyInBundle) const
Returns true if the specified instruction stops control flow from executing the instruction immediate...
Definition: MachineInstr.h:492
bool isMBB() const
isMBB - Tests if this is a MO_MachineBasicBlock operand.
A specialized PseudoSourceValue for holding FixedStack values, which must include a frame index...
virtual bool getRegSequenceLikeInputs(const MachineInstr &MI, unsigned DefIdx, SmallVectorImpl< RegSubRegPairAndIdx > &InputRegs) const
Target-dependent implementation of getRegSequenceInputs.
bool getExtractSubregInputs(const MachineInstr &MI, unsigned DefIdx, RegSubRegPairAndIdx &InputReg) const
Build the equivalent inputs of a EXTRACT_SUBREG for the given MI and DefIdx.
const MachineOperand & getOperand(unsigned i) const
Definition: MachineInstr.h:316
static const TargetRegisterClass * canFoldCopy(const MachineInstr &MI, unsigned FoldIdx)
Machine model for scheduling, bundling, and heuristics.
Definition: MCSchedule.h:247
bool getInsertSubregInputs(const MachineInstr &MI, unsigned DefIdx, RegSubRegPair &BaseReg, RegSubRegPairAndIdx &InsertedReg) const
Build the equivalent inputs of a INSERT_SUBREG for the given MI and DefIdx.
bool isExtractSubreg() const
Definition: MachineInstr.h:900
virtual ScheduleHazardRecognizer * CreateTargetPostRAHazardRecognizer(const InstrItineraryData *, const ScheduleDAG *DAG) const
Allocate and return a hazard recognizer to use for this target when scheduling the machine instructio...
int computeDefOperandLatency(const InstrItineraryData *ItinData, const MachineInstr &DefMI) const
If we can determine the operand latency from the def only, without itinerary lookup, do so.
virtual int getOperandLatency(const InstrItineraryData *ItinData, SDNode *DefNode, unsigned DefIdx, SDNode *UseNode, unsigned UseIdx) const
void setMemRefs(mmo_iterator NewMemRefs, mmo_iterator NewMemRefsEnd)
Assign this MachineInstr's memory reference descriptor list.
virtual void loadRegFromStackSlot(MachineBasicBlock &MBB, MachineBasicBlock::iterator MI, unsigned DestReg, int FrameIndex, const TargetRegisterClass *RC, const TargetRegisterInfo *TRI) const
Load the specified register of the given register class from the specified stack frame index...
bool isCommutable(QueryType Type=IgnoreBundle) const
Return true if this may be a 2- or 3-address instruction (of the form "X = op Y, Z, ..."), which produces the same result if Y and Z are exchanged.
Definition: MachineInstr.h:700
unsigned createVirtualRegister(const TargetRegisterClass *RegClass, StringRef Name="")
createVirtualRegister - Create and return a new virtual register in the function with the specified r...
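A sketch of the usual creation pattern inside a pass (MF assumed; the register class here is illustrative):
  MachineRegisterInfo &MRI = MF.getRegInfo();
  // A real pass picks the class required by the defining instruction.
  unsigned NewVReg = MRI.createVirtualRegister(RC);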
virtual int getSPAdjust(const MachineInstr &MI) const
Returns the actual stack pointer adjustment made by an instruction as part of a call sequence...
bool isNotDuplicable(QueryType Type=AnyInBundle) const
Return true if this instruction cannot be safely duplicated.
Definition: MachineInstr.h:575
This file describes how to lower LLVM code to machine code.
mmo_iterator memoperands_end() const
Definition: MachineInstr.h:417
virtual const TargetRegisterClass * getPointerRegClass(const MachineFunction &MF, unsigned Kind=0) const
Returns a TargetRegisterClass used for pointer values.
A pair composed of a pair of a register and a sub-register index, and another sub-register index...
bool is_contained(R &&Range, const E &Element)
Wrapper function around std::find to detect if an element exists in a container.
Definition: STLExtras.h:967
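For example, a trivial sketch:
  SmallVector<unsigned, 4> Regs = {1, 2, 3};
  bool HasTwo = llvm::is_contained(Regs, 2u);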