TargetInstrInfo.cpp (LLVM 9.0.0svn)
1 //===-- TargetInstrInfo.cpp - Target Instruction Information --------------===//
2 //
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 //
7 //===----------------------------------------------------------------------===//
8 //
9 // This file implements the TargetInstrInfo class.
10 //
11 //===----------------------------------------------------------------------===//
12 
13 #include "llvm/CodeGen/TargetInstrInfo.h"
14 #include "llvm/CodeGen/MachineFrameInfo.h"
15 #include "llvm/CodeGen/MachineInstrBuilder.h"
16 #include "llvm/CodeGen/MachineMemOperand.h"
17 #include "llvm/CodeGen/MachineRegisterInfo.h"
18 #include "llvm/CodeGen/PseudoSourceValue.h"
19 #include "llvm/CodeGen/ScoreboardHazardRecognizer.h"
20 #include "llvm/CodeGen/StackMaps.h"
21 #include "llvm/CodeGen/TargetFrameLowering.h"
22 #include "llvm/CodeGen/TargetLowering.h"
23 #include "llvm/CodeGen/TargetRegisterInfo.h"
24 #include "llvm/CodeGen/TargetSchedule.h"
25 #include "llvm/IR/DataLayout.h"
26 #include "llvm/MC/MCAsmInfo.h"
27 #include "llvm/MC/MCInstrItineraries.h"
28 #include "llvm/Support/CommandLine.h"
29 #include "llvm/Support/ErrorHandling.h"
30 #include "llvm/Support/raw_ostream.h"
32 #include <cctype>
33 
34 using namespace llvm;
35 
36 static cl::opt<bool> DisableHazardRecognizer(
37   "disable-sched-hazard", cl::Hidden, cl::init(false),
38   cl::desc("Disable hazard detection during preRA scheduling"));
39 
40 TargetInstrInfo::~TargetInstrInfo() {
41 }
42 
43 const TargetRegisterClass*
44 TargetInstrInfo::getRegClass(const MCInstrDesc &MCID, unsigned OpNum,
45  const TargetRegisterInfo *TRI,
46  const MachineFunction &MF) const {
47  if (OpNum >= MCID.getNumOperands())
48  return nullptr;
49 
50  short RegClass = MCID.OpInfo[OpNum].RegClass;
51  if (MCID.OpInfo[OpNum].isLookupPtrRegClass())
52  return TRI->getPointerRegClass(MF, RegClass);
53 
54  // Instructions like INSERT_SUBREG do not have fixed register classes.
55  if (RegClass < 0)
56  return nullptr;
57 
58  // Otherwise just look it up normally.
59  return TRI->getRegClass(RegClass);
60 }
61 
62 /// insertNoop - Insert a noop into the instruction stream at the specified
63 /// point.
64 void TargetInstrInfo::insertNoop(MachineBasicBlock &MBB,
65                                  MachineBasicBlock::iterator MI) const {
66   llvm_unreachable("Target didn't implement insertNoop!");
67 }
68 
69 static bool isAsmComment(const char *Str, const MCAsmInfo &MAI) {
70  return strncmp(Str, MAI.getCommentString().data(),
71  MAI.getCommentString().size()) == 0;
72 }
73 
74 /// Measure the specified inline asm to determine an approximation of its
75 /// length.
76 /// Comments (which run till the next SeparatorString or newline) do not
77 /// count as an instruction.
78 /// Any other non-whitespace text is considered an instruction, with
79 /// multiple instructions separated by SeparatorString or newlines.
80 /// Variable-length instructions are not handled here; this function
81 /// may be overloaded in the target code to do that.
82 /// We implement a special case of the .space directive which takes only a
83 /// single integer argument in base 10 that is the size in bytes. This is a
84 /// restricted form of the GAS directive in that we only interpret
85 /// simple--i.e. not a logical or arithmetic expression--size values without
86 /// the optional fill value. This is primarily used for creating arbitrary
87 /// sized inline asm blocks for testing purposes.
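/// For example, an asm string consisting only of ".space 512" is counted as
/// 512 bytes rather than as a single MaxInstLength instruction.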
88 unsigned TargetInstrInfo::getInlineAsmLength(
89   const char *Str,
90  const MCAsmInfo &MAI, const TargetSubtargetInfo *STI) const {
91  // Count the number of instructions in the asm.
92  bool AtInsnStart = true;
93  unsigned Length = 0;
94  const unsigned MaxInstLength = MAI.getMaxInstLength(STI);
95  for (; *Str; ++Str) {
96  if (*Str == '\n' || strncmp(Str, MAI.getSeparatorString(),
97  strlen(MAI.getSeparatorString())) == 0) {
98  AtInsnStart = true;
99  } else if (isAsmComment(Str, MAI)) {
100  // Stop counting as an instruction after a comment until the next
101  // separator.
102  AtInsnStart = false;
103  }
104 
105  if (AtInsnStart && !std::isspace(static_cast<unsigned char>(*Str))) {
106  unsigned AddLength = MaxInstLength;
107  if (strncmp(Str, ".space", 6) == 0) {
108  char *EStr;
109  int SpaceSize;
110  SpaceSize = strtol(Str + 6, &EStr, 10);
111  SpaceSize = SpaceSize < 0 ? 0 : SpaceSize;
112  while (*EStr != '\n' && std::isspace(static_cast<unsigned char>(*EStr)))
113  ++EStr;
114  if (*EStr == '\0' || *EStr == '\n' ||
115  isAsmComment(EStr, MAI)) // Successfully parsed .space argument
116  AddLength = SpaceSize;
117  }
118  Length += AddLength;
119  AtInsnStart = false;
120  }
121  }
122 
123  return Length;
124 }
125 
126 /// ReplaceTailWithBranchTo - Delete the instruction OldInst and everything
127 /// after it, replacing it with an unconditional branch to NewDest.
128 void
129 TargetInstrInfo::ReplaceTailWithBranchTo(MachineBasicBlock::iterator Tail,
130                                          MachineBasicBlock *NewDest) const {
131  MachineBasicBlock *MBB = Tail->getParent();
132 
133  // Remove all the old successors of MBB from the CFG.
134  while (!MBB->succ_empty())
135  MBB->removeSuccessor(MBB->succ_begin());
136 
137  // Save off the debug loc before erasing the instruction.
138  DebugLoc DL = Tail->getDebugLoc();
139 
140  // Remove all the dead instructions from the end of MBB.
141  MBB->erase(Tail, MBB->end());
142 
143   // If NewDest isn't the layout successor of MBB, insert a branch to it.
144   if (!MBB->isLayoutSuccessor(NewDest))
145     insertBranch(*MBB, NewDest, nullptr, SmallVector<MachineOperand, 0>(), DL);
146  MBB->addSuccessor(NewDest);
147 }
148 
149 MachineInstr *TargetInstrInfo::commuteInstructionImpl(MachineInstr &MI,
150                                                       bool NewMI, unsigned Idx1,
151  unsigned Idx2) const {
152  const MCInstrDesc &MCID = MI.getDesc();
153  bool HasDef = MCID.getNumDefs();
154  if (HasDef && !MI.getOperand(0).isReg())
155  // No idea how to commute this instruction. Target should implement its own.
156  return nullptr;
157 
158  unsigned CommutableOpIdx1 = Idx1; (void)CommutableOpIdx1;
159  unsigned CommutableOpIdx2 = Idx2; (void)CommutableOpIdx2;
160  assert(findCommutedOpIndices(MI, CommutableOpIdx1, CommutableOpIdx2) &&
161  CommutableOpIdx1 == Idx1 && CommutableOpIdx2 == Idx2 &&
162  "TargetInstrInfo::CommuteInstructionImpl(): not commutable operands.");
163  assert(MI.getOperand(Idx1).isReg() && MI.getOperand(Idx2).isReg() &&
164  "This only knows how to commute register operands so far");
165 
166  unsigned Reg0 = HasDef ? MI.getOperand(0).getReg() : 0;
167  unsigned Reg1 = MI.getOperand(Idx1).getReg();
168  unsigned Reg2 = MI.getOperand(Idx2).getReg();
169  unsigned SubReg0 = HasDef ? MI.getOperand(0).getSubReg() : 0;
170  unsigned SubReg1 = MI.getOperand(Idx1).getSubReg();
171  unsigned SubReg2 = MI.getOperand(Idx2).getSubReg();
172  bool Reg1IsKill = MI.getOperand(Idx1).isKill();
173  bool Reg2IsKill = MI.getOperand(Idx2).isKill();
174  bool Reg1IsUndef = MI.getOperand(Idx1).isUndef();
175  bool Reg2IsUndef = MI.getOperand(Idx2).isUndef();
176  bool Reg1IsInternal = MI.getOperand(Idx1).isInternalRead();
177  bool Reg2IsInternal = MI.getOperand(Idx2).isInternalRead();
178  // Avoid calling isRenamable for virtual registers since we assert that
179  // renamable property is only queried/set for physical registers.
180  bool Reg1IsRenamable = TargetRegisterInfo::isPhysicalRegister(Reg1)
181  ? MI.getOperand(Idx1).isRenamable()
182  : false;
183  bool Reg2IsRenamable = TargetRegisterInfo::isPhysicalRegister(Reg2)
184  ? MI.getOperand(Idx2).isRenamable()
185  : false;
186  // If destination is tied to either of the commuted source register, then
187  // it must be updated.
188  if (HasDef && Reg0 == Reg1 &&
189  MI.getDesc().getOperandConstraint(Idx1, MCOI::TIED_TO) == 0) {
190  Reg2IsKill = false;
191  Reg0 = Reg2;
192  SubReg0 = SubReg2;
193  } else if (HasDef && Reg0 == Reg2 &&
194  MI.getDesc().getOperandConstraint(Idx2, MCOI::TIED_TO) == 0) {
195  Reg1IsKill = false;
196  Reg0 = Reg1;
197  SubReg0 = SubReg1;
198  }
199 
200  MachineInstr *CommutedMI = nullptr;
201  if (NewMI) {
202  // Create a new instruction.
203  MachineFunction &MF = *MI.getMF();
204  CommutedMI = MF.CloneMachineInstr(&MI);
205  } else {
206  CommutedMI = &MI;
207  }
208 
209  if (HasDef) {
210  CommutedMI->getOperand(0).setReg(Reg0);
211  CommutedMI->getOperand(0).setSubReg(SubReg0);
212  }
213  CommutedMI->getOperand(Idx2).setReg(Reg1);
214  CommutedMI->getOperand(Idx1).setReg(Reg2);
215  CommutedMI->getOperand(Idx2).setSubReg(SubReg1);
216  CommutedMI->getOperand(Idx1).setSubReg(SubReg2);
217  CommutedMI->getOperand(Idx2).setIsKill(Reg1IsKill);
218  CommutedMI->getOperand(Idx1).setIsKill(Reg2IsKill);
219  CommutedMI->getOperand(Idx2).setIsUndef(Reg1IsUndef);
220  CommutedMI->getOperand(Idx1).setIsUndef(Reg2IsUndef);
221  CommutedMI->getOperand(Idx2).setIsInternalRead(Reg1IsInternal);
222  CommutedMI->getOperand(Idx1).setIsInternalRead(Reg2IsInternal);
223  // Avoid calling setIsRenamable for virtual registers since we assert that
224  // renamable property is only queried/set for physical registers.
225   if (TargetRegisterInfo::isPhysicalRegister(Reg1))
226     CommutedMI->getOperand(Idx2).setIsRenamable(Reg1IsRenamable);
227   if (TargetRegisterInfo::isPhysicalRegister(Reg2))
228     CommutedMI->getOperand(Idx1).setIsRenamable(Reg2IsRenamable);
229  return CommutedMI;
230 }
231 
232 MachineInstr *TargetInstrInfo::commuteInstruction(MachineInstr &MI, bool NewMI,
233                                                   unsigned OpIdx1,
234  unsigned OpIdx2) const {
235  // If OpIdx1 or OpIdx2 is not specified, then this method is free to choose
236  // any commutable operand, which is done in findCommutedOpIndices() method
237  // called below.
238  if ((OpIdx1 == CommuteAnyOperandIndex || OpIdx2 == CommuteAnyOperandIndex) &&
239  !findCommutedOpIndices(MI, OpIdx1, OpIdx2)) {
240  assert(MI.isCommutable() &&
241  "Precondition violation: MI must be commutable.");
242  return nullptr;
243  }
244  return commuteInstructionImpl(MI, NewMI, OpIdx1, OpIdx2);
245 }
246 
247 bool TargetInstrInfo::fixCommutedOpIndices(unsigned &ResultIdx1,
248  unsigned &ResultIdx2,
249  unsigned CommutableOpIdx1,
250  unsigned CommutableOpIdx2) {
251  if (ResultIdx1 == CommuteAnyOperandIndex &&
252  ResultIdx2 == CommuteAnyOperandIndex) {
253  ResultIdx1 = CommutableOpIdx1;
254  ResultIdx2 = CommutableOpIdx2;
255  } else if (ResultIdx1 == CommuteAnyOperandIndex) {
256  if (ResultIdx2 == CommutableOpIdx1)
257  ResultIdx1 = CommutableOpIdx2;
258  else if (ResultIdx2 == CommutableOpIdx2)
259  ResultIdx1 = CommutableOpIdx1;
260  else
261  return false;
262  } else if (ResultIdx2 == CommuteAnyOperandIndex) {
263  if (ResultIdx1 == CommutableOpIdx1)
264  ResultIdx2 = CommutableOpIdx2;
265  else if (ResultIdx1 == CommutableOpIdx2)
266  ResultIdx2 = CommutableOpIdx1;
267  else
268  return false;
269  } else
270  // Check that the result operand indices match the given commutable
271  // operand indices.
272  return (ResultIdx1 == CommutableOpIdx1 && ResultIdx2 == CommutableOpIdx2) ||
273  (ResultIdx1 == CommutableOpIdx2 && ResultIdx2 == CommutableOpIdx1);
274 
275  return true;
276 }
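// For example, with commutable operand indices {1, 2} reported by
// findCommutedOpIndices: a caller passing ResultIdx1 = CommuteAnyOperandIndex
// and ResultIdx2 = 2 gets ResultIdx1 resolved to 1, while a caller passing the
// fixed pair {2, 1} is accepted as-is because it matches the swapped pair.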
277 
278 bool TargetInstrInfo::findCommutedOpIndices(MachineInstr &MI,
279                                             unsigned &SrcOpIdx1,
280  unsigned &SrcOpIdx2) const {
281  assert(!MI.isBundle() &&
282  "TargetInstrInfo::findCommutedOpIndices() can't handle bundles");
283 
284  const MCInstrDesc &MCID = MI.getDesc();
285  if (!MCID.isCommutable())
286  return false;
287 
288  // This assumes v0 = op v1, v2 and commuting would swap v1 and v2. If this
289  // is not true, then the target must implement this.
290  unsigned CommutableOpIdx1 = MCID.getNumDefs();
291  unsigned CommutableOpIdx2 = CommutableOpIdx1 + 1;
292  if (!fixCommutedOpIndices(SrcOpIdx1, SrcOpIdx2,
293  CommutableOpIdx1, CommutableOpIdx2))
294  return false;
295 
296  if (!MI.getOperand(SrcOpIdx1).isReg() || !MI.getOperand(SrcOpIdx2).isReg())
297  // No idea.
298  return false;
299  return true;
300 }
301 
302 bool TargetInstrInfo::isUnpredicatedTerminator(const MachineInstr &MI) const {
303   if (!MI.isTerminator()) return false;
304 
305  // Conditional branch is a special case.
306  if (MI.isBranch() && !MI.isBarrier())
307  return true;
308  if (!MI.isPredicable())
309  return true;
310  return !isPredicated(MI);
311 }
312 
313 bool TargetInstrInfo::PredicateInstruction(
314     MachineInstr &MI, ArrayRef<MachineOperand> Pred) const {
315   bool MadeChange = false;
316 
317  assert(!MI.isBundle() &&
318  "TargetInstrInfo::PredicateInstruction() can't handle bundles");
319 
320  const MCInstrDesc &MCID = MI.getDesc();
321  if (!MI.isPredicable())
322  return false;
323 
324  for (unsigned j = 0, i = 0, e = MI.getNumOperands(); i != e; ++i) {
325  if (MCID.OpInfo[i].isPredicate()) {
326  MachineOperand &MO = MI.getOperand(i);
327  if (MO.isReg()) {
328  MO.setReg(Pred[j].getReg());
329  MadeChange = true;
330  } else if (MO.isImm()) {
331  MO.setImm(Pred[j].getImm());
332  MadeChange = true;
333  } else if (MO.isMBB()) {
334  MO.setMBB(Pred[j].getMBB());
335  MadeChange = true;
336  }
337  ++j;
338  }
339  }
340  return MadeChange;
341 }
342 
343 bool TargetInstrInfo::hasLoadFromStackSlot(
344     const MachineInstr &MI,
345     SmallVectorImpl<const MachineMemOperand *> &Accesses) const {
346   size_t StartSize = Accesses.size();
347   for (MachineInstr::mmo_iterator o = MI.memoperands_begin(),
348                                   oe = MI.memoperands_end();
349        o != oe; ++o) {
350  if ((*o)->isLoad() &&
351  dyn_cast_or_null<FixedStackPseudoSourceValue>((*o)->getPseudoValue()))
352  Accesses.push_back(*o);
353  }
354  return Accesses.size() != StartSize;
355 }
356 
357 bool TargetInstrInfo::hasStoreToStackSlot(
358     const MachineInstr &MI,
359     SmallVectorImpl<const MachineMemOperand *> &Accesses) const {
360   size_t StartSize = Accesses.size();
361   for (MachineInstr::mmo_iterator o = MI.memoperands_begin(),
362                                   oe = MI.memoperands_end();
363        o != oe; ++o) {
364  if ((*o)->isStore() &&
365  dyn_cast_or_null<FixedStackPseudoSourceValue>((*o)->getPseudoValue()))
366  Accesses.push_back(*o);
367  }
368  return Accesses.size() != StartSize;
369 }
370 
371 bool TargetInstrInfo::getStackSlotRange(const TargetRegisterClass *RC,
372                                         unsigned SubIdx, unsigned &Size,
373  unsigned &Offset,
374  const MachineFunction &MF) const {
375   const TargetRegisterInfo *TRI = MF.getSubtarget().getRegisterInfo();
376   if (!SubIdx) {
377  Size = TRI->getSpillSize(*RC);
378  Offset = 0;
379  return true;
380  }
381  unsigned BitSize = TRI->getSubRegIdxSize(SubIdx);
382  // Convert bit size to byte size.
383  if (BitSize % 8)
384  return false;
385 
386  int BitOffset = TRI->getSubRegIdxOffset(SubIdx);
387  if (BitOffset < 0 || BitOffset % 8)
388  return false;
389 
390  Size = BitSize /= 8;
391  Offset = (unsigned)BitOffset / 8;
392 
393  assert(TRI->getSpillSize(*RC) >= (Offset + Size) && "bad subregister range");
394 
395  if (!MF.getDataLayout().isLittleEndian()) {
396  Offset = TRI->getSpillSize(*RC) - (Offset + Size);
397  }
398  return true;
399 }
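// Worked example: with a 16-byte spill slot and a sub-register index covering
// bits [32, 64), Size = 4 and Offset = 4 on a little-endian target; on a
// big-endian target the offset is mirrored to 16 - (4 + 4) = 8.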
400 
401 void TargetInstrInfo::reMaterialize(MachineBasicBlock &MBB,
402                                     MachineBasicBlock::iterator I,
403                                     unsigned DestReg, unsigned SubIdx,
404  const MachineInstr &Orig,
405  const TargetRegisterInfo &TRI) const {
406  MachineInstr *MI = MBB.getParent()->CloneMachineInstr(&Orig);
407  MI->substituteRegister(MI->getOperand(0).getReg(), DestReg, SubIdx, TRI);
408  MBB.insert(I, MI);
409 }
410 
411 bool TargetInstrInfo::produceSameValue(const MachineInstr &MI0,
412                                        const MachineInstr &MI1,
413                                        const MachineRegisterInfo *MRI) const {
414   return MI0.isIdenticalTo(MI1, MachineInstr::IgnoreVRegDefs);
415 }
416 
417 MachineInstr &TargetInstrInfo::duplicate(MachineBasicBlock &MBB,
418     MachineBasicBlock::iterator InsertBefore, const MachineInstr &Orig) const {
419  assert(!Orig.isNotDuplicable() && "Instruction cannot be duplicated");
420  MachineFunction &MF = *MBB.getParent();
421  return MF.CloneMachineInstrBundle(MBB, InsertBefore, Orig);
422 }
423 
424 // If the COPY instruction in MI can be folded to a stack operation, return
425 // the register class to use.
426 static const TargetRegisterClass *canFoldCopy(const MachineInstr &MI,
427                                               unsigned FoldIdx) {
428  assert(MI.isCopy() && "MI must be a COPY instruction");
429  if (MI.getNumOperands() != 2)
430  return nullptr;
431   assert(FoldIdx < 2 && "FoldIdx refers to a nonexistent operand");
432 
433  const MachineOperand &FoldOp = MI.getOperand(FoldIdx);
434  const MachineOperand &LiveOp = MI.getOperand(1 - FoldIdx);
435 
436  if (FoldOp.getSubReg() || LiveOp.getSubReg())
437  return nullptr;
438 
439  unsigned FoldReg = FoldOp.getReg();
440  unsigned LiveReg = LiveOp.getReg();
441 
442   assert(TargetRegisterInfo::isVirtualRegister(FoldReg) &&
443          "Cannot fold physregs");
444 
445  const MachineRegisterInfo &MRI = MI.getMF()->getRegInfo();
446  const TargetRegisterClass *RC = MRI.getRegClass(FoldReg);
447 
448   if (TargetRegisterInfo::isPhysicalRegister(LiveReg))
449     return RC->contains(LiveOp.getReg()) ? RC : nullptr;
450 
451  if (RC->hasSubClassEq(MRI.getRegClass(LiveReg)))
452  return RC;
453 
454  // FIXME: Allow folding when register classes are memory compatible.
455  return nullptr;
456 }
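// Example: for "%dst = COPY %src", folding operand 0 (the def) lets
// foldMemoryOperand below rewrite the COPY as a store of %src to the stack
// slot, while folding operand 1 (the use) rewrites it as a load into %dst.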
457 
458 void TargetInstrInfo::getNoop(MCInst &NopInst) const {
459  llvm_unreachable("Not implemented");
460 }
461 
462 static MachineInstr *foldPatchpoint(MachineFunction &MF, MachineInstr &MI,
463                                     ArrayRef<unsigned> Ops, int FrameIndex,
464                                     const TargetInstrInfo &TII) {
465  unsigned StartIdx = 0;
466  switch (MI.getOpcode()) {
467  case TargetOpcode::STACKMAP: {
468  // StackMapLiveValues are foldable
469  StartIdx = StackMapOpers(&MI).getVarIdx();
470  break;
471  }
472  case TargetOpcode::PATCHPOINT: {
473  // For PatchPoint, the call args are not foldable (even if reported in the
474  // stackmap e.g. via anyregcc).
475  StartIdx = PatchPointOpers(&MI).getVarIdx();
476  break;
477  }
478  case TargetOpcode::STATEPOINT: {
479  // For statepoints, fold deopt and gc arguments, but not call arguments.
480  StartIdx = StatepointOpers(&MI).getVarIdx();
481  break;
482  }
483  default:
484  llvm_unreachable("unexpected stackmap opcode");
485  }
486 
487  // Return false if any operands requested for folding are not foldable (not
488  // part of the stackmap's live values).
489  for (unsigned Op : Ops) {
490  if (Op < StartIdx)
491  return nullptr;
492  }
493 
494  MachineInstr *NewMI =
495  MF.CreateMachineInstr(TII.get(MI.getOpcode()), MI.getDebugLoc(), true);
496  MachineInstrBuilder MIB(MF, NewMI);
497 
498  // No need to fold return, the meta data, and function arguments
499  for (unsigned i = 0; i < StartIdx; ++i)
500  MIB.add(MI.getOperand(i));
501 
502  for (unsigned i = StartIdx; i < MI.getNumOperands(); ++i) {
503  MachineOperand &MO = MI.getOperand(i);
504  if (is_contained(Ops, i)) {
505  unsigned SpillSize;
506  unsigned SpillOffset;
507  // Compute the spill slot size and offset.
508  const TargetRegisterClass *RC =
509  MF.getRegInfo().getRegClass(MO.getReg());
510  bool Valid =
511  TII.getStackSlotRange(RC, MO.getSubReg(), SpillSize, SpillOffset, MF);
512  if (!Valid)
513  report_fatal_error("cannot spill patchpoint subregister operand");
514  MIB.addImm(StackMaps::IndirectMemRefOp);
515  MIB.addImm(SpillSize);
516  MIB.addFrameIndex(FrameIndex);
517  MIB.addImm(SpillOffset);
518  }
519  else
520  MIB.add(MO);
521  }
522  return NewMI;
523 }
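// Each folded register operand is thus replaced by the four-operand sequence
// <StackMaps::IndirectMemRefOp, spill size, frame index, spill offset>, which
// the StackMaps emitter records as an indirect memory location.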
524 
525 MachineInstr *TargetInstrInfo::foldMemoryOperand(MachineInstr &MI,
526                                                  ArrayRef<unsigned> Ops, int FI,
527  LiveIntervals *LIS,
528  VirtRegMap *VRM) const {
529  auto Flags = MachineMemOperand::MONone;
530  for (unsigned OpIdx : Ops)
531     Flags |= MI.getOperand(OpIdx).isDef() ? MachineMemOperand::MOStore
532                                           : MachineMemOperand::MOLoad;
533 
534  MachineBasicBlock *MBB = MI.getParent();
535  assert(MBB && "foldMemoryOperand needs an inserted instruction");
536  MachineFunction &MF = *MBB->getParent();
537 
538  // If we're not folding a load into a subreg, the size of the load is the
539  // size of the spill slot. But if we are, we need to figure out what the
540  // actual load size is.
541  int64_t MemSize = 0;
542   const MachineFrameInfo &MFI = MF.getFrameInfo();
543   const TargetRegisterInfo *TRI = MF.getSubtarget().getRegisterInfo();
544 
545  if (Flags & MachineMemOperand::MOStore) {
546  MemSize = MFI.getObjectSize(FI);
547  } else {
548  for (unsigned OpIdx : Ops) {
549  int64_t OpSize = MFI.getObjectSize(FI);
550 
551  if (auto SubReg = MI.getOperand(OpIdx).getSubReg()) {
552  unsigned SubRegSize = TRI->getSubRegIdxSize(SubReg);
553  if (SubRegSize > 0 && !(SubRegSize % 8))
554  OpSize = SubRegSize / 8;
555  }
556 
557  MemSize = std::max(MemSize, OpSize);
558  }
559  }
560 
561  assert(MemSize && "Did not expect a zero-sized stack slot");
562 
563  MachineInstr *NewMI = nullptr;
564 
565  if (MI.getOpcode() == TargetOpcode::STACKMAP ||
566  MI.getOpcode() == TargetOpcode::PATCHPOINT ||
567  MI.getOpcode() == TargetOpcode::STATEPOINT) {
568  // Fold stackmap/patchpoint.
569  NewMI = foldPatchpoint(MF, MI, Ops, FI, *this);
570  if (NewMI)
571  MBB->insert(MI, NewMI);
572  } else {
573  // Ask the target to do the actual folding.
574  NewMI = foldMemoryOperandImpl(MF, MI, Ops, MI, FI, LIS, VRM);
575  }
576 
577  if (NewMI) {
578  NewMI->setMemRefs(MF, MI.memoperands());
579  // Add a memory operand, foldMemoryOperandImpl doesn't do that.
580  assert((!(Flags & MachineMemOperand::MOStore) ||
581  NewMI->mayStore()) &&
582  "Folded a def to a non-store!");
583  assert((!(Flags & MachineMemOperand::MOLoad) ||
584  NewMI->mayLoad()) &&
585  "Folded a use to a non-load!");
586  assert(MFI.getObjectOffset(FI) != -1);
587     MachineMemOperand *MMO = MF.getMachineMemOperand(
588         MachinePointerInfo::getFixedStack(MF, FI), Flags, MemSize,
589  MFI.getObjectAlignment(FI));
590  NewMI->addMemOperand(MF, MMO);
591 
592  return NewMI;
593  }
594 
595  // Straight COPY may fold as load/store.
596  if (!MI.isCopy() || Ops.size() != 1)
597  return nullptr;
598 
599  const TargetRegisterClass *RC = canFoldCopy(MI, Ops[0]);
600  if (!RC)
601  return nullptr;
602 
603  const MachineOperand &MO = MI.getOperand(1 - Ops[0]);
604   MachineBasicBlock::iterator Pos = MI;
605 
606  if (Flags == MachineMemOperand::MOStore)
607  storeRegToStackSlot(*MBB, Pos, MO.getReg(), MO.isKill(), FI, RC, TRI);
608  else
609  loadRegFromStackSlot(*MBB, Pos, MO.getReg(), FI, RC, TRI);
610  return &*--Pos;
611 }
612 
613 MachineInstr *TargetInstrInfo::foldMemoryOperand(MachineInstr &MI,
614                                                  ArrayRef<unsigned> Ops,
615  MachineInstr &LoadMI,
616  LiveIntervals *LIS) const {
617  assert(LoadMI.canFoldAsLoad() && "LoadMI isn't foldable!");
618 #ifndef NDEBUG
619  for (unsigned OpIdx : Ops)
620  assert(MI.getOperand(OpIdx).isUse() && "Folding load into def!");
621 #endif
622 
623  MachineBasicBlock &MBB = *MI.getParent();
624  MachineFunction &MF = *MBB.getParent();
625 
626  // Ask the target to do the actual folding.
627  MachineInstr *NewMI = nullptr;
628  int FrameIndex = 0;
629 
630  if ((MI.getOpcode() == TargetOpcode::STACKMAP ||
631  MI.getOpcode() == TargetOpcode::PATCHPOINT ||
632  MI.getOpcode() == TargetOpcode::STATEPOINT) &&
633  isLoadFromStackSlot(LoadMI, FrameIndex)) {
634  // Fold stackmap/patchpoint.
635  NewMI = foldPatchpoint(MF, MI, Ops, FrameIndex, *this);
636  if (NewMI)
637  NewMI = &*MBB.insert(MI, NewMI);
638  } else {
639  // Ask the target to do the actual folding.
640  NewMI = foldMemoryOperandImpl(MF, MI, Ops, MI, LoadMI, LIS);
641  }
642 
643  if (!NewMI)
644  return nullptr;
645 
646  // Copy the memoperands from the load to the folded instruction.
647  if (MI.memoperands_empty()) {
648  NewMI->setMemRefs(MF, LoadMI.memoperands());
649  } else {
650  // Handle the rare case of folding multiple loads.
651  NewMI->setMemRefs(MF, MI.memoperands());
652     for (MachineInstr::mmo_iterator I = LoadMI.memoperands_begin(),
653                                     E = LoadMI.memoperands_end();
654  I != E; ++I) {
655  NewMI->addMemOperand(MF, *I);
656  }
657  }
658  return NewMI;
659 }
660 
661 bool TargetInstrInfo::hasReassociableOperands(
662     const MachineInstr &Inst, const MachineBasicBlock *MBB) const {
663  const MachineOperand &Op1 = Inst.getOperand(1);
664  const MachineOperand &Op2 = Inst.getOperand(2);
665  const MachineRegisterInfo &MRI = MBB->getParent()->getRegInfo();
666 
667  // We need virtual register definitions for the operands that we will
668  // reassociate.
669  MachineInstr *MI1 = nullptr;
670  MachineInstr *MI2 = nullptr;
671   if (Op1.isReg() && TargetRegisterInfo::isVirtualRegister(Op1.getReg()))
672     MI1 = MRI.getUniqueVRegDef(Op1.getReg());
673   if (Op2.isReg() && TargetRegisterInfo::isVirtualRegister(Op2.getReg()))
674     MI2 = MRI.getUniqueVRegDef(Op2.getReg());
675 
676  // And they need to be in the trace (otherwise, they won't have a depth).
677  return MI1 && MI2 && MI1->getParent() == MBB && MI2->getParent() == MBB;
678 }
679 
680 bool TargetInstrInfo::hasReassociableSibling(const MachineInstr &Inst,
681                                              bool &Commuted) const {
682  const MachineBasicBlock *MBB = Inst.getParent();
683  const MachineRegisterInfo &MRI = MBB->getParent()->getRegInfo();
684  MachineInstr *MI1 = MRI.getUniqueVRegDef(Inst.getOperand(1).getReg());
685  MachineInstr *MI2 = MRI.getUniqueVRegDef(Inst.getOperand(2).getReg());
686  unsigned AssocOpcode = Inst.getOpcode();
687 
688  // If only one operand has the same opcode and it's the second source operand,
689  // the operands must be commuted.
690  Commuted = MI1->getOpcode() != AssocOpcode && MI2->getOpcode() == AssocOpcode;
691  if (Commuted)
692  std::swap(MI1, MI2);
693 
694  // 1. The previous instruction must be the same type as Inst.
695  // 2. The previous instruction must have virtual register definitions for its
696  // operands in the same basic block as Inst.
697  // 3. The previous instruction's result must only be used by Inst.
698  return MI1->getOpcode() == AssocOpcode &&
699  hasReassociableOperands(*MI1, MBB) &&
700  MRI.hasOneNonDBGUse(MI1->getOperand(0).getReg());
701 }
702 
703 // 1. The operation must be associative and commutative.
704 // 2. The instruction must have virtual register definitions for its
705 // operands in the same basic block.
706 // 3. The instruction must have a reassociable sibling.
707 bool TargetInstrInfo::isReassociationCandidate(const MachineInstr &Inst,
708                                                bool &Commuted) const {
709  return isAssociativeAndCommutative(Inst) &&
710  hasReassociableOperands(Inst, Inst.getParent()) &&
711  hasReassociableSibling(Inst, Commuted);
712 }
713 
714 // The concept of the reassociation pass is that these operations can benefit
715 // from this kind of transformation:
716 //
717 // A = ? op ?
718 // B = A op X (Prev)
719 // C = B op Y (Root)
720 // -->
721 // A = ? op ?
722 // B = X op Y
723 // C = A op B
724 //
725 // breaking the dependency between A and B, allowing them to be executed in
726 // parallel (or back-to-back in a pipeline) instead of depending on each other.
727 
728 // FIXME: This has the potential to be expensive (compile time) while not
729 // improving the code at all. Some ways to limit the overhead:
730 // 1. Track successful transforms; bail out if hit rate gets too low.
731 // 2. Only enable at -O3 or some other non-default optimization level.
732 // 3. Pre-screen pattern candidates here: if an operand of the previous
733 // instruction is known to not increase the critical path, then don't match
734 // that pattern.
735 bool TargetInstrInfo::getMachineCombinerPatterns(
736     MachineInstr &Root,
737  SmallVectorImpl<MachineCombinerPattern> &Patterns) const {
738  bool Commute;
739  if (isReassociationCandidate(Root, Commute)) {
740  // We found a sequence of instructions that may be suitable for a
741  // reassociation of operands to increase ILP. Specify each commutation
742  // possibility for the Prev instruction in the sequence and let the
743  // machine combiner decide if changing the operands is worthwhile.
744     if (Commute) {
745       Patterns.push_back(MachineCombinerPattern::REASSOC_AX_YB);
746       Patterns.push_back(MachineCombinerPattern::REASSOC_XA_YB);
747     } else {
748       Patterns.push_back(MachineCombinerPattern::REASSOC_AX_BY);
749       Patterns.push_back(MachineCombinerPattern::REASSOC_XA_BY);
750     }
751  return true;
752  }
753 
754  return false;
755 }
756 
757 /// Return true when a code sequence can improve loop throughput.
758 bool
759 TargetInstrInfo::isThroughputPattern(MachineCombinerPattern Pattern) const {
760   return false;
761 }
762 
763 /// Attempt the reassociation transformation to reduce critical path length.
764 /// See the above comments before getMachineCombinerPatterns().
765 void TargetInstrInfo::reassociateOps(
766     MachineInstr &Root, MachineInstr &Prev,
767     MachineCombinerPattern Pattern,
768     SmallVectorImpl<MachineInstr *> &InsInstrs,
769     SmallVectorImpl<MachineInstr *> &DelInstrs,
770     DenseMap<unsigned, unsigned> &InstrIdxForVirtReg) const {
771   MachineFunction *MF = Root.getMF();
772   MachineRegisterInfo &MRI = MF->getRegInfo();
773   const TargetInstrInfo *TII = MF->getSubtarget().getInstrInfo();
774   const TargetRegisterInfo *TRI = MF->getSubtarget().getRegisterInfo();
775   const TargetRegisterClass *RC = Root.getRegClassConstraint(0, TII, TRI);
776 
777  // This array encodes the operand index for each parameter because the
778  // operands may be commuted. Each row corresponds to a pattern value,
779  // and each column specifies the index of A, B, X, Y.
780  unsigned OpIdx[4][4] = {
781  { 1, 1, 2, 2 },
782  { 1, 2, 2, 1 },
783  { 2, 1, 1, 2 },
784  { 2, 2, 1, 1 }
785  };
786 
787  int Row;
788  switch (Pattern) {
789  case MachineCombinerPattern::REASSOC_AX_BY: Row = 0; break;
790  case MachineCombinerPattern::REASSOC_AX_YB: Row = 1; break;
791  case MachineCombinerPattern::REASSOC_XA_BY: Row = 2; break;
792  case MachineCombinerPattern::REASSOC_XA_YB: Row = 3; break;
793  default: llvm_unreachable("unexpected MachineCombinerPattern");
794  }
795 
796  MachineOperand &OpA = Prev.getOperand(OpIdx[Row][0]);
797  MachineOperand &OpB = Root.getOperand(OpIdx[Row][1]);
798  MachineOperand &OpX = Prev.getOperand(OpIdx[Row][2]);
799  MachineOperand &OpY = Root.getOperand(OpIdx[Row][3]);
800  MachineOperand &OpC = Root.getOperand(0);
801 
802  unsigned RegA = OpA.getReg();
803  unsigned RegB = OpB.getReg();
804  unsigned RegX = OpX.getReg();
805  unsigned RegY = OpY.getReg();
806  unsigned RegC = OpC.getReg();
807 
808   if (TargetRegisterInfo::isVirtualRegister(RegA))
809     MRI.constrainRegClass(RegA, RC);
810   if (TargetRegisterInfo::isVirtualRegister(RegB))
811     MRI.constrainRegClass(RegB, RC);
812   if (TargetRegisterInfo::isVirtualRegister(RegX))
813     MRI.constrainRegClass(RegX, RC);
814   if (TargetRegisterInfo::isVirtualRegister(RegY))
815     MRI.constrainRegClass(RegY, RC);
816   if (TargetRegisterInfo::isVirtualRegister(RegC))
817     MRI.constrainRegClass(RegC, RC);
818 
819  // Create a new virtual register for the result of (X op Y) instead of
820  // recycling RegB because the MachineCombiner's computation of the critical
821  // path requires a new register definition rather than an existing one.
822  unsigned NewVR = MRI.createVirtualRegister(RC);
823  InstrIdxForVirtReg.insert(std::make_pair(NewVR, 0));
824 
825  unsigned Opcode = Root.getOpcode();
826  bool KillA = OpA.isKill();
827  bool KillX = OpX.isKill();
828  bool KillY = OpY.isKill();
829 
830  // Create new instructions for insertion.
831  MachineInstrBuilder MIB1 =
832  BuildMI(*MF, Prev.getDebugLoc(), TII->get(Opcode), NewVR)
833  .addReg(RegX, getKillRegState(KillX))
834  .addReg(RegY, getKillRegState(KillY));
835  MachineInstrBuilder MIB2 =
836  BuildMI(*MF, Root.getDebugLoc(), TII->get(Opcode), RegC)
837  .addReg(RegA, getKillRegState(KillA))
838  .addReg(NewVR, getKillRegState(true));
839 
840  setSpecialOperandAttr(Root, Prev, *MIB1, *MIB2);
841 
842  // Record new instructions for insertion and old instructions for deletion.
843  InsInstrs.push_back(MIB1);
844  InsInstrs.push_back(MIB2);
845  DelInstrs.push_back(&Prev);
846  DelInstrs.push_back(&Root);
847 }
848 
849 void TargetInstrInfo::genAlternativeCodeSequence(
850     MachineInstr &Root, MachineCombinerPattern Pattern,
851     SmallVectorImpl<MachineInstr *> &InsInstrs,
852     SmallVectorImpl<MachineInstr *> &DelInstrs,
853     DenseMap<unsigned, unsigned> &InstIdxForVirtReg) const {
854   MachineRegisterInfo &MRI = Root.getMF()->getRegInfo();
855 
856  // Select the previous instruction in the sequence based on the input pattern.
857  MachineInstr *Prev = nullptr;
858   switch (Pattern) {
859   case MachineCombinerPattern::REASSOC_AX_BY:
860   case MachineCombinerPattern::REASSOC_XA_BY:
861     Prev = MRI.getUniqueVRegDef(Root.getOperand(1).getReg());
862     break;
863   case MachineCombinerPattern::REASSOC_AX_YB:
864   case MachineCombinerPattern::REASSOC_XA_YB:
865     Prev = MRI.getUniqueVRegDef(Root.getOperand(2).getReg());
866     break;
867  default:
868  break;
869  }
870 
871  assert(Prev && "Unknown pattern for machine combiner");
872 
873  reassociateOps(Root, *Prev, Pattern, InsInstrs, DelInstrs, InstIdxForVirtReg);
874 }
875 
876 bool TargetInstrInfo::isReallyTriviallyReMaterializableGeneric(
877  const MachineInstr &MI, AliasAnalysis *AA) const {
878  const MachineFunction &MF = *MI.getMF();
879  const MachineRegisterInfo &MRI = MF.getRegInfo();
880 
881  // Remat clients assume operand 0 is the defined register.
882  if (!MI.getNumOperands() || !MI.getOperand(0).isReg())
883  return false;
884  unsigned DefReg = MI.getOperand(0).getReg();
885 
886  // A sub-register definition can only be rematerialized if the instruction
887  // doesn't read the other parts of the register. Otherwise it is really a
888  // read-modify-write operation on the full virtual register which cannot be
889  // moved safely.
890   if (TargetRegisterInfo::isVirtualRegister(DefReg) &&
891       MI.getOperand(0).getSubReg() && MI.readsVirtualRegister(DefReg))
892  return false;
893 
894  // A load from a fixed stack slot can be rematerialized. This may be
895  // redundant with subsequent checks, but it's target-independent,
896  // simple, and a common case.
897  int FrameIdx = 0;
898  if (isLoadFromStackSlot(MI, FrameIdx) &&
899  MF.getFrameInfo().isImmutableObjectIndex(FrameIdx))
900  return true;
901 
902  // Avoid instructions obviously unsafe for remat.
903   if (MI.isNotDuplicable() || MI.mayStore() || MI.mayRaiseFPException() ||
904       MI.hasUnmodeledSideEffects())
905     return false;
906 
907  // Don't remat inline asm. We have no idea how expensive it is
908  // even if it's side effect free.
909  if (MI.isInlineAsm())
910  return false;
911 
912  // Avoid instructions which load from potentially varying memory.
913  if (MI.mayLoad() && !MI.isDereferenceableInvariantLoad(AA))
914  return false;
915 
916  // If any of the registers accessed are non-constant, conservatively assume
917  // the instruction is not rematerializable.
918  for (unsigned i = 0, e = MI.getNumOperands(); i != e; ++i) {
919  const MachineOperand &MO = MI.getOperand(i);
920  if (!MO.isReg()) continue;
921  unsigned Reg = MO.getReg();
922  if (Reg == 0)
923  continue;
924 
925  // Check for a well-behaved physical register.
926     if (TargetRegisterInfo::isPhysicalRegister(Reg)) {
927       if (MO.isUse()) {
928  // If the physreg has no defs anywhere, it's just an ambient register
929  // and we can freely move its uses. Alternatively, if it's allocatable,
930  // it could get allocated to something with a def during allocation.
931  if (!MRI.isConstantPhysReg(Reg))
932  return false;
933  } else {
934  // A physreg def. We can't remat it.
935  return false;
936  }
937  continue;
938  }
939 
940  // Only allow one virtual-register def. There may be multiple defs of the
941  // same virtual register, though.
942  if (MO.isDef() && Reg != DefReg)
943  return false;
944 
945  // Don't allow any virtual-register uses. Rematting an instruction with
946   // virtual register uses would lengthen the live ranges of the uses, which
947  // is not necessarily a good idea, certainly not "trivial".
948  if (MO.isUse())
949  return false;
950  }
951 
952  // Everything checked out.
953  return true;
954 }
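// For example, an instruction that merely materializes an immediate into a
// virtual register passes all of the checks above, whereas an add of two
// virtual registers does not: rematerializing it would extend the live ranges
// of its register uses.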
955 
956 int TargetInstrInfo::getSPAdjust(const MachineInstr &MI) const {
957   const MachineFunction *MF = MI.getMF();
958   const TargetFrameLowering *TFI = MF->getSubtarget().getFrameLowering();
959   bool StackGrowsDown =
960       TFI->getStackGrowthDirection() == TargetFrameLowering::StackGrowsDown;
961 
962  unsigned FrameSetupOpcode = getCallFrameSetupOpcode();
963  unsigned FrameDestroyOpcode = getCallFrameDestroyOpcode();
964 
965  if (!isFrameInstr(MI))
966  return 0;
967 
968  int SPAdj = TFI->alignSPAdjust(getFrameSize(MI));
969 
970  if ((!StackGrowsDown && MI.getOpcode() == FrameSetupOpcode) ||
971  (StackGrowsDown && MI.getOpcode() == FrameDestroyOpcode))
972  SPAdj = -SPAdj;
973 
974  return SPAdj;
975 }
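// Example: on a target whose stack grows down, a frame-setup instruction with
// a frame size of 16 yields SPAdj = 16 and the matching frame-destroy
// instruction yields -16 (assuming 16 already satisfies the stack alignment,
// so alignSPAdjust leaves it unchanged).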
976 
977 /// isSchedulingBoundary - Test if the given instruction should be
978 /// considered a scheduling boundary. This primarily includes labels
979 /// and terminators.
980 bool TargetInstrInfo::isSchedulingBoundary(const MachineInstr &MI,
981                                            const MachineBasicBlock *MBB,
982  const MachineFunction &MF) const {
983  // Terminators and labels can't be scheduled around.
984  if (MI.isTerminator() || MI.isPosition())
985  return true;
986 
987  // Don't attempt to schedule around any instruction that defines
988  // a stack-oriented pointer, as it's unlikely to be profitable. This
989  // saves compile time, because it doesn't require every single
990  // stack slot reference to depend on the instruction that does the
991  // modification.
992   const TargetLowering &TLI = *MF.getSubtarget().getTargetLowering();
993   return MI.modifiesRegister(TLI.getStackPointerRegisterToSaveRestore(),
994                              MF.getSubtarget().getRegisterInfo());
995 }
996 
997 // Provide a global flag for disabling the PreRA hazard recognizer that targets
998 // may choose to honor.
999 bool TargetInstrInfo::usePreRAHazardRecognizer() const {
1000   return !DisableHazardRecognizer;
1001 }
1002 
1003 // Default implementation of CreateTargetRAHazardRecognizer.
1004 ScheduleHazardRecognizer *TargetInstrInfo::
1005 CreateTargetHazardRecognizer(const TargetSubtargetInfo *STI,
1006                              const ScheduleDAG *DAG) const {
1007  // Dummy hazard recognizer allows all instructions to issue.
1008  return new ScheduleHazardRecognizer();
1009 }
1010 
1011 // Default implementation of CreateTargetMIHazardRecognizer.
1012 ScheduleHazardRecognizer *TargetInstrInfo::
1013 CreateTargetMIHazardRecognizer(const InstrItineraryData *II,
1014                                const ScheduleDAG *DAG) const {
1015  return (ScheduleHazardRecognizer *)
1016  new ScoreboardHazardRecognizer(II, DAG, "machine-scheduler");
1017 }
1018 
1019 // Default implementation of CreateTargetPostRAHazardRecognizer.
1020 ScheduleHazardRecognizer *TargetInstrInfo::
1021 CreateTargetPostRAHazardRecognizer(const InstrItineraryData *II,
1022                                    const ScheduleDAG *DAG) const {
1023  return (ScheduleHazardRecognizer *)
1024  new ScoreboardHazardRecognizer(II, DAG, "post-RA-sched");
1025 }
1026 
1027 //===----------------------------------------------------------------------===//
1028 // SelectionDAG latency interface.
1029 //===----------------------------------------------------------------------===//
1030 
1031 int
1032 TargetInstrInfo::getOperandLatency(const InstrItineraryData *ItinData,
1033                                    SDNode *DefNode, unsigned DefIdx,
1034  SDNode *UseNode, unsigned UseIdx) const {
1035  if (!ItinData || ItinData->isEmpty())
1036  return -1;
1037 
1038  if (!DefNode->isMachineOpcode())
1039  return -1;
1040 
1041  unsigned DefClass = get(DefNode->getMachineOpcode()).getSchedClass();
1042  if (!UseNode->isMachineOpcode())
1043  return ItinData->getOperandCycle(DefClass, DefIdx);
1044  unsigned UseClass = get(UseNode->getMachineOpcode()).getSchedClass();
1045  return ItinData->getOperandLatency(DefClass, DefIdx, UseClass, UseIdx);
1046 }
1047 
1048 int TargetInstrInfo::getInstrLatency(const InstrItineraryData *ItinData,
1049                                      SDNode *N) const {
1050  if (!ItinData || ItinData->isEmpty())
1051  return 1;
1052 
1053  if (!N->isMachineOpcode())
1054  return 1;
1055 
1056  return ItinData->getStageLatency(get(N->getMachineOpcode()).getSchedClass());
1057 }
1058 
1059 //===----------------------------------------------------------------------===//
1060 // MachineInstr latency interface.
1061 //===----------------------------------------------------------------------===//
1062 
1063 unsigned TargetInstrInfo::getNumMicroOps(const InstrItineraryData *ItinData,
1064                                          const MachineInstr &MI) const {
1065  if (!ItinData || ItinData->isEmpty())
1066  return 1;
1067 
1068  unsigned Class = MI.getDesc().getSchedClass();
1069  int UOps = ItinData->Itineraries[Class].NumMicroOps;
1070  if (UOps >= 0)
1071  return UOps;
1072 
1073  // The # of u-ops is dynamically determined. The specific target should
1074  // override this function to return the right number.
1075  return 1;
1076 }
1077 
1078 /// Return the default expected latency for a def based on its opcode.
1079 unsigned TargetInstrInfo::defaultDefLatency(const MCSchedModel &SchedModel,
1080                                             const MachineInstr &DefMI) const {
1081  if (DefMI.isTransient())
1082  return 0;
1083  if (DefMI.mayLoad())
1084  return SchedModel.LoadLatency;
1085  if (isHighLatencyDef(DefMI.getOpcode()))
1086  return SchedModel.HighLatency;
1087  return 1;
1088 }
1089 
1090 unsigned TargetInstrInfo::getPredicationCost(const MachineInstr &) const {
1091   return 0;
1092 }
1093 
1094 unsigned TargetInstrInfo::getInstrLatency(const InstrItineraryData *ItinData,
1095                                           const MachineInstr &MI,
1096  unsigned *PredCost) const {
1097  // Default to one cycle for no itinerary. However, an "empty" itinerary may
1098  // still have a MinLatency property, which getStageLatency checks.
1099  if (!ItinData)
1100  return MI.mayLoad() ? 2 : 1;
1101 
1102  return ItinData->getStageLatency(MI.getDesc().getSchedClass());
1103 }
1104 
1105 bool TargetInstrInfo::hasLowDefLatency(const TargetSchedModel &SchedModel,
1106                                        const MachineInstr &DefMI,
1107  unsigned DefIdx) const {
1108  const InstrItineraryData *ItinData = SchedModel.getInstrItineraries();
1109  if (!ItinData || ItinData->isEmpty())
1110  return false;
1111 
1112  unsigned DefClass = DefMI.getDesc().getSchedClass();
1113  int DefCycle = ItinData->getOperandCycle(DefClass, DefIdx);
1114  return (DefCycle != -1 && DefCycle <= 1);
1115 }
1116 
1117 /// Both DefMI and UseMI must be valid. By default, call directly to the
1118 /// itinerary. This may be overridden by the target.
1119 int TargetInstrInfo::getOperandLatency(const InstrItineraryData *ItinData,
1120                                        const MachineInstr &DefMI,
1121  unsigned DefIdx,
1122  const MachineInstr &UseMI,
1123  unsigned UseIdx) const {
1124  unsigned DefClass = DefMI.getDesc().getSchedClass();
1125  unsigned UseClass = UseMI.getDesc().getSchedClass();
1126  return ItinData->getOperandLatency(DefClass, DefIdx, UseClass, UseIdx);
1127 }
1128 
1129 /// If we can determine the operand latency from the def only, without itinerary
1130 /// lookup, do so. Otherwise return -1.
1131 int TargetInstrInfo::computeDefOperandLatency(
1132     const InstrItineraryData *ItinData, const MachineInstr &DefMI) const {
1133 
1134  // Let the target hook getInstrLatency handle missing itineraries.
1135  if (!ItinData)
1136  return getInstrLatency(ItinData, DefMI);
1137 
1138  if(ItinData->isEmpty())
1139  return defaultDefLatency(ItinData->SchedModel, DefMI);
1140 
1141  // ...operand lookup required
1142  return -1;
1143 }
1144 
1145 bool TargetInstrInfo::getRegSequenceInputs(
1146     const MachineInstr &MI, unsigned DefIdx,
1147  SmallVectorImpl<RegSubRegPairAndIdx> &InputRegs) const {
1148  assert((MI.isRegSequence() ||
1149  MI.isRegSequenceLike()) && "Instruction do not have the proper type");
1150 
1151  if (!MI.isRegSequence())
1152  return getRegSequenceLikeInputs(MI, DefIdx, InputRegs);
1153 
1154  // We are looking at:
1155  // Def = REG_SEQUENCE v0, sub0, v1, sub1, ...
1156  assert(DefIdx == 0 && "REG_SEQUENCE only has one def");
1157  for (unsigned OpIdx = 1, EndOpIdx = MI.getNumOperands(); OpIdx != EndOpIdx;
1158  OpIdx += 2) {
1159  const MachineOperand &MOReg = MI.getOperand(OpIdx);
1160  if (MOReg.isUndef())
1161  continue;
1162  const MachineOperand &MOSubIdx = MI.getOperand(OpIdx + 1);
1163  assert(MOSubIdx.isImm() &&
1164  "One of the subindex of the reg_sequence is not an immediate");
1165  // Record Reg:SubReg, SubIdx.
1166  InputRegs.push_back(RegSubRegPairAndIdx(MOReg.getReg(), MOReg.getSubReg(),
1167  (unsigned)MOSubIdx.getImm()));
1168  }
1169  return true;
1170 }
1171 
1172 bool TargetInstrInfo::getExtractSubregInputs(
1173     const MachineInstr &MI, unsigned DefIdx,
1174  RegSubRegPairAndIdx &InputReg) const {
1175  assert((MI.isExtractSubreg() ||
1176  MI.isExtractSubregLike()) && "Instruction do not have the proper type");
1177 
1178  if (!MI.isExtractSubreg())
1179  return getExtractSubregLikeInputs(MI, DefIdx, InputReg);
1180 
1181  // We are looking at:
1182  // Def = EXTRACT_SUBREG v0.sub1, sub0.
1183  assert(DefIdx == 0 && "EXTRACT_SUBREG only has one def");
1184  const MachineOperand &MOReg = MI.getOperand(1);
1185  if (MOReg.isUndef())
1186  return false;
1187  const MachineOperand &MOSubIdx = MI.getOperand(2);
1188  assert(MOSubIdx.isImm() &&
1189  "The subindex of the extract_subreg is not an immediate");
1190 
1191  InputReg.Reg = MOReg.getReg();
1192  InputReg.SubReg = MOReg.getSubReg();
1193  InputReg.SubIdx = (unsigned)MOSubIdx.getImm();
1194  return true;
1195 }
1196 
1197 bool TargetInstrInfo::getInsertSubregInputs(
1198     const MachineInstr &MI, unsigned DefIdx,
1199  RegSubRegPair &BaseReg, RegSubRegPairAndIdx &InsertedReg) const {
1200  assert((MI.isInsertSubreg() ||
1201  MI.isInsertSubregLike()) && "Instruction do not have the proper type");
1202 
1203  if (!MI.isInsertSubreg())
1204  return getInsertSubregLikeInputs(MI, DefIdx, BaseReg, InsertedReg);
1205 
1206  // We are looking at:
1207   // Def = INSERT_SUBREG v0, v1, sub0.
1208  assert(DefIdx == 0 && "INSERT_SUBREG only has one def");
1209  const MachineOperand &MOBaseReg = MI.getOperand(1);
1210  const MachineOperand &MOInsertedReg = MI.getOperand(2);
1211  if (MOInsertedReg.isUndef())
1212  return false;
1213  const MachineOperand &MOSubIdx = MI.getOperand(3);
1214  assert(MOSubIdx.isImm() &&
1215  "One of the subindex of the reg_sequence is not an immediate");
1216  BaseReg.Reg = MOBaseReg.getReg();
1217  BaseReg.SubReg = MOBaseReg.getSubReg();
1218 
1219  InsertedReg.Reg = MOInsertedReg.getReg();
1220  InsertedReg.SubReg = MOInsertedReg.getSubReg();
1221  InsertedReg.SubIdx = (unsigned)MOSubIdx.getImm();
1222  return true;
1223 }
const MachineInstrBuilder & add(const MachineOperand &MO) const
bool modifiesRegister(unsigned Reg, const TargetRegisterInfo *TRI) const
Return true if the MachineInstr modifies (fully define or partially define) the specified register...
GCNRegPressure max(const GCNRegPressure &P1, const GCNRegPressure &P2)
bool contains(unsigned Reg) const
Return true if the specified register is included in this register class.
const TargetRegisterClass * getRegClass(unsigned Reg) const
Return the register class of the specified virtual register.
const MachineFunction * getMF() const
Return the function that contains the basic block that this instruction belongs to.
LLVM_ATTRIBUTE_NORETURN void report_fatal_error(Error Err, bool gen_crash_diag=true)
Report a serious error, calling any installed error handler.
Definition: Error.cpp:139
This class represents lattice values for constants.
Definition: AllocatorList.h:23
bool isLookupPtrRegClass() const
Set if this operand is a pointer value and it requires a callback to look up its register class...
Definition: MCInstrDesc.h:86
static MachineInstr * foldPatchpoint(MachineFunction &MF, MachineInstr &MI, ArrayRef< unsigned > Ops, int FrameIndex, const TargetInstrInfo &TII)
virtual const TargetRegisterInfo * getRegisterInfo() const
getRegisterInfo - If register information is available, return it.
virtual bool isThroughputPattern(MachineCombinerPattern Pattern) const
Return true when a code sequence can improve throughput.
bool isExtractSubregLike(QueryType Type=IgnoreBundle) const
Return true if this instruction behaves the same way as the generic EXTRACT_SUBREG instructions...
Definition: MachineInstr.h:783
const DebugLoc & getDebugLoc() const
Returns the debug location id of this MachineInstr.
Definition: MachineInstr.h:384
Describe properties that are true of each instruction in the target description file.
Definition: MCInstrDesc.h:164
unsigned getReg() const
getReg - Returns the register number.
void setIsUndef(bool Val=true)
static bool isVirtualRegister(unsigned Reg)
Return true if the specified register number is in the virtual register namespace.
unsigned Reg
virtual MachineInstr & duplicate(MachineBasicBlock &MBB, MachineBasicBlock::iterator InsertBefore, const MachineInstr &Orig) const
Clones instruction or the whole instruction bundle Orig and insert into MBB before InsertBefore...
MachineInstr * commuteInstruction(MachineInstr &MI, bool NewMI=false, unsigned OpIdx1=CommuteAnyOperandIndex, unsigned OpIdx2=CommuteAnyOperandIndex) const
This method commutes the operands of the given machine instruction MI.
unsigned getSubReg() const
virtual bool hasReassociableOperands(const MachineInstr &Inst, const MachineBasicBlock *MBB) const
Return true when Inst has reassociable operands in the same MBB.
bool isInlineAsm() const
virtual const TargetLowering * getTargetLowering() const
bool isPredicable(QueryType Type=AllInBundle) const
Return true if this instruction has a predicate operand that controls execution.
Definition: MachineInstr.h:688
virtual unsigned insertBranch(MachineBasicBlock &MBB, MachineBasicBlock *TBB, MachineBasicBlock *FBB, ArrayRef< MachineOperand > Cond, const DebugLoc &DL, int *BytesAdded=nullptr) const
Insert branch code into the end of the specified MachineBasicBlock.
bool isRegSequence() const
static cl::opt< bool > DisableHazardRecognizer("disable-sched-hazard", cl::Hidden, cl::init(false), cl::desc("Disable hazard detection during preRA scheduling"))
bool isTransient() const
Return true if this is a transient instruction that is either very likely to be eliminated during reg...
virtual bool getExtractSubregLikeInputs(const MachineInstr &MI, unsigned DefIdx, RegSubRegPairAndIdx &InputReg) const
Target-dependent implementation of getExtractSubregInputs.
unsigned const TargetRegisterInfo * TRI
A debug info location.
Definition: DebugLoc.h:33
unsigned getCallFrameDestroyOpcode() const
virtual MachineInstr * foldMemoryOperandImpl(MachineFunction &MF, MachineInstr &MI, ArrayRef< unsigned > Ops, MachineBasicBlock::iterator InsertPt, int FrameIndex, LiveIntervals *LIS=nullptr, VirtRegMap *VRM=nullptr) const
Target-dependent implementation for foldMemoryOperand.
unsigned defaultDefLatency(const MCSchedModel &SchedModel, const MachineInstr &DefMI) const
Return the default expected latency for a def based on its opcode.
bool isImm() const
isImm - Tests if this is a MO_Immediate operand.
MachineInstr * CreateMachineInstr(const MCInstrDesc &MCID, const DebugLoc &DL, bool NoImp=false)
CreateMachineInstr - Allocate a new MachineInstr.
void setIsRenamable(bool Val=true)
unsigned getSpillSize(const TargetRegisterClass &RC) const
Return the size in bytes of the stack slot allocated to hold a spilled copy of a register from class ...
bool isInternalRead() const
virtual bool isHighLatencyDef(int opc) const
Return true if this opcode has high latency to its result.
const TargetRegisterClass * getRegClass(unsigned i) const
Returns the register class associated with the enumeration value.
virtual bool isUnpredicatedTerminator(const MachineInstr &MI) const
Returns true if the instruction is a terminator instruction that has not been predicated.
std::pair< iterator, bool > insert(const std::pair< KeyT, ValueT > &KV)
Definition: DenseMap.h:221
A description of a memory reference used in the backend.
virtual void reMaterialize(MachineBasicBlock &MBB, MachineBasicBlock::iterator MI, unsigned DestReg, unsigned SubIdx, const MachineInstr &Orig, const TargetRegisterInfo &TRI) const
Re-issue the specified &#39;original&#39; instruction at the specific location targeting a new destination re...
instr_iterator erase(instr_iterator I)
Remove an instruction from the instruction list and delete it.
unsigned getNumOperands() const
Return the number of declared MachineOperands for this MachineInstruction.
Definition: MCInstrDesc.h:211
Provide an instruction scheduling machine model to CodeGen passes.
const HexagonInstrInfo * TII
unsigned getVarIdx() const
Get starting index of non call related arguments (calling convention, statepoint flags, vm state and gc state).
Definition: StackMaps.h:172
const TargetRegisterClass * getRegClassConstraint(unsigned OpIdx, const TargetInstrInfo *TII, const TargetRegisterInfo *TRI) const
Compute the static register class constraint for operand OpIdx.
unsigned getNumOperands() const
Retuns the total number of operands.
Definition: MachineInstr.h:413
const TargetRegisterClass * getRegClass(const MCInstrDesc &MCID, unsigned OpNum, const TargetRegisterInfo *TRI, const MachineFunction &MF) const
Given a machine instruction descriptor, returns the register class constraint for OpNum...
const InstrItinerary * Itineraries
Array of itineraries selected.
This class consists of common code factored out of the SmallVector class to reduce code duplication b...
Definition: APFloat.h:41
unsigned SubReg
MachineInstr & CloneMachineInstrBundle(MachineBasicBlock &MBB, MachineBasicBlock::iterator InsertBefore, const MachineInstr &Orig)
Clones instruction or the whole instruction bundle Orig and insert into MBB before InsertBefore...
int64_t getObjectOffset(int ObjectIdx) const
Return the assigned stack offset of the specified object from the incoming stack pointer.
bool isTerminator(QueryType Type=AnyInBundle) const
Returns true if this instruction part of the terminator for a basic block.
Definition: MachineInstr.h:650
The MachineFrameInfo class represents an abstract stack frame until prolog/epilog code is inserted...
virtual bool hasLowDefLatency(const TargetSchedModel &SchedModel, const MachineInstr &DefMI, unsigned DefIdx) const
Compute operand latency of a def of &#39;Reg&#39;.
unsigned getOpcode() const
Returns the opcode of this MachineInstr.
Definition: MachineInstr.h:410
const InstrItineraryData * getInstrItineraries() const
MachineMemOperand * getMachineMemOperand(MachinePointerInfo PtrInfo, MachineMemOperand::Flags f, uint64_t s, unsigned base_alignment, const AAMDNodes &AAInfo=AAMDNodes(), const MDNode *Ranges=nullptr, SyncScope::ID SSID=SyncScope::System, AtomicOrdering Ordering=AtomicOrdering::NotAtomic, AtomicOrdering FailureOrdering=AtomicOrdering::NotAtomic)
getMachineMemOperand - Allocate a new MachineMemOperand.
This class defines information used to lower LLVM code to legal SelectionDAG operators that the targe...
bool isImmutableObjectIndex(int ObjectIdx) const
Returns true if the specified index corresponds to an immutable object.
const MCInstrDesc & getDesc() const
Returns the target instruction descriptor of this MachineInstr.
Definition: MachineInstr.h:407
bool isBundle() const
bool mayRaiseFPException() const
Return true if this instruction could possibly raise a floating-point exception.
Definition: MachineInstr.h:841
virtual unsigned getInstrLatency(const InstrItineraryData *ItinData, const MachineInstr &MI, unsigned *PredCost=nullptr) const
Compute the instruction latency of a given instruction.
int alignSPAdjust(int SPAdj) const
alignSPAdjust - This method aligns the stack adjustment to the correct alignment. ...
static bool isAsmComment(const char *Str, const MCAsmInfo &MAI)
virtual bool getMachineCombinerPatterns(MachineInstr &Root, SmallVectorImpl< MachineCombinerPattern > &Patterns) const
Return true when there is potentially a faster code sequence for an instruction chain ending in Root...
ArrayRef - Represent a constant reference to an array (0 or more elements consecutively in memory)...
Definition: APInt.h:32
LLVM_NODISCARD size_t size() const
size - Get the string size.
Definition: StringRef.h:130
bool isLittleEndian() const
Layout endianness...
Definition: DataLayout.h:232
Itinerary data supplied by a subtarget to be used by a target.
virtual const TargetInstrInfo * getInstrInfo() const
void addMemOperand(MachineFunction &MF, MachineMemOperand *MO)
Add a MachineMemOperand to the machine instruction.
instr_iterator insert(instr_iterator I, MachineInstr *M)
Insert MI into the instruction list before I, possibly inside a bundle.
virtual unsigned getInlineAsmLength(const char *Str, const MCAsmInfo &MAI, const TargetSubtargetInfo *STI=nullptr) const
Measure the specified inline asm to determine an approximation of its length.
TargetInstrInfo::RegSubRegPairAndIdx RegSubRegPairAndIdx
int64_t getObjectSize(int ObjectIdx) const
Return the size of the specified object.
unsigned getStageLatency(unsigned ItinClassIndx) const
Return the total stage latency of the given class.
bool isDereferenceableInvariantLoad(AliasAnalysis *AA) const
Return true if this load instruction never traps and points to a memory location whose value doesn&#39;t ...
const TargetRegisterClass * constrainRegClass(unsigned Reg, const TargetRegisterClass *RC, unsigned MinNumRegs=0)
constrainRegClass - Constrain the register class of the specified virtual register to be a common sub...
bool isBranch(QueryType Type=AnyInBundle) const
Returns true if this is a conditional, unconditional, or indirect branch.
Definition: MachineInstr.h:658
Instances of this class represent a single low-level machine instruction.
Definition: MCInst.h:158
const DataLayout & getDataLayout() const
Return the DataLayout attached to the Module associated to this MF.
BasicBlockListType::iterator iterator
unsigned getKillRegState(bool B)
TargetInstrInfo - Interface to description of machine instruction set.
virtual void getNoop(MCInst &NopInst) const
Return the noop instruction to use for a noop.
This class is intended to be used as a base class for asm properties and features specific to the tar...
Definition: MCAsmInfo.h:56
bool usePreRAHazardRecognizer() const
Provide a global flag for disabling the PreRA hazard recognizer that targets may choose to honor...
static const unsigned CommuteAnyOperandIndex
MachineInstrBuilder BuildMI(MachineFunction &MF, const DebugLoc &DL, const MCInstrDesc &MCID)
Builder interface. Specify how to create the initial instruction itself.
unsigned getSchedClass() const
Return the scheduling class for this instruction.
Definition: MCInstrDesc.h:582
unsigned getObjectAlignment(int ObjectIdx) const
Return the alignment of the specified stack object.
bool mayStore(QueryType Type=AnyInBundle) const
Return true if this instruction could possibly modify memory.
Definition: MachineInstr.h:821
virtual MachineInstr * commuteInstructionImpl(MachineInstr &MI, bool NewMI, unsigned OpIdx1, unsigned OpIdx2) const
This method commutes the operands of the given machine instruction MI.
virtual void ReplaceTailWithBranchTo(MachineBasicBlock::iterator Tail, MachineBasicBlock *NewDest) const
Delete the instruction OldInst and everything after it, replacing it with an unconditional branch to ...
initializer< Ty > init(const Ty &Val)
Definition: CommandLine.h:432
unsigned LoadLatency
Definition: MCSchedule.h:285
* if(!EatIfPresent(lltok::kw_thread_local)) return false
ParseOptionalThreadLocal := /*empty.
unsigned const MachineRegisterInfo * MRI
virtual unsigned getPredicationCost(const MachineInstr &MI) const
ArrayRef< MachineMemOperand * > memoperands() const
Access to memory operands of the instruction.
Definition: MachineInstr.h:517
HazardRecognizer - This determines whether or not an instruction can be issued this cycle...
virtual bool getInsertSubregLikeInputs(const MachineInstr &MI, unsigned DefIdx, RegSubRegPair &BaseReg, RegSubRegPairAndIdx &InsertedReg) const
Target-dependent implementation of getInsertSubregInputs.
const TargetSubtargetInfo & getSubtarget() const
getSubtarget - Return the subtarget for which this machine code is being compiled.
bool readsVirtualRegister(unsigned Reg) const
Return true if the MachineInstr reads the specified virtual register.
MachineFrameInfo & getFrameInfo()
getFrameInfo - Return the frame info object for the current function.
int getOperandLatency(unsigned DefClass, unsigned DefIdx, unsigned UseClass, unsigned UseIdx) const
Compute and return the use operand latency of a given itinerary class and operand index if the value ...
MachineInstrBuilder & UseMI
static GCRegistry::Add< CoreCLRGC > E("coreclr", "CoreCLR-compatible GC")
const char * getSeparatorString() const
Definition: MCAsmInfo.h:486
virtual unsigned getNumMicroOps(const InstrItineraryData *ItinData, const MachineInstr &MI) const
Return the number of u-operations the given machine instruction will be decoded to on the target cpu...
StackDirection getStackGrowthDirection() const
getStackGrowthDirection - Return the direction the stack grows
int16_t NumMicroOps
of micro-ops, -1 means it&#39;s variable
void setMBB(MachineBasicBlock *MBB)
unsigned getCallFrameSetupOpcode() const
These methods return the opcode of the frame setup/destroy instructions if they exist (-1 otherwise)...
bool isReassociationCandidate(const MachineInstr &Inst, bool &Commuted) const
Return true if the input Inst is part of a chain of dependent ops that are suitable for reassociatio...
StringRef getCommentString() const
Definition: MCAsmInfo.h:492
virtual void setSpecialOperandAttr(MachineInstr &OldMI1, MachineInstr &OldMI2, MachineInstr &NewMI1, MachineInstr &NewMI2) const
This is an architecture-specific helper function of reassociateOps.
unsigned getVarIdx() const
Get the operand index of the variable list of non-argument operands.
Definition: StackMaps.h:127
void setImm(int64_t immVal)
void setIsInternalRead(bool Val=true)
MI-level patchpoint operands.
Definition: StackMaps.h:76
const MachineInstrBuilder & addFrameIndex(int Idx) const
MachineCombinerPattern
These are instruction patterns matched by the machine combiner pass.
virtual bool getStackSlotRange(const TargetRegisterClass *RC, unsigned SubIdx, unsigned &Size, unsigned &Offset, const MachineFunction &MF) const
Compute the size in bytes and offset within a stack slot of a spilled register or subregister...
bool isCopy() const
virtual bool PredicateInstruction(MachineInstr &MI, ArrayRef< MachineOperand > Pred) const
Convert the instruction into a predicated instruction.
bool isInsertSubregLike(QueryType Type=IgnoreBundle) const
Return true if this instruction behaves the same way as the generic INSERT_SUBREG instructions...
Definition: MachineInstr.h:797
unsigned getSubRegIdxOffset(unsigned Idx) const
Get the offset of the bit range covered by a sub-register index.
TargetRegisterInfo base class - We assume that the target defines a static array of TargetRegisterDes...
size_t size() const
Definition: SmallVector.h:52
virtual bool isPredicated(const MachineInstr &MI) const
Returns true if the instruction is already predicated.
bool isMachineOpcode() const
Test if this node has a post-isel opcode, directly corresponding to a MachineInstr opcode...
#define llvm_unreachable(msg)
Marks that the current location is not supposed to be reachable.
bool isConstantPhysReg(unsigned PhysReg) const
Returns true if PhysReg is unallocatable and constant throughout the function.
bool hasSubClassEq(const TargetRegisterClass *RC) const
Returns true if RC is a sub-class of or equal to this class.
void setIsKill(bool Val=true)
The memory access writes data.
int getOperandConstraint(unsigned OpNum, MCOI::OperandConstraint Constraint) const
Returns the value of the specific constraint if it is set.
Definition: MCInstrDesc.h:188
virtual bool hasLoadFromStackSlot(const MachineInstr &MI, SmallVectorImpl< const MachineMemOperand *> &Accesses) const
If the specified machine instruction has a load from a stack slot, return true along with the FrameIn...
virtual bool isSchedulingBoundary(const MachineInstr &MI, const MachineBasicBlock *MBB, const MachineFunction &MF) const
Test if the given instruction should be considered a scheduling boundary.
Iterator for intrusive lists based on ilist_node.
MachineInstr * foldMemoryOperand(MachineInstr &MI, ArrayRef< unsigned > Ops, int FI, LiveIntervals *LIS=nullptr, VirtRegMap *VRM=nullptr) const
Attempt to fold a load or store of the specified stack slot into the specified machine instruction fo...
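As a hedged illustration of the hook above (not code from this file), a spiller-style pass could ask the target to fold a stack-slot access directly into a using instruction; the helper name and variables below are hypothetical.

#include "llvm/CodeGen/MachineInstr.h"
#include "llvm/CodeGen/TargetInstrInfo.h"

using namespace llvm;

// Sketch: try to fold operand OpNo of MI into an access of stack slot FI.
// On success the target has already inserted the folded instruction, so the
// original MI can simply be erased by the caller.
static bool tryFoldStackSlot(const TargetInstrInfo &TII, MachineInstr &MI,
                             unsigned OpNo, int FI) {
  unsigned Ops[] = {OpNo};
  if (TII.foldMemoryOperand(MI, Ops, FI) == nullptr)
    return false;
  MI.eraseFromParent();
  return true;
}

If folding fails, a real pass would fall back to an explicit spill or reload around MI (see the storeRegToStackSlot and loadRegFromStackSlot sketches later in this index).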
void addSuccessor(MachineBasicBlock *Succ, BranchProbability Prob=BranchProbability::getUnknown())
Add Succ as a successor of this MachineBasicBlock.
void substituteRegister(unsigned FromReg, unsigned ToReg, unsigned SubIdx, const TargetRegisterInfo &RegInfo)
Replace all occurrences of FromReg with ToReg:SubIdx, properly composing subreg indices where necessa...
mmo_iterator memoperands_begin() const
Access to memory operands of the instruction.
Definition: MachineInstr.h:535
void reassociateOps(MachineInstr &Root, MachineInstr &Prev, MachineCombinerPattern Pattern, SmallVectorImpl< MachineInstr *> &InsInstrs, SmallVectorImpl< MachineInstr *> &DelInstrs, DenseMap< unsigned, unsigned > &InstrIdxForVirtReg) const
Attempt to reassociate Root and Prev according to Pattern to reduce critical path length...
MachineInstr * CloneMachineInstr(const MachineInstr *Orig)
Create a new MachineInstr which is a copy of Orig, identical in all ways except the instruction has n...
MachineOperand class - Representation of each machine instruction operand.
This is a 'vector' (really, a variable-sized array), optimized for the case when the array is small...
Definition: SmallVector.h:837
bool isInsertSubreg() const
A pair composed of a register and a sub-register index.
MachineInstrBuilder & DefMI
unsigned getVarIdx() const
Get the operand index of the variable list of non-argument operands.
Definition: StackMaps.h:56
bool isFrameInstr(const MachineInstr &I) const
Returns true if the argument is a frame pseudo instruction.
Information about stack frame layout on the target.
unsigned HighLatency
Definition: MCSchedule.h:292
unsigned getNumDefs() const
Return the number of MachineOperands that are register definitions.
Definition: MCInstrDesc.h:226
Represents one node in the SelectionDAG.
int64_t getFrameSize(const MachineInstr &I) const
Returns size of the frame associated with the given frame instruction.
int64_t getImm() const
bool getRegSequenceInputs(const MachineInstr &MI, unsigned DefIdx, SmallVectorImpl< RegSubRegPairAndIdx > &InputRegs) const
Build the equivalent inputs of a REG_SEQUENCE for the given MI and DefIdx.
MachineInstr * getUniqueVRegDef(unsigned Reg) const
getUniqueVRegDef - Return the unique machine instr that defines the specified virtual register or nul...
virtual bool findCommutedOpIndices(MachineInstr &MI, unsigned &SrcOpIdx1, unsigned &SrcOpIdx2) const
Returns true iff the routine could find two commutable operands in the given machine instruction...
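For illustration only (not from this file), this hook pairs naturally with TargetInstrInfo::commuteInstruction; the helper below is a hypothetical sketch.

#include "llvm/CodeGen/MachineInstr.h"
#include "llvm/CodeGen/TargetInstrInfo.h"

using namespace llvm;

// Sketch: commute MI in place if the target can name a commutable operand
// pair. Starting from CommuteAnyOperandIndex lets the target pick the pair.
static MachineInstr *commuteIfPossible(const TargetInstrInfo &TII,
                                       MachineInstr &MI) {
  unsigned Idx1 = TargetInstrInfo::CommuteAnyOperandIndex;
  unsigned Idx2 = TargetInstrInfo::CommuteAnyOperandIndex;
  if (!TII.findCommutedOpIndices(MI, Idx1, Idx2))
    return nullptr;
  return TII.commuteInstruction(MI, /*NewMI=*/false, Idx1, Idx2);
}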
void swap(llvm::BitVector &LHS, llvm::BitVector &RHS)
Implement std::swap in terms of BitVector swap.
Definition: BitVector.h:940
static unsigned getReg(const void *D, unsigned RC, unsigned RegNo)
bool isRenamable() const
isRenamable - Returns true if this register may be renamed, i.e.
virtual void storeRegToStackSlot(MachineBasicBlock &MBB, MachineBasicBlock::iterator MI, unsigned SrcReg, bool isKill, int FrameIndex, const TargetRegisterClass *RC, const TargetRegisterInfo *TRI) const
Store the specified register of the given register class to the specified stack frame index...
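As a minimal sketch (not code from this file), emitting a spill through this hook looks roughly as follows; the parameters are assumed to come from the calling pass.

#include "llvm/CodeGen/MachineBasicBlock.h"
#include "llvm/CodeGen/TargetInstrInfo.h"
#include "llvm/CodeGen/TargetRegisterInfo.h"

using namespace llvm;

// Sketch: spill SrcReg into stack slot FI immediately before MI, marking the
// spilled value as killed at the store.
static void emitSpill(const TargetInstrInfo &TII,
                      const TargetRegisterInfo &TRI, MachineBasicBlock &MBB,
                      MachineBasicBlock::iterator MI, unsigned SrcReg, int FI,
                      const TargetRegisterClass *RC) {
  TII.storeRegToStackSlot(MBB, MI, SrcReg, /*isKill=*/true, FI, RC, &TRI);
}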
bool isEmpty() const
Returns true if there are no itineraries.
const MachineBasicBlock * getParent() const
Definition: MachineInstr.h:255
MachineRegisterInfo - Keep track of information for virtual and physical registers, including vreg register classes, use/def chains for registers, etc.
The memory access reads data.
TargetSubtargetInfo - Generic base class for all target subtargets.
static MachinePointerInfo getFixedStack(MachineFunction &MF, int FI, int64_t Offset=0)
Return a MachinePointerInfo record that refers to the specified FrameIndex.
virtual void insertNoop(MachineBasicBlock &MBB, MachineBasicBlock::iterator MI) const
Insert a noop into the instruction stream at the specified point.
bool isRegSequenceLike(QueryType Type=IgnoreBundle) const
Return true if this instruction behaves the same way as the generic REG_SEQUENCE instructions.
Definition: MachineInstr.h:768
Representation of each machine instruction.
Definition: MachineInstr.h:63
static bool isPhysicalRegister(unsigned Reg)
Return true if the specified register number is in the physical register namespace.
const MachineFunction * getParent() const
Return the MachineFunction containing this basic block.
static bool fixCommutedOpIndices(unsigned &ResultIdx1, unsigned &ResultIdx2, unsigned CommutableOpIdx1, unsigned CommutableOpIdx2)
Assigns the (CommutableOpIdx1, CommutableOpIdx2) pair of commutable operand indices to (ResultIdx1...
virtual bool hasStoreToStackSlot(const MachineInstr &MI, SmallVectorImpl< const MachineMemOperand *> &Accesses) const
If the specified machine instruction has a store to a stack slot, return true along with the FrameInd...
const MachineInstrBuilder & addImm(int64_t Val) const
Add a new immediate operand.
MCSchedModel SchedModel
Basic machine properties.
virtual ScheduleHazardRecognizer * CreateTargetHazardRecognizer(const TargetSubtargetInfo *STI, const ScheduleDAG *DAG) const
Allocate and return a hazard recognizer to use for this target when scheduling the machine instructio...
MachineRegisterInfo & getRegInfo()
getRegInfo - Return information about the registers currently in use.
MI-level stackmap operands.
Definition: StackMaps.h:35
virtual void genAlternativeCodeSequence(MachineInstr &Root, MachineCombinerPattern Pattern, SmallVectorImpl< MachineInstr *> &InsInstrs, SmallVectorImpl< MachineInstr *> &DelInstrs, DenseMap< unsigned, unsigned > &InstIdxForVirtReg) const
When getMachineCombinerPatterns() finds patterns, this function generates the instructions that could...
virtual unsigned isLoadFromStackSlot(const MachineInstr &MI, int &FrameIndex) const
If the specified machine instruction is a direct load from a stack slot, return the virtual or physic...
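A hedged example of using this hook (the helper name and parameters below are hypothetical): check whether MI reloads one particular slot and, if so, report the destination register.

#include "llvm/CodeGen/MachineInstr.h"
#include "llvm/CodeGen/TargetInstrInfo.h"

using namespace llvm;

// Sketch: returns true when MI is a direct load from stack slot FI; DestReg
// receives the register being reloaded.
static bool reloadsSlot(const TargetInstrInfo &TII, const MachineInstr &MI,
                        int FI, unsigned &DestReg) {
  int FoundFI = 0;
  unsigned Reg = TII.isLoadFromStackSlot(MI, FoundFI);
  if (!Reg || FoundFI != FI)
    return false;
  DestReg = Reg;
  return true;
}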
int16_t RegClass
This specifies the register class enumeration of the operand if the operand is a register.
Definition: MCInstrDesc.h:72
bool canFoldAsLoad(QueryType Type=IgnoreBundle) const
Return true for instructions that can be folded as memory operands in other instructions.
Definition: MachineInstr.h:754
const MCInstrDesc & get(unsigned Opcode) const
Return the machine instruction descriptor that corresponds to the specified instruction opcode...
Definition: MCInstrInfo.h:44
void setReg(unsigned Reg)
Change the register this operand corresponds to.
#define I(x, y, z)
Definition: MD5.cpp:58
#define N
void setSubReg(unsigned subReg)
virtual const TargetFrameLowering * getFrameLowering() const
virtual bool produceSameValue(const MachineInstr &MI0, const MachineInstr &MI1, const MachineRegisterInfo *MRI=nullptr) const
Return true if two machine instructions would produce identical values.
MI-level Statepoint operands.
Definition: StackMaps.h:154
uint32_t Size
Definition: Profile.cpp:46
bool hasOneNonDBGUse(unsigned RegNo) const
hasOneNonDBGUse - Return true if there is exactly one non-Debug instruction using the specified regis...
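Together with getUniqueVRegDef (listed earlier in this index), this query supports simple def-use reasoning; the predicate below is only an illustrative sketch.

#include "llvm/CodeGen/MachineRegisterInfo.h"

using namespace llvm;

// Sketch: a virtual register with a unique definition and exactly one
// non-debug use is a common precondition for folding or sinking it.
static bool hasSingleDefSingleUse(const MachineRegisterInfo &MRI,
                                  unsigned Reg) {
  return MRI.getUniqueVRegDef(Reg) && MRI.hasOneNonDBGUse(Reg);
}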
void removeSuccessor(MachineBasicBlock *Succ, bool NormalizeSuccProbs=false)
Remove successor from the successors list of this MachineBasicBlock.
bool isReg() const
isReg - Tests if this is a MO_Register operand.
LLVM_NODISCARD const char * data() const
data - Get a pointer to the start of the string (which may not be null terminated).
Definition: StringRef.h:122
bool mayLoad(QueryType Type=AnyInBundle) const
Return true if this instruction could possibly read memory.
Definition: MachineInstr.h:808
bool memoperands_empty() const
Return true if we don't have any memory operands which describe the memory access done by this instr...
Definition: MachineInstr.h:547
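As a rough sketch (the conservative policy below is illustrative, not LLVM's), the memory-operand queries above can be combined to decide whether an instruction's memory behaviour is fully described.

#include "llvm/CodeGen/MachineInstr.h"
#include "llvm/CodeGen/MachineMemOperand.h"

using namespace llvm;

// Sketch: an instruction that may load or store but carries no memory
// operands must be treated as touching unknown memory; volatile accesses are
// also treated as opaque here.
static bool hasDescribedMemoryAccess(const MachineInstr &MI) {
  if (!MI.mayLoad() && !MI.mayStore())
    return true;
  if (MI.memoperands_empty())
    return false;
  for (const MachineMemOperand *MMO : MI.memoperands())
    if (MMO->isVolatile())
      return false;
  return true;
}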
assert(ImpDefSCC.getReg()==AMDGPU::SCC &&ImpDefSCC.isDef())
bool isIdenticalTo(const MachineInstr &Other, MICheckType Check=CheckDefs) const
Return true if this instruction is identical to Other.
void setMemRefs(MachineFunction &MF, ArrayRef< MachineMemOperand *> MemRefs)
Assign this MachineInstr's memory reference descriptor list.
bool hasReassociableSibling(const MachineInstr &Inst, bool &Commuted) const
Return true when Inst has reassociable sibling.
unsigned getMachineOpcode() const
This may only be called if isMachineOpcode returns true.
virtual ScheduleHazardRecognizer * CreateTargetMIHazardRecognizer(const InstrItineraryData *, const ScheduleDAG *DAG) const
Allocate and return a hazard recognizer to use for this target when scheduling the machine instructio...
bool isPosition() const
unsigned getStackPointerRegisterToSaveRestore() const
If a physical register, this specifies the register that llvm.savestack/llvm.restorestack should save...
virtual bool isAssociativeAndCommutative(const MachineInstr &Inst) const
Return true when Inst is both associative and commutative.
const MCOperandInfo * OpInfo
Definition: MCInstrDesc.h:175
int getOperandCycle(unsigned ItinClassIndx, unsigned OperandIdx) const
Return the cycle for the given class and operand.
virtual unsigned getMaxInstLength(const MCSubtargetInfo *STI=nullptr) const
Returns the maximum possible encoded instruction size in bytes.
Definition: MCAsmInfo.h:480
unsigned getSubRegIdxSize(unsigned Idx) const
Get the size of the bit range covered by a sub-register index.
bool hasUnmodeledSideEffects() const
Return true if this instruction has side effects that are not modeled by mayLoad / mayStore...
IRTranslator LLVM IR MI
bool isBarrier(QueryType Type=AnyInBundle) const
Returns true if the specified instruction stops control flow from executing the instruction immediate...
Definition: MachineInstr.h:641
bool isMBB() const
isMBB - Tests if this is a MO_MachineBasicBlock operand.
virtual bool getRegSequenceLikeInputs(const MachineInstr &MI, unsigned DefIdx, SmallVectorImpl< RegSubRegPairAndIdx > &InputRegs) const
Target-dependent implementation of getRegSequenceInputs.
bool getExtractSubregInputs(const MachineInstr &MI, unsigned DefIdx, RegSubRegPairAndIdx &InputReg) const
Build the equivalent inputs of a EXTRACT_SUBREG for the given MI and DefIdx.
const MachineOperand & getOperand(unsigned i) const
Definition: MachineInstr.h:415
static const TargetRegisterClass * canFoldCopy(const MachineInstr &MI, unsigned FoldIdx)
Machine model for scheduling, bundling, and heuristics.
Definition: MCSchedule.h:244
bool getInsertSubregInputs(const MachineInstr &MI, unsigned DefIdx, RegSubRegPair &BaseReg, RegSubRegPairAndIdx &InsertedReg) const
Build the equivalent inputs of a INSERT_SUBREG for the given MI and DefIdx.
bool isExtractSubreg() const
virtual ScheduleHazardRecognizer * CreateTargetPostRAHazardRecognizer(const InstrItineraryData *, const ScheduleDAG *DAG) const
Allocate and return a hazard recognizer to use for this target when scheduling the machine instructio...
int computeDefOperandLatency(const InstrItineraryData *ItinData, const MachineInstr &DefMI) const
If we can determine the operand latency from the def only, without itinerary lookup, do so.
virtual int getOperandLatency(const InstrItineraryData *ItinData, SDNode *DefNode, unsigned DefIdx, SDNode *UseNode, unsigned UseIdx) const
virtual void loadRegFromStackSlot(MachineBasicBlock &MBB, MachineBasicBlock::iterator MI, unsigned DestReg, int FrameIndex, const TargetRegisterClass *RC, const TargetRegisterInfo *TRI) const
Load the specified register of the given register class from the specified stack frame index...
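Mirroring the spill sketch earlier in this index, a reload through this hook could look like this (parameters assumed to come from the calling pass).

#include "llvm/CodeGen/MachineBasicBlock.h"
#include "llvm/CodeGen/TargetInstrInfo.h"
#include "llvm/CodeGen/TargetRegisterInfo.h"

using namespace llvm;

// Sketch: reload DestReg from stack slot FI immediately before MI.
static void emitReload(const TargetInstrInfo &TII,
                       const TargetRegisterInfo &TRI, MachineBasicBlock &MBB,
                       MachineBasicBlock::iterator MI, unsigned DestReg, int FI,
                       const TargetRegisterClass *RC) {
  TII.loadRegFromStackSlot(MBB, MI, DestReg, FI, RC, &TRI);
}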
bool isCommutable(QueryType Type=IgnoreBundle) const
Return true if this may be a 2- or 3-address instruction (of the form "X = op Y, Z, ..."), which produces the same result if Y and Z are exchanged.
Definition: MachineInstr.h:860
unsigned createVirtualRegister(const TargetRegisterClass *RegClass, StringRef Name="")
createVirtualRegister - Create and return a new virtual register in the function with the specified r...
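A small illustrative sketch (not from this file) that combines createVirtualRegister with BuildMI and the COPY pseudo-instruction; all parameter names are hypothetical.

#include "llvm/CodeGen/MachineBasicBlock.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/TargetInstrInfo.h"
#include "llvm/CodeGen/TargetOpcodes.h"

using namespace llvm;

// Sketch: create a fresh virtual register of class RC and copy OldReg into it
// right before InsertPt.
static unsigned copyIntoNewVReg(MachineRegisterInfo &MRI,
                                const TargetInstrInfo &TII,
                                MachineBasicBlock &MBB,
                                MachineBasicBlock::iterator InsertPt,
                                const DebugLoc &DL,
                                const TargetRegisterClass *RC,
                                unsigned OldReg) {
  unsigned NewReg = MRI.createVirtualRegister(RC);
  BuildMI(MBB, InsertPt, DL, TII.get(TargetOpcode::COPY), NewReg)
      .addReg(OldReg);
  return NewReg;
}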
virtual int getSPAdjust(const MachineInstr &MI) const
Returns the actual stack pointer adjustment made by an instruction as part of a call sequence...
bool isNotDuplicable(QueryType Type=AnyInBundle) const
Return true if this instruction cannot be safely duplicated.
Definition: MachineInstr.h:724
This file describes how to lower LLVM code to machine code.
mmo_iterator memoperands_end() const
Access to memory operands of the instruction.
Definition: MachineInstr.h:542
virtual const TargetRegisterClass * getPointerRegClass(const MachineFunction &MF, unsigned Kind=0) const
Returns a TargetRegisterClass used for pointer values.
A pair composed of a pair of a register and a sub-register index, and another sub-register index...
bool is_contained(R &&Range, const E &Element)
Wrapper function around std::find to detect if an element exists in a container.
Definition: STLExtras.h:1251