LLVM  8.0.0svn
TargetInstrInfo.cpp
Go to the documentation of this file.
1 //===-- TargetInstrInfo.cpp - Target Instruction Information --------------===//
2 //
3 // The LLVM Compiler Infrastructure
4 //
5 // This file is distributed under the University of Illinois Open Source
6 // License. See LICENSE.TXT for details.
7 //
8 //===----------------------------------------------------------------------===//
9 //
10 // This file implements the TargetInstrInfo class.
11 //
12 //===----------------------------------------------------------------------===//
13 
21 #include "llvm/CodeGen/StackMaps.h"
26 #include "llvm/IR/DataLayout.h"
27 #include "llvm/MC/MCAsmInfo.h"
33 #include <cctype>
34 
35 using namespace llvm;
36 
38  "disable-sched-hazard", cl::Hidden, cl::init(false),
39  cl::desc("Disable hazard detection during preRA scheduling"));
40 
42 }
43 
45 TargetInstrInfo::getRegClass(const MCInstrDesc &MCID, unsigned OpNum,
46  const TargetRegisterInfo *TRI,
47  const MachineFunction &MF) const {
48  if (OpNum >= MCID.getNumOperands())
49  return nullptr;
50 
51  short RegClass = MCID.OpInfo[OpNum].RegClass;
52  if (MCID.OpInfo[OpNum].isLookupPtrRegClass())
53  return TRI->getPointerRegClass(MF, RegClass);
54 
55  // Instructions like INSERT_SUBREG do not have fixed register classes.
56  if (RegClass < 0)
57  return nullptr;
58 
59  // Otherwise just look it up normally.
60  return TRI->getRegClass(RegClass);
61 }
62 
63 /// insertNoop - Insert a noop into the instruction stream at the specified
64 /// point.
67  llvm_unreachable("Target didn't implement insertNoop!");
68 }
69 
70 static bool isAsmComment(const char *Str, const MCAsmInfo &MAI) {
71  return strncmp(Str, MAI.getCommentString().data(),
72  MAI.getCommentString().size()) == 0;
73 }
74 
75 /// Measure the specified inline asm to determine an approximation of its
76 /// length.
77 /// Comments (which run till the next SeparatorString or newline) do not
78 /// count as an instruction.
79 /// Any other non-whitespace text is considered an instruction, with
80 /// multiple instructions separated by SeparatorString or newlines.
81 /// Variable-length instructions are not handled here; this function
82 /// may be overloaded in the target code to do that.
83 /// We implement a special case of the .space directive which takes only a
84 /// single integer argument in base 10 that is the size in bytes. This is a
85 /// restricted form of the GAS directive in that we only interpret
86 /// simple--i.e. not a logical or arithmetic expression--size values without
87 /// the optional fill value. This is primarily used for creating arbitrary
88 /// sized inline asm blocks for testing purposes.
89 unsigned TargetInstrInfo::getInlineAsmLength(const char *Str,
90  const MCAsmInfo &MAI) const {
91  // Count the number of instructions in the asm.
92  bool AtInsnStart = true;
93  unsigned Length = 0;
94  for (; *Str; ++Str) {
95  if (*Str == '\n' || strncmp(Str, MAI.getSeparatorString(),
96  strlen(MAI.getSeparatorString())) == 0) {
97  AtInsnStart = true;
98  } else if (isAsmComment(Str, MAI)) {
99  // Stop counting as an instruction after a comment until the next
100  // separator.
101  AtInsnStart = false;
102  }
103 
104  if (AtInsnStart && !std::isspace(static_cast<unsigned char>(*Str))) {
105  unsigned AddLength = MAI.getMaxInstLength();
106  if (strncmp(Str, ".space", 6) == 0) {
107  char *EStr;
108  int SpaceSize;
109  SpaceSize = strtol(Str + 6, &EStr, 10);
110  SpaceSize = SpaceSize < 0 ? 0 : SpaceSize;
111  while (*EStr != '\n' && std::isspace(static_cast<unsigned char>(*EStr)))
112  ++EStr;
113  if (*EStr == '\0' || *EStr == '\n' ||
114  isAsmComment(EStr, MAI)) // Successfully parsed .space argument
115  AddLength = SpaceSize;
116  }
117  Length += AddLength;
118  AtInsnStart = false;
119  }
120  }
121 
122  return Length;
123 }
124 
125 /// ReplaceTailWithBranchTo - Delete the instruction OldInst and everything
126 /// after it, replacing it with an unconditional branch to NewDest.
127 void
129  MachineBasicBlock *NewDest) const {
130  MachineBasicBlock *MBB = Tail->getParent();
131 
132  // Remove all the old successors of MBB from the CFG.
133  while (!MBB->succ_empty())
134  MBB->removeSuccessor(MBB->succ_begin());
135 
136  // Save off the debug loc before erasing the instruction.
137  DebugLoc DL = Tail->getDebugLoc();
138 
139  // Remove all the dead instructions from the end of MBB.
140  MBB->erase(Tail, MBB->end());
141 
142  // If MBB isn't immediately before MBB, insert a branch to it.
144  insertBranch(*MBB, NewDest, nullptr, SmallVector<MachineOperand, 0>(), DL);
145  MBB->addSuccessor(NewDest);
146 }
147 
149  bool NewMI, unsigned Idx1,
150  unsigned Idx2) const {
151  const MCInstrDesc &MCID = MI.getDesc();
152  bool HasDef = MCID.getNumDefs();
153  if (HasDef && !MI.getOperand(0).isReg())
154  // No idea how to commute this instruction. Target should implement its own.
155  return nullptr;
156 
157  unsigned CommutableOpIdx1 = Idx1; (void)CommutableOpIdx1;
158  unsigned CommutableOpIdx2 = Idx2; (void)CommutableOpIdx2;
159  assert(findCommutedOpIndices(MI, CommutableOpIdx1, CommutableOpIdx2) &&
160  CommutableOpIdx1 == Idx1 && CommutableOpIdx2 == Idx2 &&
161  "TargetInstrInfo::CommuteInstructionImpl(): not commutable operands.");
162  assert(MI.getOperand(Idx1).isReg() && MI.getOperand(Idx2).isReg() &&
163  "This only knows how to commute register operands so far");
164 
165  unsigned Reg0 = HasDef ? MI.getOperand(0).getReg() : 0;
166  unsigned Reg1 = MI.getOperand(Idx1).getReg();
167  unsigned Reg2 = MI.getOperand(Idx2).getReg();
168  unsigned SubReg0 = HasDef ? MI.getOperand(0).getSubReg() : 0;
169  unsigned SubReg1 = MI.getOperand(Idx1).getSubReg();
170  unsigned SubReg2 = MI.getOperand(Idx2).getSubReg();
171  bool Reg1IsKill = MI.getOperand(Idx1).isKill();
172  bool Reg2IsKill = MI.getOperand(Idx2).isKill();
173  bool Reg1IsUndef = MI.getOperand(Idx1).isUndef();
174  bool Reg2IsUndef = MI.getOperand(Idx2).isUndef();
175  bool Reg1IsInternal = MI.getOperand(Idx1).isInternalRead();
176  bool Reg2IsInternal = MI.getOperand(Idx2).isInternalRead();
177  // Avoid calling isRenamable for virtual registers since we assert that
178  // renamable property is only queried/set for physical registers.
179  bool Reg1IsRenamable = TargetRegisterInfo::isPhysicalRegister(Reg1)
180  ? MI.getOperand(Idx1).isRenamable()
181  : false;
182  bool Reg2IsRenamable = TargetRegisterInfo::isPhysicalRegister(Reg2)
183  ? MI.getOperand(Idx2).isRenamable()
184  : false;
185  // If destination is tied to either of the commuted source register, then
186  // it must be updated.
187  if (HasDef && Reg0 == Reg1 &&
188  MI.getDesc().getOperandConstraint(Idx1, MCOI::TIED_TO) == 0) {
189  Reg2IsKill = false;
190  Reg0 = Reg2;
191  SubReg0 = SubReg2;
192  } else if (HasDef && Reg0 == Reg2 &&
193  MI.getDesc().getOperandConstraint(Idx2, MCOI::TIED_TO) == 0) {
194  Reg1IsKill = false;
195  Reg0 = Reg1;
196  SubReg0 = SubReg1;
197  }
198 
199  MachineInstr *CommutedMI = nullptr;
200  if (NewMI) {
201  // Create a new instruction.
202  MachineFunction &MF = *MI.getMF();
203  CommutedMI = MF.CloneMachineInstr(&MI);
204  } else {
205  CommutedMI = &MI;
206  }
207 
208  if (HasDef) {
209  CommutedMI->getOperand(0).setReg(Reg0);
210  CommutedMI->getOperand(0).setSubReg(SubReg0);
211  }
212  CommutedMI->getOperand(Idx2).setReg(Reg1);
213  CommutedMI->getOperand(Idx1).setReg(Reg2);
214  CommutedMI->getOperand(Idx2).setSubReg(SubReg1);
215  CommutedMI->getOperand(Idx1).setSubReg(SubReg2);
216  CommutedMI->getOperand(Idx2).setIsKill(Reg1IsKill);
217  CommutedMI->getOperand(Idx1).setIsKill(Reg2IsKill);
218  CommutedMI->getOperand(Idx2).setIsUndef(Reg1IsUndef);
219  CommutedMI->getOperand(Idx1).setIsUndef(Reg2IsUndef);
220  CommutedMI->getOperand(Idx2).setIsInternalRead(Reg1IsInternal);
221  CommutedMI->getOperand(Idx1).setIsInternalRead(Reg2IsInternal);
222  // Avoid calling setIsRenamable for virtual registers since we assert that
223  // renamable property is only queried/set for physical registers.
225  CommutedMI->getOperand(Idx2).setIsRenamable(Reg1IsRenamable);
227  CommutedMI->getOperand(Idx1).setIsRenamable(Reg2IsRenamable);
228  return CommutedMI;
229 }
230 
232  unsigned OpIdx1,
233  unsigned OpIdx2) const {
234  // If OpIdx1 or OpIdx2 is not specified, then this method is free to choose
235  // any commutable operand, which is done in findCommutedOpIndices() method
236  // called below.
237  if ((OpIdx1 == CommuteAnyOperandIndex || OpIdx2 == CommuteAnyOperandIndex) &&
238  !findCommutedOpIndices(MI, OpIdx1, OpIdx2)) {
239  assert(MI.isCommutable() &&
240  "Precondition violation: MI must be commutable.");
241  return nullptr;
242  }
243  return commuteInstructionImpl(MI, NewMI, OpIdx1, OpIdx2);
244 }
245 
246 bool TargetInstrInfo::fixCommutedOpIndices(unsigned &ResultIdx1,
247  unsigned &ResultIdx2,
248  unsigned CommutableOpIdx1,
249  unsigned CommutableOpIdx2) {
250  if (ResultIdx1 == CommuteAnyOperandIndex &&
251  ResultIdx2 == CommuteAnyOperandIndex) {
252  ResultIdx1 = CommutableOpIdx1;
253  ResultIdx2 = CommutableOpIdx2;
254  } else if (ResultIdx1 == CommuteAnyOperandIndex) {
255  if (ResultIdx2 == CommutableOpIdx1)
256  ResultIdx1 = CommutableOpIdx2;
257  else if (ResultIdx2 == CommutableOpIdx2)
258  ResultIdx1 = CommutableOpIdx1;
259  else
260  return false;
261  } else if (ResultIdx2 == CommuteAnyOperandIndex) {
262  if (ResultIdx1 == CommutableOpIdx1)
263  ResultIdx2 = CommutableOpIdx2;
264  else if (ResultIdx1 == CommutableOpIdx2)
265  ResultIdx2 = CommutableOpIdx1;
266  else
267  return false;
268  } else
269  // Check that the result operand indices match the given commutable
270  // operand indices.
271  return (ResultIdx1 == CommutableOpIdx1 && ResultIdx2 == CommutableOpIdx2) ||
272  (ResultIdx1 == CommutableOpIdx2 && ResultIdx2 == CommutableOpIdx1);
273 
274  return true;
275 }
276 
278  unsigned &SrcOpIdx1,
279  unsigned &SrcOpIdx2) const {
280  assert(!MI.isBundle() &&
281  "TargetInstrInfo::findCommutedOpIndices() can't handle bundles");
282 
283  const MCInstrDesc &MCID = MI.getDesc();
284  if (!MCID.isCommutable())
285  return false;
286 
287  // This assumes v0 = op v1, v2 and commuting would swap v1 and v2. If this
288  // is not true, then the target must implement this.
289  unsigned CommutableOpIdx1 = MCID.getNumDefs();
290  unsigned CommutableOpIdx2 = CommutableOpIdx1 + 1;
291  if (!fixCommutedOpIndices(SrcOpIdx1, SrcOpIdx2,
292  CommutableOpIdx1, CommutableOpIdx2))
293  return false;
294 
295  if (!MI.getOperand(SrcOpIdx1).isReg() || !MI.getOperand(SrcOpIdx2).isReg())
296  // No idea.
297  return false;
298  return true;
299 }
300 
302  if (!MI.isTerminator()) return false;
303 
304  // Conditional branch is a special case.
305  if (MI.isBranch() && !MI.isBarrier())
306  return true;
307  if (!MI.isPredicable())
308  return true;
309  return !isPredicated(MI);
310 }
311 
314  bool MadeChange = false;
315 
316  assert(!MI.isBundle() &&
317  "TargetInstrInfo::PredicateInstruction() can't handle bundles");
318 
319  const MCInstrDesc &MCID = MI.getDesc();
320  if (!MI.isPredicable())
321  return false;
322 
323  for (unsigned j = 0, i = 0, e = MI.getNumOperands(); i != e; ++i) {
324  if (MCID.OpInfo[i].isPredicate()) {
325  MachineOperand &MO = MI.getOperand(i);
326  if (MO.isReg()) {
327  MO.setReg(Pred[j].getReg());
328  MadeChange = true;
329  } else if (MO.isImm()) {
330  MO.setImm(Pred[j].getImm());
331  MadeChange = true;
332  } else if (MO.isMBB()) {
333  MO.setMBB(Pred[j].getMBB());
334  MadeChange = true;
335  }
336  ++j;
337  }
338  }
339  return MadeChange;
340 }
341 
343  const MachineInstr &MI,
345  size_t StartSize = Accesses.size();
347  oe = MI.memoperands_end();
348  o != oe; ++o) {
349  if ((*o)->isLoad() &&
350  dyn_cast_or_null<FixedStackPseudoSourceValue>((*o)->getPseudoValue()))
351  Accesses.push_back(*o);
352  }
353  return Accesses.size() != StartSize;
354 }
355 
357  const MachineInstr &MI,
359  size_t StartSize = Accesses.size();
361  oe = MI.memoperands_end();
362  o != oe; ++o) {
363  if ((*o)->isStore() &&
364  dyn_cast_or_null<FixedStackPseudoSourceValue>((*o)->getPseudoValue()))
365  Accesses.push_back(*o);
366  }
367  return Accesses.size() != StartSize;
368 }
369 
371  unsigned SubIdx, unsigned &Size,
372  unsigned &Offset,
373  const MachineFunction &MF) const {
375  if (!SubIdx) {
376  Size = TRI->getSpillSize(*RC);
377  Offset = 0;
378  return true;
379  }
380  unsigned BitSize = TRI->getSubRegIdxSize(SubIdx);
381  // Convert bit size to byte size.
382  if (BitSize % 8)
383  return false;
384 
385  int BitOffset = TRI->getSubRegIdxOffset(SubIdx);
386  if (BitOffset < 0 || BitOffset % 8)
387  return false;
388 
389  Size = BitSize /= 8;
390  Offset = (unsigned)BitOffset / 8;
391 
392  assert(TRI->getSpillSize(*RC) >= (Offset + Size) && "bad subregister range");
393 
394  if (!MF.getDataLayout().isLittleEndian()) {
395  Offset = TRI->getSpillSize(*RC) - (Offset + Size);
396  }
397  return true;
398 }
399 
402  unsigned DestReg, unsigned SubIdx,
403  const MachineInstr &Orig,
404  const TargetRegisterInfo &TRI) const {
405  MachineInstr *MI = MBB.getParent()->CloneMachineInstr(&Orig);
406  MI->substituteRegister(MI->getOperand(0).getReg(), DestReg, SubIdx, TRI);
407  MBB.insert(I, MI);
408 }
409 
411  const MachineInstr &MI1,
412  const MachineRegisterInfo *MRI) const {
414 }
415 
417  MachineBasicBlock::iterator InsertBefore, const MachineInstr &Orig) const {
418  assert(!Orig.isNotDuplicable() && "Instruction cannot be duplicated");
419  MachineFunction &MF = *MBB.getParent();
420  return MF.CloneMachineInstrBundle(MBB, InsertBefore, Orig);
421 }
422 
423 // If the COPY instruction in MI can be folded to a stack operation, return
424 // the register class to use.
426  unsigned FoldIdx) {
427  assert(MI.isCopy() && "MI must be a COPY instruction");
428  if (MI.getNumOperands() != 2)
429  return nullptr;
430  assert(FoldIdx<2 && "FoldIdx refers no nonexistent operand");
431 
432  const MachineOperand &FoldOp = MI.getOperand(FoldIdx);
433  const MachineOperand &LiveOp = MI.getOperand(1 - FoldIdx);
434 
435  if (FoldOp.getSubReg() || LiveOp.getSubReg())
436  return nullptr;
437 
438  unsigned FoldReg = FoldOp.getReg();
439  unsigned LiveReg = LiveOp.getReg();
440 
442  "Cannot fold physregs");
443 
444  const MachineRegisterInfo &MRI = MI.getMF()->getRegInfo();
445  const TargetRegisterClass *RC = MRI.getRegClass(FoldReg);
446 
448  return RC->contains(LiveOp.getReg()) ? RC : nullptr;
449 
450  if (RC->hasSubClassEq(MRI.getRegClass(LiveReg)))
451  return RC;
452 
453  // FIXME: Allow folding when register classes are memory compatible.
454  return nullptr;
455 }
456 
457 void TargetInstrInfo::getNoop(MCInst &NopInst) const {
458  llvm_unreachable("Not implemented");
459 }
460 
463  const TargetInstrInfo &TII) {
464  unsigned StartIdx = 0;
465  switch (MI.getOpcode()) {
466  case TargetOpcode::STACKMAP: {
467  // StackMapLiveValues are foldable
468  StartIdx = StackMapOpers(&MI).getVarIdx();
469  break;
470  }
471  case TargetOpcode::PATCHPOINT: {
472  // For PatchPoint, the call args are not foldable (even if reported in the
473  // stackmap e.g. via anyregcc).
474  StartIdx = PatchPointOpers(&MI).getVarIdx();
475  break;
476  }
477  case TargetOpcode::STATEPOINT: {
478  // For statepoints, fold deopt and gc arguments, but not call arguments.
479  StartIdx = StatepointOpers(&MI).getVarIdx();
480  break;
481  }
482  default:
483  llvm_unreachable("unexpected stackmap opcode");
484  }
485 
486  // Return false if any operands requested for folding are not foldable (not
487  // part of the stackmap's live values).
488  for (unsigned Op : Ops) {
489  if (Op < StartIdx)
490  return nullptr;
491  }
492 
493  MachineInstr *NewMI =
494  MF.CreateMachineInstr(TII.get(MI.getOpcode()), MI.getDebugLoc(), true);
495  MachineInstrBuilder MIB(MF, NewMI);
496 
497  // No need to fold return, the meta data, and function arguments
498  for (unsigned i = 0; i < StartIdx; ++i)
499  MIB.add(MI.getOperand(i));
500 
501  for (unsigned i = StartIdx; i < MI.getNumOperands(); ++i) {
502  MachineOperand &MO = MI.getOperand(i);
503  if (is_contained(Ops, i)) {
504  unsigned SpillSize;
505  unsigned SpillOffset;
506  // Compute the spill slot size and offset.
507  const TargetRegisterClass *RC =
508  MF.getRegInfo().getRegClass(MO.getReg());
509  bool Valid =
510  TII.getStackSlotRange(RC, MO.getSubReg(), SpillSize, SpillOffset, MF);
511  if (!Valid)
512  report_fatal_error("cannot spill patchpoint subregister operand");
513  MIB.addImm(StackMaps::IndirectMemRefOp);
514  MIB.addImm(SpillSize);
515  MIB.addFrameIndex(FrameIndex);
516  MIB.addImm(SpillOffset);
517  }
518  else
519  MIB.add(MO);
520  }
521  return NewMI;
522 }
523 
525  ArrayRef<unsigned> Ops, int FI,
526  LiveIntervals *LIS) const {
527  auto Flags = MachineMemOperand::MONone;
528  for (unsigned OpIdx : Ops)
529  Flags |= MI.getOperand(OpIdx).isDef() ? MachineMemOperand::MOStore
531 
532  MachineBasicBlock *MBB = MI.getParent();
533  assert(MBB && "foldMemoryOperand needs an inserted instruction");
534  MachineFunction &MF = *MBB->getParent();
535 
536  // If we're not folding a load into a subreg, the size of the load is the
537  // size of the spill slot. But if we are, we need to figure out what the
538  // actual load size is.
539  int64_t MemSize = 0;
540  const MachineFrameInfo &MFI = MF.getFrameInfo();
542 
543  if (Flags & MachineMemOperand::MOStore) {
544  MemSize = MFI.getObjectSize(FI);
545  } else {
546  for (unsigned OpIdx : Ops) {
547  int64_t OpSize = MFI.getObjectSize(FI);
548 
549  if (auto SubReg = MI.getOperand(OpIdx).getSubReg()) {
550  unsigned SubRegSize = TRI->getSubRegIdxSize(SubReg);
551  if (SubRegSize > 0 && !(SubRegSize % 8))
552  OpSize = SubRegSize / 8;
553  }
554 
555  MemSize = std::max(MemSize, OpSize);
556  }
557  }
558 
559  assert(MemSize && "Did not expect a zero-sized stack slot");
560 
561  MachineInstr *NewMI = nullptr;
562 
563  if (MI.getOpcode() == TargetOpcode::STACKMAP ||
564  MI.getOpcode() == TargetOpcode::PATCHPOINT ||
565  MI.getOpcode() == TargetOpcode::STATEPOINT) {
566  // Fold stackmap/patchpoint.
567  NewMI = foldPatchpoint(MF, MI, Ops, FI, *this);
568  if (NewMI)
569  MBB->insert(MI, NewMI);
570  } else {
571  // Ask the target to do the actual folding.
572  NewMI = foldMemoryOperandImpl(MF, MI, Ops, MI, FI, LIS);
573  }
574 
575  if (NewMI) {
576  NewMI->setMemRefs(MF, MI.memoperands());
577  // Add a memory operand, foldMemoryOperandImpl doesn't do that.
578  assert((!(Flags & MachineMemOperand::MOStore) ||
579  NewMI->mayStore()) &&
580  "Folded a def to a non-store!");
581  assert((!(Flags & MachineMemOperand::MOLoad) ||
582  NewMI->mayLoad()) &&
583  "Folded a use to a non-load!");
584  assert(MFI.getObjectOffset(FI) != -1);
586  MachinePointerInfo::getFixedStack(MF, FI), Flags, MemSize,
587  MFI.getObjectAlignment(FI));
588  NewMI->addMemOperand(MF, MMO);
589 
590  return NewMI;
591  }
592 
593  // Straight COPY may fold as load/store.
594  if (!MI.isCopy() || Ops.size() != 1)
595  return nullptr;
596 
597  const TargetRegisterClass *RC = canFoldCopy(MI, Ops[0]);
598  if (!RC)
599  return nullptr;
600 
601  const MachineOperand &MO = MI.getOperand(1 - Ops[0]);
603 
604  if (Flags == MachineMemOperand::MOStore)
605  storeRegToStackSlot(*MBB, Pos, MO.getReg(), MO.isKill(), FI, RC, TRI);
606  else
607  loadRegFromStackSlot(*MBB, Pos, MO.getReg(), FI, RC, TRI);
608  return &*--Pos;
609 }
610 
612  ArrayRef<unsigned> Ops,
613  MachineInstr &LoadMI,
614  LiveIntervals *LIS) const {
615  assert(LoadMI.canFoldAsLoad() && "LoadMI isn't foldable!");
616 #ifndef NDEBUG
617  for (unsigned OpIdx : Ops)
618  assert(MI.getOperand(OpIdx).isUse() && "Folding load into def!");
619 #endif
620 
621  MachineBasicBlock &MBB = *MI.getParent();
622  MachineFunction &MF = *MBB.getParent();
623 
624  // Ask the target to do the actual folding.
625  MachineInstr *NewMI = nullptr;
626  int FrameIndex = 0;
627 
628  if ((MI.getOpcode() == TargetOpcode::STACKMAP ||
629  MI.getOpcode() == TargetOpcode::PATCHPOINT ||
630  MI.getOpcode() == TargetOpcode::STATEPOINT) &&
631  isLoadFromStackSlot(LoadMI, FrameIndex)) {
632  // Fold stackmap/patchpoint.
633  NewMI = foldPatchpoint(MF, MI, Ops, FrameIndex, *this);
634  if (NewMI)
635  NewMI = &*MBB.insert(MI, NewMI);
636  } else {
637  // Ask the target to do the actual folding.
638  NewMI = foldMemoryOperandImpl(MF, MI, Ops, MI, LoadMI, LIS);
639  }
640 
641  if (!NewMI)
642  return nullptr;
643 
644  // Copy the memoperands from the load to the folded instruction.
645  if (MI.memoperands_empty()) {
646  NewMI->setMemRefs(MF, LoadMI.memoperands());
647  } else {
648  // Handle the rare case of folding multiple loads.
649  NewMI->setMemRefs(MF, MI.memoperands());
651  E = LoadMI.memoperands_end();
652  I != E; ++I) {
653  NewMI->addMemOperand(MF, *I);
654  }
655  }
656  return NewMI;
657 }
658 
660  const MachineInstr &Inst, const MachineBasicBlock *MBB) const {
661  const MachineOperand &Op1 = Inst.getOperand(1);
662  const MachineOperand &Op2 = Inst.getOperand(2);
663  const MachineRegisterInfo &MRI = MBB->getParent()->getRegInfo();
664 
665  // We need virtual register definitions for the operands that we will
666  // reassociate.
667  MachineInstr *MI1 = nullptr;
668  MachineInstr *MI2 = nullptr;
670  MI1 = MRI.getUniqueVRegDef(Op1.getReg());
672  MI2 = MRI.getUniqueVRegDef(Op2.getReg());
673 
674  // And they need to be in the trace (otherwise, they won't have a depth).
675  return MI1 && MI2 && MI1->getParent() == MBB && MI2->getParent() == MBB;
676 }
677 
679  bool &Commuted) const {
680  const MachineBasicBlock *MBB = Inst.getParent();
681  const MachineRegisterInfo &MRI = MBB->getParent()->getRegInfo();
682  MachineInstr *MI1 = MRI.getUniqueVRegDef(Inst.getOperand(1).getReg());
683  MachineInstr *MI2 = MRI.getUniqueVRegDef(Inst.getOperand(2).getReg());
684  unsigned AssocOpcode = Inst.getOpcode();
685 
686  // If only one operand has the same opcode and it's the second source operand,
687  // the operands must be commuted.
688  Commuted = MI1->getOpcode() != AssocOpcode && MI2->getOpcode() == AssocOpcode;
689  if (Commuted)
690  std::swap(MI1, MI2);
691 
692  // 1. The previous instruction must be the same type as Inst.
693  // 2. The previous instruction must have virtual register definitions for its
694  // operands in the same basic block as Inst.
695  // 3. The previous instruction's result must only be used by Inst.
696  return MI1->getOpcode() == AssocOpcode &&
697  hasReassociableOperands(*MI1, MBB) &&
698  MRI.hasOneNonDBGUse(MI1->getOperand(0).getReg());
699 }
700 
701 // 1. The operation must be associative and commutative.
702 // 2. The instruction must have virtual register definitions for its
703 // operands in the same basic block.
704 // 3. The instruction must have a reassociable sibling.
706  bool &Commuted) const {
707  return isAssociativeAndCommutative(Inst) &&
708  hasReassociableOperands(Inst, Inst.getParent()) &&
709  hasReassociableSibling(Inst, Commuted);
710 }
711 
712 // The concept of the reassociation pass is that these operations can benefit
713 // from this kind of transformation:
714 //
715 // A = ? op ?
716 // B = A op X (Prev)
717 // C = B op Y (Root)
718 // -->
719 // A = ? op ?
720 // B = X op Y
721 // C = A op B
722 //
723 // breaking the dependency between A and B, allowing them to be executed in
724 // parallel (or back-to-back in a pipeline) instead of depending on each other.
725 
726 // FIXME: This has the potential to be expensive (compile time) while not
727 // improving the code at all. Some ways to limit the overhead:
728 // 1. Track successful transforms; bail out if hit rate gets too low.
729 // 2. Only enable at -O3 or some other non-default optimization level.
730 // 3. Pre-screen pattern candidates here: if an operand of the previous
731 // instruction is known to not increase the critical path, then don't match
732 // that pattern.
734  MachineInstr &Root,
735  SmallVectorImpl<MachineCombinerPattern> &Patterns) const {
736  bool Commute;
737  if (isReassociationCandidate(Root, Commute)) {
738  // We found a sequence of instructions that may be suitable for a
739  // reassociation of operands to increase ILP. Specify each commutation
740  // possibility for the Prev instruction in the sequence and let the
741  // machine combiner decide if changing the operands is worthwhile.
742  if (Commute) {
745  } else {
748  }
749  return true;
750  }
751 
752  return false;
753 }
754 
755 /// Return true when a code sequence can improve loop throughput.
756 bool
758  return false;
759 }
760 
761 /// Attempt the reassociation transformation to reduce critical path length.
762 /// See the above comments before getMachineCombinerPatterns().
764  MachineInstr &Root, MachineInstr &Prev,
765  MachineCombinerPattern Pattern,
768  DenseMap<unsigned, unsigned> &InstrIdxForVirtReg) const {
769  MachineFunction *MF = Root.getMF();
771  const TargetInstrInfo *TII = MF->getSubtarget().getInstrInfo();
773  const TargetRegisterClass *RC = Root.getRegClassConstraint(0, TII, TRI);
774 
775  // This array encodes the operand index for each parameter because the
776  // operands may be commuted. Each row corresponds to a pattern value,
777  // and each column specifies the index of A, B, X, Y.
778  unsigned OpIdx[4][4] = {
779  { 1, 1, 2, 2 },
780  { 1, 2, 2, 1 },
781  { 2, 1, 1, 2 },
782  { 2, 2, 1, 1 }
783  };
784 
785  int Row;
786  switch (Pattern) {
787  case MachineCombinerPattern::REASSOC_AX_BY: Row = 0; break;
788  case MachineCombinerPattern::REASSOC_AX_YB: Row = 1; break;
789  case MachineCombinerPattern::REASSOC_XA_BY: Row = 2; break;
790  case MachineCombinerPattern::REASSOC_XA_YB: Row = 3; break;
791  default: llvm_unreachable("unexpected MachineCombinerPattern");
792  }
793 
794  MachineOperand &OpA = Prev.getOperand(OpIdx[Row][0]);
795  MachineOperand &OpB = Root.getOperand(OpIdx[Row][1]);
796  MachineOperand &OpX = Prev.getOperand(OpIdx[Row][2]);
797  MachineOperand &OpY = Root.getOperand(OpIdx[Row][3]);
798  MachineOperand &OpC = Root.getOperand(0);
799 
800  unsigned RegA = OpA.getReg();
801  unsigned RegB = OpB.getReg();
802  unsigned RegX = OpX.getReg();
803  unsigned RegY = OpY.getReg();
804  unsigned RegC = OpC.getReg();
805 
807  MRI.constrainRegClass(RegA, RC);
809  MRI.constrainRegClass(RegB, RC);
811  MRI.constrainRegClass(RegX, RC);
813  MRI.constrainRegClass(RegY, RC);
815  MRI.constrainRegClass(RegC, RC);
816 
817  // Create a new virtual register for the result of (X op Y) instead of
818  // recycling RegB because the MachineCombiner's computation of the critical
819  // path requires a new register definition rather than an existing one.
820  unsigned NewVR = MRI.createVirtualRegister(RC);
821  InstrIdxForVirtReg.insert(std::make_pair(NewVR, 0));
822 
823  unsigned Opcode = Root.getOpcode();
824  bool KillA = OpA.isKill();
825  bool KillX = OpX.isKill();
826  bool KillY = OpY.isKill();
827 
828  // Create new instructions for insertion.
829  MachineInstrBuilder MIB1 =
830  BuildMI(*MF, Prev.getDebugLoc(), TII->get(Opcode), NewVR)
831  .addReg(RegX, getKillRegState(KillX))
832  .addReg(RegY, getKillRegState(KillY));
833  MachineInstrBuilder MIB2 =
834  BuildMI(*MF, Root.getDebugLoc(), TII->get(Opcode), RegC)
835  .addReg(RegA, getKillRegState(KillA))
836  .addReg(NewVR, getKillRegState(true));
837 
838  setSpecialOperandAttr(Root, Prev, *MIB1, *MIB2);
839 
840  // Record new instructions for insertion and old instructions for deletion.
841  InsInstrs.push_back(MIB1);
842  InsInstrs.push_back(MIB2);
843  DelInstrs.push_back(&Prev);
844  DelInstrs.push_back(&Root);
845 }
846 
848  MachineInstr &Root, MachineCombinerPattern Pattern,
851  DenseMap<unsigned, unsigned> &InstIdxForVirtReg) const {
853 
854  // Select the previous instruction in the sequence based on the input pattern.
855  MachineInstr *Prev = nullptr;
856  switch (Pattern) {
859  Prev = MRI.getUniqueVRegDef(Root.getOperand(1).getReg());
860  break;
863  Prev = MRI.getUniqueVRegDef(Root.getOperand(2).getReg());
864  break;
865  default:
866  break;
867  }
868 
869  assert(Prev && "Unknown pattern for machine combiner");
870 
871  reassociateOps(Root, *Prev, Pattern, InsInstrs, DelInstrs, InstIdxForVirtReg);
872 }
873 
874 bool TargetInstrInfo::isReallyTriviallyReMaterializableGeneric(
875  const MachineInstr &MI, AliasAnalysis *AA) const {
876  const MachineFunction &MF = *MI.getMF();
877  const MachineRegisterInfo &MRI = MF.getRegInfo();
878 
879  // Remat clients assume operand 0 is the defined register.
880  if (!MI.getNumOperands() || !MI.getOperand(0).isReg())
881  return false;
882  unsigned DefReg = MI.getOperand(0).getReg();
883 
884  // A sub-register definition can only be rematerialized if the instruction
885  // doesn't read the other parts of the register. Otherwise it is really a
886  // read-modify-write operation on the full virtual register which cannot be
887  // moved safely.
889  MI.getOperand(0).getSubReg() && MI.readsVirtualRegister(DefReg))
890  return false;
891 
892  // A load from a fixed stack slot can be rematerialized. This may be
893  // redundant with subsequent checks, but it's target-independent,
894  // simple, and a common case.
895  int FrameIdx = 0;
896  if (isLoadFromStackSlot(MI, FrameIdx) &&
897  MF.getFrameInfo().isImmutableObjectIndex(FrameIdx))
898  return true;
899 
900  // Avoid instructions obviously unsafe for remat.
901  if (MI.isNotDuplicable() || MI.mayStore() || MI.hasUnmodeledSideEffects())
902  return false;
903 
904  // Don't remat inline asm. We have no idea how expensive it is
905  // even if it's side effect free.
906  if (MI.isInlineAsm())
907  return false;
908 
909  // Avoid instructions which load from potentially varying memory.
910  if (MI.mayLoad() && !MI.isDereferenceableInvariantLoad(AA))
911  return false;
912 
913  // If any of the registers accessed are non-constant, conservatively assume
914  // the instruction is not rematerializable.
915  for (unsigned i = 0, e = MI.getNumOperands(); i != e; ++i) {
916  const MachineOperand &MO = MI.getOperand(i);
917  if (!MO.isReg()) continue;
918  unsigned Reg = MO.getReg();
919  if (Reg == 0)
920  continue;
921 
922  // Check for a well-behaved physical register.
924  if (MO.isUse()) {
925  // If the physreg has no defs anywhere, it's just an ambient register
926  // and we can freely move its uses. Alternatively, if it's allocatable,
927  // it could get allocated to something with a def during allocation.
928  if (!MRI.isConstantPhysReg(Reg))
929  return false;
930  } else {
931  // A physreg def. We can't remat it.
932  return false;
933  }
934  continue;
935  }
936 
937  // Only allow one virtual-register def. There may be multiple defs of the
938  // same virtual register, though.
939  if (MO.isDef() && Reg != DefReg)
940  return false;
941 
942  // Don't allow any virtual-register uses. Rematting an instruction with
943  // virtual register uses would length the live ranges of the uses, which
944  // is not necessarily a good idea, certainly not "trivial".
945  if (MO.isUse())
946  return false;
947  }
948 
949  // Everything checked out.
950  return true;
951 }
952 
954  const MachineFunction *MF = MI.getMF();
956  bool StackGrowsDown =
958 
959  unsigned FrameSetupOpcode = getCallFrameSetupOpcode();
960  unsigned FrameDestroyOpcode = getCallFrameDestroyOpcode();
961 
962  if (!isFrameInstr(MI))
963  return 0;
964 
965  int SPAdj = TFI->alignSPAdjust(getFrameSize(MI));
966 
967  if ((!StackGrowsDown && MI.getOpcode() == FrameSetupOpcode) ||
968  (StackGrowsDown && MI.getOpcode() == FrameDestroyOpcode))
969  SPAdj = -SPAdj;
970 
971  return SPAdj;
972 }
973 
974 /// isSchedulingBoundary - Test if the given instruction should be
975 /// considered a scheduling boundary. This primarily includes labels
976 /// and terminators.
978  const MachineBasicBlock *MBB,
979  const MachineFunction &MF) const {
980  // Terminators and labels can't be scheduled around.
981  if (MI.isTerminator() || MI.isPosition())
982  return true;
983 
984  // Don't attempt to schedule around any instruction that defines
985  // a stack-oriented pointer, as it's unlikely to be profitable. This
986  // saves compile time, because it doesn't require every single
987  // stack slot reference to depend on the instruction that does the
988  // modification.
989  const TargetLowering &TLI = *MF.getSubtarget().getTargetLowering();
992 }
993 
994 // Provide a global flag for disabling the PreRA hazard recognizer that targets
995 // may choose to honor.
997  return !DisableHazardRecognizer;
998 }
999 
1000 // Default implementation of CreateTargetRAHazardRecognizer.
1003  const ScheduleDAG *DAG) const {
1004  // Dummy hazard recognizer allows all instructions to issue.
1005  return new ScheduleHazardRecognizer();
1006 }
1007 
1008 // Default implementation of CreateTargetMIHazardRecognizer.
1011  const ScheduleDAG *DAG) const {
1012  return (ScheduleHazardRecognizer *)
1013  new ScoreboardHazardRecognizer(II, DAG, "misched");
1014 }
1015 
1016 // Default implementation of CreateTargetPostRAHazardRecognizer.
1019  const ScheduleDAG *DAG) const {
1020  return (ScheduleHazardRecognizer *)
1021  new ScoreboardHazardRecognizer(II, DAG, "post-RA-sched");
1022 }
1023 
1024 //===----------------------------------------------------------------------===//
1025 // SelectionDAG latency interface.
1026 //===----------------------------------------------------------------------===//
1027 
1028 int
1030  SDNode *DefNode, unsigned DefIdx,
1031  SDNode *UseNode, unsigned UseIdx) const {
1032  if (!ItinData || ItinData->isEmpty())
1033  return -1;
1034 
1035  if (!DefNode->isMachineOpcode())
1036  return -1;
1037 
1038  unsigned DefClass = get(DefNode->getMachineOpcode()).getSchedClass();
1039  if (!UseNode->isMachineOpcode())
1040  return ItinData->getOperandCycle(DefClass, DefIdx);
1041  unsigned UseClass = get(UseNode->getMachineOpcode()).getSchedClass();
1042  return ItinData->getOperandLatency(DefClass, DefIdx, UseClass, UseIdx);
1043 }
1044 
1046  SDNode *N) const {
1047  if (!ItinData || ItinData->isEmpty())
1048  return 1;
1049 
1050  if (!N->isMachineOpcode())
1051  return 1;
1052 
1053  return ItinData->getStageLatency(get(N->getMachineOpcode()).getSchedClass());
1054 }
1055 
1056 //===----------------------------------------------------------------------===//
1057 // MachineInstr latency interface.
1058 //===----------------------------------------------------------------------===//
1059 
1061  const MachineInstr &MI) const {
1062  if (!ItinData || ItinData->isEmpty())
1063  return 1;
1064 
1065  unsigned Class = MI.getDesc().getSchedClass();
1066  int UOps = ItinData->Itineraries[Class].NumMicroOps;
1067  if (UOps >= 0)
1068  return UOps;
1069 
1070  // The # of u-ops is dynamically determined. The specific target should
1071  // override this function to return the right number.
1072  return 1;
1073 }
1074 
1075 /// Return the default expected latency for a def based on it's opcode.
1077  const MachineInstr &DefMI) const {
1078  if (DefMI.isTransient())
1079  return 0;
1080  if (DefMI.mayLoad())
1081  return SchedModel.LoadLatency;
1082  if (isHighLatencyDef(DefMI.getOpcode()))
1083  return SchedModel.HighLatency;
1084  return 1;
1085 }
1086 
1088  return 0;
1089 }
1090 
1092  const MachineInstr &MI,
1093  unsigned *PredCost) const {
1094  // Default to one cycle for no itinerary. However, an "empty" itinerary may
1095  // still have a MinLatency property, which getStageLatency checks.
1096  if (!ItinData)
1097  return MI.mayLoad() ? 2 : 1;
1098 
1099  return ItinData->getStageLatency(MI.getDesc().getSchedClass());
1100 }
1101 
1103  const MachineInstr &DefMI,
1104  unsigned DefIdx) const {
1105  const InstrItineraryData *ItinData = SchedModel.getInstrItineraries();
1106  if (!ItinData || ItinData->isEmpty())
1107  return false;
1108 
1109  unsigned DefClass = DefMI.getDesc().getSchedClass();
1110  int DefCycle = ItinData->getOperandCycle(DefClass, DefIdx);
1111  return (DefCycle != -1 && DefCycle <= 1);
1112 }
1113 
1114 /// Both DefMI and UseMI must be valid. By default, call directly to the
1115 /// itinerary. This may be overriden by the target.
1117  const MachineInstr &DefMI,
1118  unsigned DefIdx,
1119  const MachineInstr &UseMI,
1120  unsigned UseIdx) const {
1121  unsigned DefClass = DefMI.getDesc().getSchedClass();
1122  unsigned UseClass = UseMI.getDesc().getSchedClass();
1123  return ItinData->getOperandLatency(DefClass, DefIdx, UseClass, UseIdx);
1124 }
1125 
1126 /// If we can determine the operand latency from the def only, without itinerary
1127 /// lookup, do so. Otherwise return -1.
1129  const InstrItineraryData *ItinData, const MachineInstr &DefMI) const {
1130 
1131  // Let the target hook getInstrLatency handle missing itineraries.
1132  if (!ItinData)
1133  return getInstrLatency(ItinData, DefMI);
1134 
1135  if(ItinData->isEmpty())
1136  return defaultDefLatency(ItinData->SchedModel, DefMI);
1137 
1138  // ...operand lookup required
1139  return -1;
1140 }
1141 
1143  const MachineInstr &MI, unsigned DefIdx,
1144  SmallVectorImpl<RegSubRegPairAndIdx> &InputRegs) const {
1145  assert((MI.isRegSequence() ||
1146  MI.isRegSequenceLike()) && "Instruction do not have the proper type");
1147 
1148  if (!MI.isRegSequence())
1149  return getRegSequenceLikeInputs(MI, DefIdx, InputRegs);
1150 
1151  // We are looking at:
1152  // Def = REG_SEQUENCE v0, sub0, v1, sub1, ...
1153  assert(DefIdx == 0 && "REG_SEQUENCE only has one def");
1154  for (unsigned OpIdx = 1, EndOpIdx = MI.getNumOperands(); OpIdx != EndOpIdx;
1155  OpIdx += 2) {
1156  const MachineOperand &MOReg = MI.getOperand(OpIdx);
1157  if (MOReg.isUndef())
1158  continue;
1159  const MachineOperand &MOSubIdx = MI.getOperand(OpIdx + 1);
1160  assert(MOSubIdx.isImm() &&
1161  "One of the subindex of the reg_sequence is not an immediate");
1162  // Record Reg:SubReg, SubIdx.
1163  InputRegs.push_back(RegSubRegPairAndIdx(MOReg.getReg(), MOReg.getSubReg(),
1164  (unsigned)MOSubIdx.getImm()));
1165  }
1166  return true;
1167 }
1168 
1170  const MachineInstr &MI, unsigned DefIdx,
1171  RegSubRegPairAndIdx &InputReg) const {
1172  assert((MI.isExtractSubreg() ||
1173  MI.isExtractSubregLike()) && "Instruction do not have the proper type");
1174 
1175  if (!MI.isExtractSubreg())
1176  return getExtractSubregLikeInputs(MI, DefIdx, InputReg);
1177 
1178  // We are looking at:
1179  // Def = EXTRACT_SUBREG v0.sub1, sub0.
1180  assert(DefIdx == 0 && "EXTRACT_SUBREG only has one def");
1181  const MachineOperand &MOReg = MI.getOperand(1);
1182  if (MOReg.isUndef())
1183  return false;
1184  const MachineOperand &MOSubIdx = MI.getOperand(2);
1185  assert(MOSubIdx.isImm() &&
1186  "The subindex of the extract_subreg is not an immediate");
1187 
1188  InputReg.Reg = MOReg.getReg();
1189  InputReg.SubReg = MOReg.getSubReg();
1190  InputReg.SubIdx = (unsigned)MOSubIdx.getImm();
1191  return true;
1192 }
1193 
1195  const MachineInstr &MI, unsigned DefIdx,
1196  RegSubRegPair &BaseReg, RegSubRegPairAndIdx &InsertedReg) const {
1197  assert((MI.isInsertSubreg() ||
1198  MI.isInsertSubregLike()) && "Instruction do not have the proper type");
1199 
1200  if (!MI.isInsertSubreg())
1201  return getInsertSubregLikeInputs(MI, DefIdx, BaseReg, InsertedReg);
1202 
1203  // We are looking at:
1204  // Def = INSERT_SEQUENCE v0, v1, sub0.
1205  assert(DefIdx == 0 && "INSERT_SUBREG only has one def");
1206  const MachineOperand &MOBaseReg = MI.getOperand(1);
1207  const MachineOperand &MOInsertedReg = MI.getOperand(2);
1208  if (MOInsertedReg.isUndef())
1209  return false;
1210  const MachineOperand &MOSubIdx = MI.getOperand(3);
1211  assert(MOSubIdx.isImm() &&
1212  "One of the subindex of the reg_sequence is not an immediate");
1213  BaseReg.Reg = MOBaseReg.getReg();
1214  BaseReg.SubReg = MOBaseReg.getSubReg();
1215 
1216  InsertedReg.Reg = MOInsertedReg.getReg();
1217  InsertedReg.SubReg = MOInsertedReg.getSubReg();
1218  InsertedReg.SubIdx = (unsigned)MOSubIdx.getImm();
1219  return true;
1220 }
const MachineInstrBuilder & add(const MachineOperand &MO) const
bool modifiesRegister(unsigned Reg, const TargetRegisterInfo *TRI) const
Return true if the MachineInstr modifies (fully define or partially define) the specified register...
GCNRegPressure max(const GCNRegPressure &P1, const GCNRegPressure &P2)
bool contains(unsigned Reg) const
Return true if the specified register is included in this register class.
const TargetRegisterClass * getRegClass(unsigned Reg) const
Return the register class of the specified virtual register.
const MachineFunction * getMF() const
Return the function that contains the basic block that this instruction belongs to.
LLVM_ATTRIBUTE_NORETURN void report_fatal_error(Error Err, bool gen_crash_diag=true)
Report a serious error, calling any installed error handler.
Definition: Error.cpp:139
Compute iterated dominance frontiers using a linear time algorithm.
Definition: AllocatorList.h:24
MachineInstr * foldMemoryOperand(MachineInstr &MI, ArrayRef< unsigned > Ops, int FI, LiveIntervals *LIS=nullptr) const
Attempt to fold a load or store of the specified stack slot into the specified machine instruction fo...
bool isLookupPtrRegClass() const
Set if this operand is a pointer value and it requires a callback to look up its register class...
Definition: MCInstrDesc.h:87
static MachineInstr * foldPatchpoint(MachineFunction &MF, MachineInstr &MI, ArrayRef< unsigned > Ops, int FrameIndex, const TargetInstrInfo &TII)
virtual const TargetRegisterInfo * getRegisterInfo() const
getRegisterInfo - If register information is available, return it.
virtual bool isThroughputPattern(MachineCombinerPattern Pattern) const
Return true when a code sequence can improve throughput.
bool isExtractSubregLike(QueryType Type=IgnoreBundle) const
Return true if this instruction behaves the same way as the generic EXTRACT_SUBREG instructions...
Definition: MachineInstr.h:782
const DebugLoc & getDebugLoc() const
Returns the debug location id of this MachineInstr.
Definition: MachineInstr.h:383
Describe properties that are true of each instruction in the target description file.
Definition: MCInstrDesc.h:163
unsigned getReg() const
getReg - Returns the register number.
LLVM_NODISCARD LLVM_ATTRIBUTE_ALWAYS_INLINE size_t size() const
size - Get the string size.
Definition: StringRef.h:138
void setIsUndef(bool Val=true)
static bool isVirtualRegister(unsigned Reg)
Return true if the specified register number is in the virtual register namespace.
unsigned Reg
virtual MachineInstr & duplicate(MachineBasicBlock &MBB, MachineBasicBlock::iterator InsertBefore, const MachineInstr &Orig) const
Clones instruction or the whole instruction bundle Orig and insert into MBB before InsertBefore...
MachineInstr * commuteInstruction(MachineInstr &MI, bool NewMI=false, unsigned OpIdx1=CommuteAnyOperandIndex, unsigned OpIdx2=CommuteAnyOperandIndex) const
This method commutes the operands of the given machine instruction MI.
unsigned getSubReg() const
virtual bool hasReassociableOperands(const MachineInstr &Inst, const MachineBasicBlock *MBB) const
Return true when Inst has reassociable operands in the same MBB.
bool isInlineAsm() const
virtual const TargetLowering * getTargetLowering() const
bool isPredicable(QueryType Type=AllInBundle) const
Return true if this instruction has a predicate operand that controls execution.
Definition: MachineInstr.h:687
virtual unsigned insertBranch(MachineBasicBlock &MBB, MachineBasicBlock *TBB, MachineBasicBlock *FBB, ArrayRef< MachineOperand > Cond, const DebugLoc &DL, int *BytesAdded=nullptr) const
Insert branch code into the end of the specified MachineBasicBlock.
bool isRegSequence() const
static cl::opt< bool > DisableHazardRecognizer("disable-sched-hazard", cl::Hidden, cl::init(false), cl::desc("Disable hazard detection during preRA scheduling"))
bool isTransient() const
Return true if this is a transient instruction that is either very likely to be eliminated during reg...
virtual bool getExtractSubregLikeInputs(const MachineInstr &MI, unsigned DefIdx, RegSubRegPairAndIdx &InputReg) const
Target-dependent implementation of getExtractSubregInputs.
unsigned const TargetRegisterInfo * TRI
A debug info location.
Definition: DebugLoc.h:34
unsigned getCallFrameDestroyOpcode() const
unsigned defaultDefLatency(const MCSchedModel &SchedModel, const MachineInstr &DefMI) const
Return the default expected latency for a def based on its opcode.
bool isImm() const
isImm - Tests if this is a MO_Immediate operand.
MachineInstr * CreateMachineInstr(const MCInstrDesc &MCID, const DebugLoc &DL, bool NoImp=false)
CreateMachineInstr - Allocate a new MachineInstr.
void setIsRenamable(bool Val=true)
unsigned getSpillSize(const TargetRegisterClass &RC) const
Return the size in bytes of the stack slot allocated to hold a spilled copy of a register from class ...
LLVM_NODISCARD LLVM_ATTRIBUTE_ALWAYS_INLINE const char * data() const
data - Get a pointer to the start of the string (which may not be null terminated).
Definition: StringRef.h:128
bool isInternalRead() const
virtual bool isHighLatencyDef(int opc) const
Return true if this opcode has high latency to its result.
const TargetRegisterClass * getRegClass(unsigned i) const
Returns the register class associated with the enumeration value.
virtual bool isUnpredicatedTerminator(const MachineInstr &MI) const
Returns true if the instruction is a terminator instruction that has not been predicated.
std::pair< iterator, bool > insert(const std::pair< KeyT, ValueT > &KV)
Definition: DenseMap.h:191
A description of a memory reference used in the backend.
virtual void reMaterialize(MachineBasicBlock &MBB, MachineBasicBlock::iterator MI, unsigned DestReg, unsigned SubIdx, const MachineInstr &Orig, const TargetRegisterInfo &TRI) const
Re-issue the specified 'original' instruction at the specific location targeting a new destination register.
instr_iterator erase(instr_iterator I)
Remove an instruction from the instruction list and delete it.
unsigned getNumOperands() const
Return the number of declared MachineOperands for this MachineInstruction.
Definition: MCInstrDesc.h:210
Provide an instruction scheduling machine model to CodeGen passes.
const HexagonInstrInfo * TII
unsigned getVarIdx() const
Get starting index of non call related arguments (calling convention, statepoint flags, vm state and gc state).
Definition: StackMaps.h:173
const TargetRegisterClass * getRegClassConstraint(unsigned OpIdx, const TargetInstrInfo *TII, const TargetRegisterInfo *TRI) const
Compute the static register class constraint for operand OpIdx.
unsigned getNumOperands() const
Access to explicit operands of the instruction.
Definition: MachineInstr.h:412
const TargetRegisterClass * getRegClass(const MCInstrDesc &MCID, unsigned OpNum, const TargetRegisterInfo *TRI, const MachineFunction &MF) const
Given a machine instruction descriptor, returns the register class constraint for OpNum...
const InstrItinerary * Itineraries
Array of itineraries selected.
This class consists of common code factored out of the SmallVector class to reduce code duplication b...
Definition: APFloat.h:42
unsigned SubReg
MachineInstr & CloneMachineInstrBundle(MachineBasicBlock &MBB, MachineBasicBlock::iterator InsertBefore, const MachineInstr &Orig)
Clones instruction or the whole instruction bundle Orig and insert into MBB before InsertBefore...
int64_t getObjectOffset(int ObjectIdx) const
Return the assigned stack offset of the specified object from the incoming stack pointer.
bool isTerminator(QueryType Type=AnyInBundle) const
Returns true if this instruction part of the terminator for a basic block.
Definition: MachineInstr.h:649
The MachineFrameInfo class represents an abstract stack frame until prolog/epilog code is inserted...
virtual bool hasLowDefLatency(const TargetSchedModel &SchedModel, const MachineInstr &DefMI, unsigned DefIdx) const
Compute operand latency of a def of 'Reg'.
unsigned getOpcode() const
Returns the opcode of this MachineInstr.
Definition: MachineInstr.h:409
const InstrItineraryData * getInstrItineraries() const
MachineMemOperand * getMachineMemOperand(MachinePointerInfo PtrInfo, MachineMemOperand::Flags f, uint64_t s, unsigned base_alignment, const AAMDNodes &AAInfo=AAMDNodes(), const MDNode *Ranges=nullptr, SyncScope::ID SSID=SyncScope::System, AtomicOrdering Ordering=AtomicOrdering::NotAtomic, AtomicOrdering FailureOrdering=AtomicOrdering::NotAtomic)
getMachineMemOperand - Allocate a new MachineMemOperand.
This class defines information used to lower LLVM code to legal SelectionDAG operators that the targe...
bool isImmutableObjectIndex(int ObjectIdx) const
Returns true if the specified index corresponds to an immutable object.
const MCInstrDesc & getDesc() const
Returns the target instruction descriptor of this MachineInstr.
Definition: MachineInstr.h:406
virtual MachineInstr * foldMemoryOperandImpl(MachineFunction &MF, MachineInstr &MI, ArrayRef< unsigned > Ops, MachineBasicBlock::iterator InsertPt, int FrameIndex, LiveIntervals *LIS=nullptr) const
Target-dependent implementation for foldMemoryOperand.
bool isBundle() const
virtual unsigned getInstrLatency(const InstrItineraryData *ItinData, const MachineInstr &MI, unsigned *PredCost=nullptr) const
Compute the instruction latency of a given instruction.
int alignSPAdjust(int SPAdj) const
alignSPAdjust - This method aligns the stack adjustment to the correct alignment. ...
static bool isAsmComment(const char *Str, const MCAsmInfo &MAI)
virtual bool getMachineCombinerPatterns(MachineInstr &Root, SmallVectorImpl< MachineCombinerPattern > &Patterns) const
Return true when there is potentially a faster code sequence for an instruction chain ending in Root...
ArrayRef - Represent a constant reference to an array (0 or more elements consecutively in memory)...
Definition: APInt.h:33
bool isLittleEndian() const
Layout endianness...
Definition: DataLayout.h:221
Itinerary data supplied by a subtarget to be used by a target.
virtual const TargetInstrInfo * getInstrInfo() const
void addMemOperand(MachineFunction &MF, MachineMemOperand *MO)
Add a MachineMemOperand to the machine instruction.
instr_iterator insert(instr_iterator I, MachineInstr *M)
Insert MI into the instruction list before I, possibly inside a bundle.
TargetInstrInfo::RegSubRegPairAndIdx RegSubRegPairAndIdx
int64_t getObjectSize(int ObjectIdx) const
Return the size of the specified object.
unsigned getStageLatency(unsigned ItinClassIndx) const
Return the total stage latency of the given class.
bool isDereferenceableInvariantLoad(AliasAnalysis *AA) const
Return true if this load instruction never traps and points to a memory location whose value doesn't change.
const TargetRegisterClass * constrainRegClass(unsigned Reg, const TargetRegisterClass *RC, unsigned MinNumRegs=0)
constrainRegClass - Constrain the register class of the specified virtual register to be a common sub...
bool isBranch(QueryType Type=AnyInBundle) const
Returns true if this is a conditional, unconditional, or indirect branch.
Definition: MachineInstr.h:657
Instances of this class represent a single low-level machine instruction.
Definition: MCInst.h:161
const DataLayout & getDataLayout() const
Return the DataLayout attached to the Module associated to this MF.
BasicBlockListType::iterator iterator
unsigned getKillRegState(bool B)
TargetInstrInfo - Interface to description of machine instruction set.
virtual void getNoop(MCInst &NopInst) const
Return the noop instruction to use for a noop.
This class is intended to be used as a base class for asm properties and features specific to the tar...
Definition: MCAsmInfo.h:56
bool usePreRAHazardRecognizer() const
Provide a global flag for disabling the PreRA hazard recognizer that targets may choose to honor...
static const unsigned CommuteAnyOperandIndex
MachineInstrBuilder BuildMI(MachineFunction &MF, const DebugLoc &DL, const MCInstrDesc &MCID)
Builder interface. Specify how to create the initial instruction itself.
unsigned getSchedClass() const
Return the scheduling class for this instruction.
Definition: MCInstrDesc.h:571
unsigned getObjectAlignment(int ObjectIdx) const
Return the alignment of the specified stack object.
bool mayStore(QueryType Type=AnyInBundle) const
Return true if this instruction could possibly modify memory.
Definition: MachineInstr.h:820
virtual MachineInstr * commuteInstructionImpl(MachineInstr &MI, bool NewMI, unsigned OpIdx1, unsigned OpIdx2) const
This method commutes the operands of the given machine instruction MI.
virtual void ReplaceTailWithBranchTo(MachineBasicBlock::iterator Tail, MachineBasicBlock *NewDest) const
Delete the instruction OldInst and everything after it, replacing it with an unconditional branch to ...
initializer< Ty > init(const Ty &Val)
Definition: CommandLine.h:410
unsigned LoadLatency
Definition: MCSchedule.h:288
* if(!EatIfPresent(lltok::kw_thread_local)) return false
ParseOptionalThreadLocal := /*empty.
unsigned const MachineRegisterInfo * MRI
virtual unsigned getPredicationCost(const MachineInstr &MI) const
ArrayRef< MachineMemOperand * > memoperands() const
Access to memory operands of the instruction.
Definition: MachineInstr.h:516
HazardRecognizer - This determines whether or not an instruction can be issued this cycle...
virtual bool getInsertSubregLikeInputs(const MachineInstr &MI, unsigned DefIdx, RegSubRegPair &BaseReg, RegSubRegPairAndIdx &InsertedReg) const
Target-dependent implementation of getInsertSubregInputs.
const TargetSubtargetInfo & getSubtarget() const
getSubtarget - Return the subtarget for which this machine code is being compiled.
bool readsVirtualRegister(unsigned Reg) const
Return true if the MachineInstr reads the specified virtual register.
MachineFrameInfo & getFrameInfo()
getFrameInfo - Return the frame info object for the current function.
int getOperandLatency(unsigned DefClass, unsigned DefIdx, unsigned UseClass, unsigned UseIdx) const
Compute and return the use operand latency of a given itinerary class and operand index if the value ...
MachineInstrBuilder & UseMI
static GCRegistry::Add< CoreCLRGC > E("coreclr", "CoreCLR-compatible GC")
const char * getSeparatorString() const
Definition: MCAsmInfo.h:480
virtual unsigned getNumMicroOps(const InstrItineraryData *ItinData, const MachineInstr &MI) const
Return the number of u-operations the given machine instruction will be decoded to on the target cpu...
StackDirection getStackGrowthDirection() const
getStackGrowthDirection - Return the direction the stack grows
int16_t NumMicroOps
Number of micro-ops; -1 means it's variable.
void setMBB(MachineBasicBlock *MBB)
unsigned getCallFrameSetupOpcode() const
These methods return the opcode of the frame setup/destroy instructions if they exist (-1 otherwise)...
bool isReassociationCandidate(const MachineInstr &Inst, bool &Commuted) const
Return true if the input Inst is part of a chain of dependent ops that are suitable for reassociatio...
StringRef getCommentString() const
Definition: MCAsmInfo.h:486
virtual void setSpecialOperandAttr(MachineInstr &OldMI1, MachineInstr &OldMI2, MachineInstr &NewMI1, MachineInstr &NewMI2) const
This is an architecture-specific helper function of reassociateOps.
unsigned getVarIdx() const
Get the operand index of the variable list of non-argument operands.
Definition: StackMaps.h:128
void setImm(int64_t immVal)
void setIsInternalRead(bool Val=true)
MI-level patchpoint operands.
Definition: StackMaps.h:77
virtual unsigned getInlineAsmLength(const char *Str, const MCAsmInfo &MAI) const
Measure the specified inline asm to determine an approximation of its length.
const MachineInstrBuilder & addFrameIndex(int Idx) const
MachineCombinerPattern
These are instruction patterns matched by the machine combiner pass.
virtual bool getStackSlotRange(const TargetRegisterClass *RC, unsigned SubIdx, unsigned &Size, unsigned &Offset, const MachineFunction &MF) const
Compute the size in bytes and offset within a stack slot of a spilled register or subregister...
bool isCopy() const
virtual bool PredicateInstruction(MachineInstr &MI, ArrayRef< MachineOperand > Pred) const
Convert the instruction into a predicated instruction.
bool isInsertSubregLike(QueryType Type=IgnoreBundle) const
Return true if this instruction behaves the same way as the generic INSERT_SUBREG instructions...
Definition: MachineInstr.h:796
unsigned getSubRegIdxOffset(unsigned Idx) const
Get the offset of the bit range covered by a sub-register index.
TargetRegisterInfo base class - We assume that the target defines a static array of TargetRegisterDes...
size_t size() const
Definition: SmallVector.h:53
virtual bool isPredicated(const MachineInstr &MI) const
Returns true if the instruction is already predicated.
bool isMachineOpcode() const
Test if this node has a post-isel opcode, directly corresponding to a MachineInstr opcode...
#define llvm_unreachable(msg)
Marks that the current location is not supposed to be reachable.
bool isConstantPhysReg(unsigned PhysReg) const
Returns true if PhysReg is unallocatable and constant throughout the function.
bool hasSubClassEq(const TargetRegisterClass *RC) const
Returns true if RC is a sub-class of or equal to this class.
void setIsKill(bool Val=true)
The memory access writes data.
int getOperandConstraint(unsigned OpNum, MCOI::OperandConstraint Constraint) const
Returns the value of the specific constraint if it is set.
Definition: MCInstrDesc.h:187
virtual bool hasLoadFromStackSlot(const MachineInstr &MI, SmallVectorImpl< const MachineMemOperand *> &Accesses) const
If the specified machine instruction has a load from a stack slot, return true along with the FrameIn...
virtual bool isSchedulingBoundary(const MachineInstr &MI, const MachineBasicBlock *MBB, const MachineFunction &MF) const
Test if the given instruction should be considered a scheduling boundary.
Iterator for intrusive lists based on ilist_node.
void addSuccessor(MachineBasicBlock *Succ, BranchProbability Prob=BranchProbability::getUnknown())
Add Succ as a successor of this MachineBasicBlock.
void substituteRegister(unsigned FromReg, unsigned ToReg, unsigned SubIdx, const TargetRegisterInfo &RegInfo)
Replace all occurrences of FromReg with ToReg:SubIdx, properly composing subreg indices where necessa...
mmo_iterator memoperands_begin() const
Access to memory operands of the instruction.
Definition: MachineInstr.h:534
void reassociateOps(MachineInstr &Root, MachineInstr &Prev, MachineCombinerPattern Pattern, SmallVectorImpl< MachineInstr *> &InsInstrs, SmallVectorImpl< MachineInstr *> &DelInstrs, DenseMap< unsigned, unsigned > &InstrIdxForVirtReg) const
Attempt to reassociate Root and Prev according to Pattern to reduce critical path length...
MachineInstr * CloneMachineInstr(const MachineInstr *Orig)
Create a new MachineInstr which is a copy of Orig, identical in all ways except the instruction has n...
MachineOperand class - Representation of each machine instruction operand.
This is a 'vector' (really, a variable-sized array), optimized for the case when the array is small...
Definition: SmallVector.h:847
bool isInsertSubreg() const
A pair composed of a register and a sub-register index.
MachineInstrBuilder MachineInstrBuilder & DefMI
unsigned getVarIdx() const
Get the operand index of the variable list of non-argument operands.
Definition: StackMaps.h:57
bool isFrameInstr(const MachineInstr &I) const
Returns true if the argument is a frame pseudo instruction.
Information about stack frame layout on the target.
unsigned HighLatency
Definition: MCSchedule.h:295
unsigned getNumDefs() const
Return the number of MachineOperands that are register definitions.
Definition: MCInstrDesc.h:225
Represents one node in the SelectionDAG.
int64_t getFrameSize(const MachineInstr &I) const
Returns size of the frame associated with the given frame instruction.
int64_t getImm() const
bool getRegSequenceInputs(const MachineInstr &MI, unsigned DefIdx, SmallVectorImpl< RegSubRegPairAndIdx > &InputRegs) const
Build the equivalent inputs of a REG_SEQUENCE for the given MI and DefIdx.
MachineInstr * getUniqueVRegDef(unsigned Reg) const
getUniqueVRegDef - Return the unique machine instr that defines the specified virtual register or nul...
virtual bool findCommutedOpIndices(MachineInstr &MI, unsigned &SrcOpIdx1, unsigned &SrcOpIdx2) const
Returns true iff the routine could find two commutable operands in the given machine instruction...
void swap(llvm::BitVector &LHS, llvm::BitVector &RHS)
Implement std::swap in terms of BitVector swap.
Definition: BitVector.h:941
unsigned getMaxInstLength() const
Definition: MCAsmInfo.h:477
static unsigned getReg(const void *D, unsigned RC, unsigned RegNo)
bool isRenamable() const
isRenamable - Returns true if this register may be renamed, i.e.
virtual void storeRegToStackSlot(MachineBasicBlock &MBB, MachineBasicBlock::iterator MI, unsigned SrcReg, bool isKill, int FrameIndex, const TargetRegisterClass *RC, const TargetRegisterInfo *TRI) const
Store the specified register of the given register class to the specified stack frame index...
bool isEmpty() const
Returns true if there are no itineraries.
const MachineBasicBlock * getParent() const
Definition: MachineInstr.h:254
MachineRegisterInfo - Keep track of information for virtual and physical registers, including vreg register classes, use/def chains for registers, etc.
The memory access reads data.
TargetSubtargetInfo - Generic base class for all target subtargets.
static MachinePointerInfo getFixedStack(MachineFunction &MF, int FI, int64_t Offset=0)
Return a MachinePointerInfo record that refers to the specified FrameIndex.
virtual void insertNoop(MachineBasicBlock &MBB, MachineBasicBlock::iterator MI) const
Insert a noop into the instruction stream at the specified point.
bool isRegSequenceLike(QueryType Type=IgnoreBundle) const
Return true if this instruction behaves the same way as the generic REG_SEQUENCE instructions.
Definition: MachineInstr.h:767
Representation of each machine instruction.
Definition: MachineInstr.h:64
static bool isPhysicalRegister(unsigned Reg)
Return true if the specified register number is in the physical register namespace.
const MachineFunction * getParent() const
Return the MachineFunction containing this basic block.
static bool fixCommutedOpIndices(unsigned &ResultIdx1, unsigned &ResultIdx2, unsigned CommutableOpIdx1, unsigned CommutableOpIdx2)
Assigns the (CommutableOpIdx1, CommutableOpIdx2) pair of commutable operand indices to (ResultIdx1...
virtual bool hasStoreToStackSlot(const MachineInstr &MI, SmallVectorImpl< const MachineMemOperand *> &Accesses) const
If the specified machine instruction has a store to a stack slot, return true along with the FrameInd...
const MachineInstrBuilder & addImm(int64_t Val) const
Add a new immediate operand.
MCSchedModel SchedModel
Basic machine properties.
virtual ScheduleHazardRecognizer * CreateTargetHazardRecognizer(const TargetSubtargetInfo *STI, const ScheduleDAG *DAG) const
Allocate and return a hazard recognizer to use for this target when scheduling the machine instructio...
MachineRegisterInfo & getRegInfo()
getRegInfo - Return information about the registers currently in use.
MI-level stackmap operands.
Definition: StackMaps.h:36
virtual void genAlternativeCodeSequence(MachineInstr &Root, MachineCombinerPattern Pattern, SmallVectorImpl< MachineInstr *> &InsInstrs, SmallVectorImpl< MachineInstr *> &DelInstrs, DenseMap< unsigned, unsigned > &InstIdxForVirtReg) const
When getMachineCombinerPatterns() finds patterns, this function generates the instructions that could...
virtual unsigned isLoadFromStackSlot(const MachineInstr &MI, int &FrameIndex) const
If the specified machine instruction is a direct load from a stack slot, return the virtual or physic...
int16_t RegClass
This specifies the register class enumeration of the operand if the operand is a register.
Definition: MCInstrDesc.h:73
bool canFoldAsLoad(QueryType Type=IgnoreBundle) const
Return true for instructions that can be folded as memory operands in other instructions.
Definition: MachineInstr.h:753
const MCInstrDesc & get(unsigned Opcode) const
Return the machine instruction descriptor that corresponds to the specified instruction opcode...
Definition: MCInstrInfo.h:45
void setReg(unsigned Reg)
Change the register this operand corresponds to.
#define I(x, y, z)
Definition: MD5.cpp:58
#define N
void setSubReg(unsigned subReg)
virtual const TargetFrameLowering * getFrameLowering() const
virtual bool produceSameValue(const MachineInstr &MI0, const MachineInstr &MI1, const MachineRegisterInfo *MRI=nullptr) const
Return true if two machine instructions would produce identical values.
MI-level Statepoint operands.
Definition: StackMaps.h:155
uint32_t Size
Definition: Profile.cpp:47
bool hasOneNonDBGUse(unsigned RegNo) const
hasOneNonDBGUse - Return true if there is exactly one non-Debug instruction using the specified regis...
void removeSuccessor(MachineBasicBlock *Succ, bool NormalizeSuccProbs=false)
Remove successor from the successors list of this MachineBasicBlock.
bool isReg() const
isReg - Tests if this is a MO_Register operand.
bool mayLoad(QueryType Type=AnyInBundle) const
Return true if this instruction could possibly read memory.
Definition: MachineInstr.h:807
bool memoperands_empty() const
Return true if we don't have any memory operands which described the memory access done by this instr...
Definition: MachineInstr.h:546
assert(ImpDefSCC.getReg()==AMDGPU::SCC &&ImpDefSCC.isDef())
bool isIdenticalTo(const MachineInstr &Other, MICheckType Check=CheckDefs) const
Return true if this instruction is identical to Other.
void setMemRefs(MachineFunction &MF, ArrayRef< MachineMemOperand *> MemRefs)
Assign this MachineInstr's memory reference descriptor list.
bool hasReassociableSibling(const MachineInstr &Inst, bool &Commuted) const
Return true when Inst has reassociable sibling.
unsigned getMachineOpcode() const
This may only be called if isMachineOpcode returns true.
virtual ScheduleHazardRecognizer * CreateTargetMIHazardRecognizer(const InstrItineraryData *, const ScheduleDAG *DAG) const
Allocate and return a hazard recognizer to use for this target when scheduling the machine instructio...
bool isPosition() const
Definition: MachineInstr.h:995
unsigned getStackPointerRegisterToSaveRestore() const
If a physical register, this specifies the register that llvm.savestack/llvm.restorestack should save...
virtual bool isAssociativeAndCommutative(const MachineInstr &Inst) const
Return true when Inst is both associative and commutative.
const MCOperandInfo * OpInfo
Definition: MCInstrDesc.h:174
int getOperandCycle(unsigned ItinClassIndx, unsigned OperandIdx) const
Return the cycle for the given class and operand.
unsigned getSubRegIdxSize(unsigned Idx) const
Get the size of the bit range covered by a sub-register index.
bool hasUnmodeledSideEffects() const
Return true if this instruction has side effects that are not modeled by mayLoad / mayStore...
IRTranslator LLVM IR MI
bool isBarrier(QueryType Type=AnyInBundle) const
Returns true if the specified instruction stops control flow from executing the instruction immediate...
Definition: MachineInstr.h:640
bool isMBB() const
isMBB - Tests if this is a MO_MachineBasicBlock operand.
virtual bool getRegSequenceLikeInputs(const MachineInstr &MI, unsigned DefIdx, SmallVectorImpl< RegSubRegPairAndIdx > &InputRegs) const
Target-dependent implementation of getRegSequenceInputs.
bool getExtractSubregInputs(const MachineInstr &MI, unsigned DefIdx, RegSubRegPairAndIdx &InputReg) const
Build the equivalent inputs of a EXTRACT_SUBREG for the given MI and DefIdx.
const MachineOperand & getOperand(unsigned i) const
Definition: MachineInstr.h:414
static const TargetRegisterClass * canFoldCopy(const MachineInstr &MI, unsigned FoldIdx)
Machine model for scheduling, bundling, and heuristics.
Definition: MCSchedule.h:247
bool getInsertSubregInputs(const MachineInstr &MI, unsigned DefIdx, RegSubRegPair &BaseReg, RegSubRegPairAndIdx &InsertedReg) const
Build the equivalent inputs of a INSERT_SUBREG for the given MI and DefIdx.
bool isExtractSubreg() const
virtual ScheduleHazardRecognizer * CreateTargetPostRAHazardRecognizer(const InstrItineraryData *, const ScheduleDAG *DAG) const
Allocate and return a hazard recognizer to use for this target when scheduling the machine instructio...
int computeDefOperandLatency(const InstrItineraryData *ItinData, const MachineInstr &DefMI) const
If we can determine the operand latency from the def only, without itinerary lookup, do so.
virtual int getOperandLatency(const InstrItineraryData *ItinData, SDNode *DefNode, unsigned DefIdx, SDNode *UseNode, unsigned UseIdx) const
virtual void loadRegFromStackSlot(MachineBasicBlock &MBB, MachineBasicBlock::iterator MI, unsigned DestReg, int FrameIndex, const TargetRegisterClass *RC, const TargetRegisterInfo *TRI) const
Load the specified register of the given register class from the specified stack frame index...
bool isCommutable(QueryType Type=IgnoreBundle) const
Return true if this may be a 2- or 3-address instruction (of the form "X = op Y, Z, ..."), which produces the same result if Y and Z are exchanged.
Definition: MachineInstr.h:848
unsigned createVirtualRegister(const TargetRegisterClass *RegClass, StringRef Name="")
createVirtualRegister - Create and return a new virtual register in the function with the specified r...
virtual int getSPAdjust(const MachineInstr &MI) const
Returns the actual stack pointer adjustment made by an instruction as part of a call sequence...
bool isNotDuplicable(QueryType Type=AnyInBundle) const
Return true if this instruction cannot be safely duplicated.
Definition: MachineInstr.h:723
This file describes how to lower LLVM code to machine code.
mmo_iterator memoperands_end() const
Access to memory operands of the instruction.
Definition: MachineInstr.h:541
virtual const TargetRegisterClass * getPointerRegClass(const MachineFunction &MF, unsigned Kind=0) const
Returns a TargetRegisterClass used for pointer values.
A pair composed of a pair of a register and a sub-register index, and another sub-register index...
bool is_contained(R &&Range, const E &Element)
Wrapper function around std::find to detect if an element exists in a container.
Definition: STLExtras.h:1101