LLVM  9.0.0svn
TargetInstrInfo.h
Go to the documentation of this file.
1 //===- llvm/CodeGen/TargetInstrInfo.h - Instruction Info --------*- C++ -*-===//
2 //
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 //
7 //===----------------------------------------------------------------------===//
8 //
9 // This file describes the target machine instruction set to the code generator.
10 //
11 //===----------------------------------------------------------------------===//
12 
13 #ifndef LLVM_TARGET_TARGETINSTRINFO_H
14 #define LLVM_TARGET_TARGETINSTRINFO_H
15 
16 #include "llvm/ADT/ArrayRef.h"
17 #include "llvm/ADT/DenseMap.h"
18 #include "llvm/ADT/DenseMapInfo.h"
19 #include "llvm/ADT/None.h"
29 #include "llvm/MC/MCInstrInfo.h"
32 #include <cassert>
33 #include <cstddef>
34 #include <cstdint>
35 #include <utility>
36 #include <vector>
37 
38 namespace llvm {
39 
40 class DFAPacketizer;
41 class InstrItineraryData;
42 class LiveIntervals;
43 class LiveVariables;
44 class MachineMemOperand;
45 class MachineRegisterInfo;
46 class MCAsmInfo;
47 class MCInst;
48 struct MCSchedModel;
49 class Module;
50 class ScheduleDAG;
51 class ScheduleHazardRecognizer;
52 class SDNode;
53 class SelectionDAG;
54 class RegScavenger;
55 class TargetRegisterClass;
56 class TargetRegisterInfo;
57 class TargetSchedModel;
58 class TargetSubtargetInfo;
59 
60 template <class T> class SmallVectorImpl;
61 
62 //---------------------------------------------------------------------------
63 ///
64 /// TargetInstrInfo - Interface to description of machine instruction set
65 ///
66 class TargetInstrInfo : public MCInstrInfo {
67 public:
68  TargetInstrInfo(unsigned CFSetupOpcode = ~0u, unsigned CFDestroyOpcode = ~0u,
69  unsigned CatchRetOpcode = ~0u, unsigned ReturnOpcode = ~0u)
70  : CallFrameSetupOpcode(CFSetupOpcode),
71  CallFrameDestroyOpcode(CFDestroyOpcode), CatchRetOpcode(CatchRetOpcode),
72  ReturnOpcode(ReturnOpcode) {}
73  TargetInstrInfo(const TargetInstrInfo &) = delete;
74  TargetInstrInfo &operator=(const TargetInstrInfo &) = delete;
75  virtual ~TargetInstrInfo();
76 
77  static bool isGenericOpcode(unsigned Opc) {
78  return Opc <= TargetOpcode::GENERIC_OP_END;
79  }
80 
81  /// Given a machine instruction descriptor, returns the register
82  /// class constraint for OpNum, or NULL.
83  const TargetRegisterClass *getRegClass(const MCInstrDesc &MCID, unsigned OpNum,
84  const TargetRegisterInfo *TRI,
85  const MachineFunction &MF) const;
86 
87  /// Return true if the instruction is trivially rematerializable, meaning it
88  /// has no side effects and requires no operands that aren't always available.
89  /// This means the only allowed uses are constants and unallocatable physical
90  /// registers so that the instructions result is independent of the place
91  /// in the function.
93  AliasAnalysis *AA = nullptr) const {
94  return MI.getOpcode() == TargetOpcode::IMPLICIT_DEF ||
95  (MI.getDesc().isRematerializable() &&
97  isReallyTriviallyReMaterializableGeneric(MI, AA)));
98  }
99 
100 protected:
101  /// For instructions with opcodes for which the M_REMATERIALIZABLE flag is
102  /// set, this hook lets the target specify whether the instruction is actually
103  /// trivially rematerializable, taking into consideration its operands. This
104  /// predicate must return false if the instruction has any side effects other
105  /// than producing a value, or if it requres any address registers that are
106  /// not always available.
107  /// Requirements must be check as stated in isTriviallyReMaterializable() .
109  AliasAnalysis *AA) const {
110  return false;
111  }
112 
113  /// This method commutes the operands of the given machine instruction MI.
114  /// The operands to be commuted are specified by their indices OpIdx1 and
115  /// OpIdx2.
116  ///
117  /// If a target has any instructions that are commutable but require
118  /// converting to different instructions or making non-trivial changes
119  /// to commute them, this method can be overloaded to do that.
120  /// The default implementation simply swaps the commutable operands.
121  ///
122  /// If NewMI is false, MI is modified in place and returned; otherwise, a
123  /// new machine instruction is created and returned.
124  ///
125  /// Do not call this method for a non-commutable instruction.
126  /// Even though the instruction is commutable, the method may still
127  /// fail to commute the operands, null pointer is returned in such cases.
128  virtual MachineInstr *commuteInstructionImpl(MachineInstr &MI, bool NewMI,
129  unsigned OpIdx1,
130  unsigned OpIdx2) const;
131 
132  /// Assigns the (CommutableOpIdx1, CommutableOpIdx2) pair of commutable
133  /// operand indices to (ResultIdx1, ResultIdx2).
134  /// One or both input values of the pair: (ResultIdx1, ResultIdx2) may be
135  /// predefined to some indices or be undefined (designated by the special
136  /// value 'CommuteAnyOperandIndex').
137  /// The predefined result indices cannot be re-defined.
138  /// The function returns true iff after the result pair redefinition
139  /// the fixed result pair is equal to or equivalent to the source pair of
140  /// indices: (CommutableOpIdx1, CommutableOpIdx2). It is assumed here that
141  /// the pairs (x,y) and (y,x) are equivalent.
142  static bool fixCommutedOpIndices(unsigned &ResultIdx1, unsigned &ResultIdx2,
143  unsigned CommutableOpIdx1,
144  unsigned CommutableOpIdx2);
145 
146 private:
147  /// For instructions with opcodes for which the M_REMATERIALIZABLE flag is
148  /// set and the target hook isReallyTriviallyReMaterializable returns false,
149  /// this function does target-independent tests to determine if the
150  /// instruction is really trivially rematerializable.
151  bool isReallyTriviallyReMaterializableGeneric(const MachineInstr &MI,
152  AliasAnalysis *AA) const;
153 
154 public:
155  /// These methods return the opcode of the frame setup/destroy instructions
156  /// if they exist (-1 otherwise). Some targets use pseudo instructions in
157  /// order to abstract away the difference between operating with a frame
158  /// pointer and operating without, through the use of these two instructions.
159  ///
160  unsigned getCallFrameSetupOpcode() const { return CallFrameSetupOpcode; }
161  unsigned getCallFrameDestroyOpcode() const { return CallFrameDestroyOpcode; }
162 
163  /// Returns true if the argument is a frame pseudo instruction.
164  bool isFrameInstr(const MachineInstr &I) const {
165  return I.getOpcode() == getCallFrameSetupOpcode() ||
167  }
168 
169  /// Returns true if the argument is a frame setup pseudo instruction.
170  bool isFrameSetup(const MachineInstr &I) const {
171  return I.getOpcode() == getCallFrameSetupOpcode();
172  }
173 
174  /// Returns size of the frame associated with the given frame instruction.
175  /// For frame setup instruction this is frame that is set up space set up
176  /// after the instruction. For frame destroy instruction this is the frame
177  /// freed by the caller.
178  /// Note, in some cases a call frame (or a part of it) may be prepared prior
179  /// to the frame setup instruction. It occurs in the calls that involve
180  /// inalloca arguments. This function reports only the size of the frame part
181  /// that is set up between the frame setup and destroy pseudo instructions.
182  int64_t getFrameSize(const MachineInstr &I) const {
183  assert(isFrameInstr(I) && "Not a frame instruction");
184  assert(I.getOperand(0).getImm() >= 0);
185  return I.getOperand(0).getImm();
186  }
187 
188  /// Returns the total frame size, which is made up of the space set up inside
189  /// the pair of frame start-stop instructions and the space that is set up
190  /// prior to the pair.
191  int64_t getFrameTotalSize(const MachineInstr &I) const {
192  if (isFrameSetup(I)) {
193  assert(I.getOperand(1).getImm() >= 0 &&
194  "Frame size must not be negative");
195  return getFrameSize(I) + I.getOperand(1).getImm();
196  }
197  return getFrameSize(I);
198  }
199 
200  unsigned getCatchReturnOpcode() const { return CatchRetOpcode; }
201  unsigned getReturnOpcode() const { return ReturnOpcode; }
202 
203  /// Returns the actual stack pointer adjustment made by an instruction
204  /// as part of a call sequence. By default, only call frame setup/destroy
205  /// instructions adjust the stack, but targets may want to override this
206  /// to enable more fine-grained adjustment, or adjust by a different value.
207  virtual int getSPAdjust(const MachineInstr &MI) const;
208 
209  /// Return true if the instruction is a "coalescable" extension instruction.
210  /// That is, it's like a copy where it's legal for the source to overlap the
211  /// destination. e.g. X86::MOVSX64rr32. If this returns true, then it's
212  /// expected the pre-extension value is available as a subreg of the result
213  /// register. This also returns the sub-register index in SubIdx.
214  virtual bool isCoalescableExtInstr(const MachineInstr &MI, unsigned &SrcReg,
215  unsigned &DstReg, unsigned &SubIdx) const {
216  return false;
217  }
218 
219  /// If the specified machine instruction is a direct
220  /// load from a stack slot, return the virtual or physical register number of
221  /// the destination along with the FrameIndex of the loaded stack slot. If
222  /// not, return 0. This predicate must return 0 if the instruction has
223  /// any side effects other than loading from the stack slot.
224  virtual unsigned isLoadFromStackSlot(const MachineInstr &MI,
225  int &FrameIndex) const {
226  return 0;
227  }
228 
229  /// Optional extension of isLoadFromStackSlot that returns the number of
230  /// bytes loaded from the stack. This must be implemented if a backend
231  /// supports partial stack slot spills/loads to further disambiguate
232  /// what the load does.
233  virtual unsigned isLoadFromStackSlot(const MachineInstr &MI,
234  int &FrameIndex,
235  unsigned &MemBytes) const {
236  MemBytes = 0;
237  return isLoadFromStackSlot(MI, FrameIndex);
238  }
239 
240  /// Check for post-frame ptr elimination stack locations as well.
241  /// This uses a heuristic so it isn't reliable for correctness.
242  virtual unsigned isLoadFromStackSlotPostFE(const MachineInstr &MI,
243  int &FrameIndex) const {
244  return 0;
245  }
246 
247  /// If the specified machine instruction has a load from a stack slot,
248  /// return true along with the FrameIndices of the loaded stack slot and the
249  /// machine mem operands containing the reference.
250  /// If not, return false. Unlike isLoadFromStackSlot, this returns true for
251  /// any instructions that loads from the stack. This is just a hint, as some
252  /// cases may be missed.
253  virtual bool hasLoadFromStackSlot(
254  const MachineInstr &MI,
256 
257  /// If the specified machine instruction is a direct
258  /// store to a stack slot, return the virtual or physical register number of
259  /// the source reg along with the FrameIndex of the loaded stack slot. If
260  /// not, return 0. This predicate must return 0 if the instruction has
261  /// any side effects other than storing to the stack slot.
262  virtual unsigned isStoreToStackSlot(const MachineInstr &MI,
263  int &FrameIndex) const {
264  return 0;
265  }
266 
267  /// Optional extension of isStoreToStackSlot that returns the number of
268  /// bytes stored to the stack. This must be implemented if a backend
269  /// supports partial stack slot spills/loads to further disambiguate
270  /// what the store does.
271  virtual unsigned isStoreToStackSlot(const MachineInstr &MI,
272  int &FrameIndex,
273  unsigned &MemBytes) const {
274  MemBytes = 0;
275  return isStoreToStackSlot(MI, FrameIndex);
276  }
277 
278  /// Check for post-frame ptr elimination stack locations as well.
279  /// This uses a heuristic, so it isn't reliable for correctness.
280  virtual unsigned isStoreToStackSlotPostFE(const MachineInstr &MI,
281  int &FrameIndex) const {
282  return 0;
283  }
284 
285  /// If the specified machine instruction has a store to a stack slot,
286  /// return true along with the FrameIndices of the loaded stack slot and the
287  /// machine mem operands containing the reference.
288  /// If not, return false. Unlike isStoreToStackSlot,
289  /// this returns true for any instructions that stores to the
290  /// stack. This is just a hint, as some cases may be missed.
291  virtual bool hasStoreToStackSlot(
292  const MachineInstr &MI,
294 
295  /// Return true if the specified machine instruction
296  /// is a copy of one stack slot to another and has no other effect.
297  /// Provide the identity of the two frame indices.
298  virtual bool isStackSlotCopy(const MachineInstr &MI, int &DestFrameIndex,
299  int &SrcFrameIndex) const {
300  return false;
301  }
302 
303  /// Compute the size in bytes and offset within a stack slot of a spilled
304  /// register or subregister.
305  ///
306  /// \param [out] Size in bytes of the spilled value.
307  /// \param [out] Offset in bytes within the stack slot.
308  /// \returns true if both Size and Offset are successfully computed.
309  ///
310  /// Not all subregisters have computable spill slots. For example,
311  /// subregisters registers may not be byte-sized, and a pair of discontiguous
312  /// subregisters has no single offset.
313  ///
314  /// Targets with nontrivial bigendian implementations may need to override
315  /// this, particularly to support spilled vector registers.
316  virtual bool getStackSlotRange(const TargetRegisterClass *RC, unsigned SubIdx,
317  unsigned &Size, unsigned &Offset,
318  const MachineFunction &MF) const;
319 
320  /// Returns the size in bytes of the specified MachineInstr, or ~0U
321  /// when this function is not implemented by a target.
322  virtual unsigned getInstSizeInBytes(const MachineInstr &MI) const {
323  return ~0U;
324  }
325 
326  /// Return true if the instruction is as cheap as a move instruction.
327  ///
328  /// Targets for different archs need to override this, and different
329  /// micro-architectures can also be finely tuned inside.
330  virtual bool isAsCheapAsAMove(const MachineInstr &MI) const {
331  return MI.isAsCheapAsAMove();
332  }
333 
334  /// Return true if the instruction should be sunk by MachineSink.
335  ///
336  /// MachineSink determines on its own whether the instruction is safe to sink;
337  /// this gives the target a hook to override the default behavior with regards
338  /// to which instructions should be sunk.
339  virtual bool shouldSink(const MachineInstr &MI) const { return true; }
340 
341  /// Re-issue the specified 'original' instruction at the
342  /// specific location targeting a new destination register.
343  /// The register in Orig->getOperand(0).getReg() will be substituted by
344  /// DestReg:SubIdx. Any existing subreg index is preserved or composed with
345  /// SubIdx.
346  virtual void reMaterialize(MachineBasicBlock &MBB,
347  MachineBasicBlock::iterator MI, unsigned DestReg,
348  unsigned SubIdx, const MachineInstr &Orig,
349  const TargetRegisterInfo &TRI) const;
350 
351  /// Clones instruction or the whole instruction bundle \p Orig and
352  /// insert into \p MBB before \p InsertBefore. The target may update operands
353  /// that are required to be unique.
354  ///
355  /// \p Orig must not return true for MachineInstr::isNotDuplicable().
357  MachineBasicBlock::iterator InsertBefore,
358  const MachineInstr &Orig) const;
359 
360  /// This method must be implemented by targets that
361  /// set the M_CONVERTIBLE_TO_3_ADDR flag. When this flag is set, the target
362  /// may be able to convert a two-address instruction into one or more true
363  /// three-address instructions on demand. This allows the X86 target (for
364  /// example) to convert ADD and SHL instructions into LEA instructions if they
365  /// would require register copies due to two-addressness.
366  ///
367  /// This method returns a null pointer if the transformation cannot be
368  /// performed, otherwise it returns the last new instruction.
369  ///
371  MachineInstr &MI,
372  LiveVariables *LV) const {
373  return nullptr;
374  }
375 
376  // This constant can be used as an input value of operand index passed to
377  // the method findCommutedOpIndices() to tell the method that the
378  // corresponding operand index is not pre-defined and that the method
379  // can pick any commutable operand.
380  static const unsigned CommuteAnyOperandIndex = ~0U;
381 
382  /// This method commutes the operands of the given machine instruction MI.
383  ///
384  /// The operands to be commuted are specified by their indices OpIdx1 and
385  /// OpIdx2. OpIdx1 and OpIdx2 arguments may be set to a special value
386  /// 'CommuteAnyOperandIndex', which means that the method is free to choose
387  /// any arbitrarily chosen commutable operand. If both arguments are set to
388  /// 'CommuteAnyOperandIndex' then the method looks for 2 different commutable
389  /// operands; then commutes them if such operands could be found.
390  ///
391  /// If NewMI is false, MI is modified in place and returned; otherwise, a
392  /// new machine instruction is created and returned.
393  ///
394  /// Do not call this method for a non-commutable instruction or
395  /// for non-commuable operands.
396  /// Even though the instruction is commutable, the method may still
397  /// fail to commute the operands, null pointer is returned in such cases.
398  MachineInstr *
399  commuteInstruction(MachineInstr &MI, bool NewMI = false,
400  unsigned OpIdx1 = CommuteAnyOperandIndex,
401  unsigned OpIdx2 = CommuteAnyOperandIndex) const;
402 
403  /// Returns true iff the routine could find two commutable operands in the
404  /// given machine instruction.
405  /// The 'SrcOpIdx1' and 'SrcOpIdx2' are INPUT and OUTPUT arguments.
406  /// If any of the INPUT values is set to the special value
407  /// 'CommuteAnyOperandIndex' then the method arbitrarily picks a commutable
408  /// operand, then returns its index in the corresponding argument.
409  /// If both of INPUT values are set to 'CommuteAnyOperandIndex' then method
410  /// looks for 2 commutable operands.
411  /// If INPUT values refer to some operands of MI, then the method simply
412  /// returns true if the corresponding operands are commutable and returns
413  /// false otherwise.
414  ///
415  /// For example, calling this method this way:
416  /// unsigned Op1 = 1, Op2 = CommuteAnyOperandIndex;
417  /// findCommutedOpIndices(MI, Op1, Op2);
418  /// can be interpreted as a query asking to find an operand that would be
419  /// commutable with the operand#1.
420  virtual bool findCommutedOpIndices(MachineInstr &MI, unsigned &SrcOpIdx1,
421  unsigned &SrcOpIdx2) const;
422 
423  /// A pair composed of a register and a sub-register index.
424  /// Used to give some type checking when modeling Reg:SubReg.
425  struct RegSubRegPair {
426  unsigned Reg;
427  unsigned SubReg;
428 
429  RegSubRegPair(unsigned Reg = 0, unsigned SubReg = 0)
430  : Reg(Reg), SubReg(SubReg) {}
431 
432  bool operator==(const RegSubRegPair& P) const {
433  return Reg == P.Reg && SubReg == P.SubReg;
434  }
435  bool operator!=(const RegSubRegPair& P) const {
436  return !(*this == P);
437  }
438  };
439 
440  /// A pair composed of a pair of a register and a sub-register index,
441  /// and another sub-register index.
442  /// Used to give some type checking when modeling Reg:SubReg1, SubReg2.
444  unsigned SubIdx;
445 
446  RegSubRegPairAndIdx(unsigned Reg = 0, unsigned SubReg = 0,
447  unsigned SubIdx = 0)
448  : RegSubRegPair(Reg, SubReg), SubIdx(SubIdx) {}
449  };
450 
451  /// Build the equivalent inputs of a REG_SEQUENCE for the given \p MI
452  /// and \p DefIdx.
453  /// \p [out] InputRegs of the equivalent REG_SEQUENCE. Each element of
454  /// the list is modeled as <Reg:SubReg, SubIdx>. Operands with the undef
455  /// flag are not added to this list.
456  /// E.g., REG_SEQUENCE %1:sub1, sub0, %2, sub1 would produce
457  /// two elements:
458  /// - %1:sub1, sub0
459  /// - %2<:0>, sub1
460  ///
461  /// \returns true if it is possible to build such an input sequence
462  /// with the pair \p MI, \p DefIdx. False otherwise.
463  ///
464  /// \pre MI.isRegSequence() or MI.isRegSequenceLike().
465  ///
466  /// \note The generic implementation does not provide any support for
467  /// MI.isRegSequenceLike(). In other words, one has to override
468  /// getRegSequenceLikeInputs for target specific instructions.
469  bool
470  getRegSequenceInputs(const MachineInstr &MI, unsigned DefIdx,
471  SmallVectorImpl<RegSubRegPairAndIdx> &InputRegs) const;
472 
473  /// Build the equivalent inputs of a EXTRACT_SUBREG for the given \p MI
474  /// and \p DefIdx.
475  /// \p [out] InputReg of the equivalent EXTRACT_SUBREG.
476  /// E.g., EXTRACT_SUBREG %1:sub1, sub0, sub1 would produce:
477  /// - %1:sub1, sub0
478  ///
479  /// \returns true if it is possible to build such an input sequence
480  /// with the pair \p MI, \p DefIdx and the operand has no undef flag set.
481  /// False otherwise.
482  ///
483  /// \pre MI.isExtractSubreg() or MI.isExtractSubregLike().
484  ///
485  /// \note The generic implementation does not provide any support for
486  /// MI.isExtractSubregLike(). In other words, one has to override
487  /// getExtractSubregLikeInputs for target specific instructions.
488  bool getExtractSubregInputs(const MachineInstr &MI, unsigned DefIdx,
489  RegSubRegPairAndIdx &InputReg) const;
490 
491  /// Build the equivalent inputs of a INSERT_SUBREG for the given \p MI
492  /// and \p DefIdx.
493  /// \p [out] BaseReg and \p [out] InsertedReg contain
494  /// the equivalent inputs of INSERT_SUBREG.
495  /// E.g., INSERT_SUBREG %0:sub0, %1:sub1, sub3 would produce:
496  /// - BaseReg: %0:sub0
497  /// - InsertedReg: %1:sub1, sub3
498  ///
499  /// \returns true if it is possible to build such an input sequence
500  /// with the pair \p MI, \p DefIdx and the operand has no undef flag set.
501  /// False otherwise.
502  ///
503  /// \pre MI.isInsertSubreg() or MI.isInsertSubregLike().
504  ///
505  /// \note The generic implementation does not provide any support for
506  /// MI.isInsertSubregLike(). In other words, one has to override
507  /// getInsertSubregLikeInputs for target specific instructions.
508  bool getInsertSubregInputs(const MachineInstr &MI, unsigned DefIdx,
509  RegSubRegPair &BaseReg,
510  RegSubRegPairAndIdx &InsertedReg) const;
511 
512  /// Return true if two machine instructions would produce identical values.
513  /// By default, this is only true when the two instructions
514  /// are deemed identical except for defs. If this function is called when the
515  /// IR is still in SSA form, the caller can pass the MachineRegisterInfo for
516  /// aggressive checks.
517  virtual bool produceSameValue(const MachineInstr &MI0,
518  const MachineInstr &MI1,
519  const MachineRegisterInfo *MRI = nullptr) const;
520 
521  /// \returns true if a branch from an instruction with opcode \p BranchOpc
522  /// bytes is capable of jumping to a position \p BrOffset bytes away.
523  virtual bool isBranchOffsetInRange(unsigned BranchOpc,
524  int64_t BrOffset) const {
525  llvm_unreachable("target did not implement");
526  }
527 
528  /// \returns The block that branch instruction \p MI jumps to.
530  llvm_unreachable("target did not implement");
531  }
532 
533  /// Insert an unconditional indirect branch at the end of \p MBB to \p
534  /// NewDestBB. \p BrOffset indicates the offset of \p NewDestBB relative to
535  /// the offset of the position to insert the new branch.
536  ///
537  /// \returns The number of bytes added to the block.
539  MachineBasicBlock &NewDestBB,
540  const DebugLoc &DL,
541  int64_t BrOffset = 0,
542  RegScavenger *RS = nullptr) const {
543  llvm_unreachable("target did not implement");
544  }
545 
546  /// Analyze the branching code at the end of MBB, returning
547  /// true if it cannot be understood (e.g. it's a switch dispatch or isn't
548  /// implemented for a target). Upon success, this returns false and returns
549  /// with the following information in various cases:
550  ///
551  /// 1. If this block ends with no branches (it just falls through to its succ)
552  /// just return false, leaving TBB/FBB null.
553  /// 2. If this block ends with only an unconditional branch, it sets TBB to be
554  /// the destination block.
555  /// 3. If this block ends with a conditional branch and it falls through to a
556  /// successor block, it sets TBB to be the branch destination block and a
557  /// list of operands that evaluate the condition. These operands can be
558  /// passed to other TargetInstrInfo methods to create new branches.
559  /// 4. If this block ends with a conditional branch followed by an
560  /// unconditional branch, it returns the 'true' destination in TBB, the
561  /// 'false' destination in FBB, and a list of operands that evaluate the
562  /// condition. These operands can be passed to other TargetInstrInfo
563  /// methods to create new branches.
564  ///
565  /// Note that removeBranch and insertBranch must be implemented to support
566  /// cases where this method returns success.
567  ///
568  /// If AllowModify is true, then this routine is allowed to modify the basic
569  /// block (e.g. delete instructions after the unconditional branch).
570  ///
571  /// The CFG information in MBB.Predecessors and MBB.Successors must be valid
572  /// before calling this function.
574  MachineBasicBlock *&FBB,
576  bool AllowModify = false) const {
577  return true;
578  }
579 
580  /// Represents a predicate at the MachineFunction level. The control flow a
581  /// MachineBranchPredicate represents is:
582  ///
583  /// Reg = LHS `Predicate` RHS == ConditionDef
584  /// if Reg then goto TrueDest else goto FalseDest
585  ///
588  PRED_EQ, // True if two values are equal
589  PRED_NE, // True if two values are not equal
590  PRED_INVALID // Sentinel value
591  };
592 
593  ComparePredicate Predicate = PRED_INVALID;
596  MachineBasicBlock *TrueDest = nullptr;
597  MachineBasicBlock *FalseDest = nullptr;
598  MachineInstr *ConditionDef = nullptr;
599 
600  /// SingleUseCondition is true if ConditionDef is dead except for the
601  /// branch(es) at the end of the basic block.
602  ///
603  bool SingleUseCondition = false;
604 
605  explicit MachineBranchPredicate() = default;
606  };
607 
608  /// Analyze the branching code at the end of MBB and parse it into the
609  /// MachineBranchPredicate structure if possible. Returns false on success
610  /// and true on failure.
611  ///
612  /// If AllowModify is true, then this routine is allowed to modify the basic
613  /// block (e.g. delete instructions after the unconditional branch).
614  ///
617  bool AllowModify = false) const {
618  return true;
619  }
620 
621  /// Remove the branching code at the end of the specific MBB.
622  /// This is only invoked in cases where AnalyzeBranch returns success. It
623  /// returns the number of instructions that were removed.
624  /// If \p BytesRemoved is non-null, report the change in code size from the
625  /// removed instructions.
626  virtual unsigned removeBranch(MachineBasicBlock &MBB,
627  int *BytesRemoved = nullptr) const {
628  llvm_unreachable("Target didn't implement TargetInstrInfo::removeBranch!");
629  }
630 
631  /// Insert branch code into the end of the specified MachineBasicBlock. The
632  /// operands to this method are the same as those returned by AnalyzeBranch.
633  /// This is only invoked in cases where AnalyzeBranch returns success. It
634  /// returns the number of instructions inserted. If \p BytesAdded is non-null,
635  /// report the change in code size from the added instructions.
636  ///
637  /// It is also invoked by tail merging to add unconditional branches in
638  /// cases where AnalyzeBranch doesn't apply because there was no original
639  /// branch to analyze. At least this much must be implemented, else tail
640  /// merging needs to be disabled.
641  ///
642  /// The CFG information in MBB.Predecessors and MBB.Successors must be valid
643  /// before calling this function.
644  virtual unsigned insertBranch(MachineBasicBlock &MBB, MachineBasicBlock *TBB,
645  MachineBasicBlock *FBB,
647  const DebugLoc &DL,
648  int *BytesAdded = nullptr) const {
649  llvm_unreachable("Target didn't implement TargetInstrInfo::insertBranch!");
650  }
651 
653  MachineBasicBlock *DestBB,
654  const DebugLoc &DL,
655  int *BytesAdded = nullptr) const {
656  return insertBranch(MBB, DestBB, nullptr, ArrayRef<MachineOperand>(), DL,
657  BytesAdded);
658  }
659 
660  /// Analyze the loop code, return true if it cannot be understoo. Upon
661  /// success, this function returns false and returns information about the
662  /// induction variable and compare instruction used at the end.
663  virtual bool analyzeLoop(MachineLoop &L, MachineInstr *&IndVarInst,
664  MachineInstr *&CmpInst) const {
665  return true;
666  }
667 
668  /// Generate code to reduce the loop iteration by one and check if the loop
669  /// is finished. Return the value/register of the new loop count. We need
670  /// this function when peeling off one or more iterations of a loop. This
671  /// function assumes the nth iteration is peeled first.
672  virtual unsigned reduceLoopCount(MachineBasicBlock &MBB, MachineInstr *IndVar,
673  MachineInstr &Cmp,
676  unsigned Iter, unsigned MaxIter) const {
677  llvm_unreachable("Target didn't implement ReduceLoopCount");
678  }
679 
680  /// Delete the instruction OldInst and everything after it, replacing it with
681  /// an unconditional branch to NewDest. This is used by the tail merging pass.
683  MachineBasicBlock *NewDest) const;
684 
685  /// Return true if it's legal to split the given basic
686  /// block at the specified instruction (i.e. instruction would be the start
687  /// of a new basic block).
689  MachineBasicBlock::iterator MBBI) const {
690  return true;
691  }
692 
693  /// Return true if it's profitable to predicate
694  /// instructions with accumulated instruction latency of "NumCycles"
695  /// of the specified basic block, where the probability of the instructions
696  /// being executed is given by Probability, and Confidence is a measure
697  /// of our confidence that it will be properly predicted.
698  virtual bool isProfitableToIfCvt(MachineBasicBlock &MBB, unsigned NumCycles,
699  unsigned ExtraPredCycles,
700  BranchProbability Probability) const {
701  return false;
702  }
703 
704  /// Second variant of isProfitableToIfCvt. This one
705  /// checks for the case where two basic blocks from true and false path
706  /// of a if-then-else (diamond) are predicated on mutally exclusive
707  /// predicates, where the probability of the true path being taken is given
708  /// by Probability, and Confidence is a measure of our confidence that it
709  /// will be properly predicted.
710  virtual bool isProfitableToIfCvt(MachineBasicBlock &TMBB, unsigned NumTCycles,
711  unsigned ExtraTCycles,
712  MachineBasicBlock &FMBB, unsigned NumFCycles,
713  unsigned ExtraFCycles,
714  BranchProbability Probability) const {
715  return false;
716  }
717 
718  /// Return true if it's profitable for if-converter to duplicate instructions
719  /// of specified accumulated instruction latencies in the specified MBB to
720  /// enable if-conversion.
721  /// The probability of the instructions being executed is given by
722  /// Probability, and Confidence is a measure of our confidence that it
723  /// will be properly predicted.
725  unsigned NumCycles,
726  BranchProbability Probability) const {
727  return false;
728  }
729 
730  /// Return true if it's profitable to unpredicate
731  /// one side of a 'diamond', i.e. two sides of if-else predicated on mutually
732  /// exclusive predicates.
733  /// e.g.
734  /// subeq r0, r1, #1
735  /// addne r0, r1, #1
736  /// =>
737  /// sub r0, r1, #1
738  /// addne r0, r1, #1
739  ///
740  /// This may be profitable is conditional instructions are always executed.
742  MachineBasicBlock &FMBB) const {
743  return false;
744  }
745 
746  /// Return true if it is possible to insert a select
747  /// instruction that chooses between TrueReg and FalseReg based on the
748  /// condition code in Cond.
749  ///
750  /// When successful, also return the latency in cycles from TrueReg,
751  /// FalseReg, and Cond to the destination register. In most cases, a select
752  /// instruction will be 1 cycle, so CondCycles = TrueCycles = FalseCycles = 1
753  ///
754  /// Some x86 implementations have 2-cycle cmov instructions.
755  ///
756  /// @param MBB Block where select instruction would be inserted.
757  /// @param Cond Condition returned by AnalyzeBranch.
758  /// @param TrueReg Virtual register to select when Cond is true.
759  /// @param FalseReg Virtual register to select when Cond is false.
760  /// @param CondCycles Latency from Cond+Branch to select output.
761  /// @param TrueCycles Latency from TrueReg to select output.
762  /// @param FalseCycles Latency from FalseReg to select output.
763  virtual bool canInsertSelect(const MachineBasicBlock &MBB,
764  ArrayRef<MachineOperand> Cond, unsigned TrueReg,
765  unsigned FalseReg, int &CondCycles,
766  int &TrueCycles, int &FalseCycles) const {
767  return false;
768  }
769 
770  /// Insert a select instruction into MBB before I that will copy TrueReg to
771  /// DstReg when Cond is true, and FalseReg to DstReg when Cond is false.
772  ///
773  /// This function can only be called after canInsertSelect() returned true.
774  /// The condition in Cond comes from AnalyzeBranch, and it can be assumed
775  /// that the same flags or registers required by Cond are available at the
776  /// insertion point.
777  ///
778  /// @param MBB Block where select instruction should be inserted.
779  /// @param I Insertion point.
780  /// @param DL Source location for debugging.
781  /// @param DstReg Virtual register to be defined by select instruction.
782  /// @param Cond Condition as computed by AnalyzeBranch.
783  /// @param TrueReg Virtual register to copy when Cond is true.
784  /// @param FalseReg Virtual register to copy when Cons is false.
785  virtual void insertSelect(MachineBasicBlock &MBB,
787  unsigned DstReg, ArrayRef<MachineOperand> Cond,
788  unsigned TrueReg, unsigned FalseReg) const {
789  llvm_unreachable("Target didn't implement TargetInstrInfo::insertSelect!");
790  }
791 
792  /// Analyze the given select instruction, returning true if
793  /// it cannot be understood. It is assumed that MI->isSelect() is true.
794  ///
795  /// When successful, return the controlling condition and the operands that
796  /// determine the true and false result values.
797  ///
798  /// Result = SELECT Cond, TrueOp, FalseOp
799  ///
800  /// Some targets can optimize select instructions, for example by predicating
801  /// the instruction defining one of the operands. Such targets should set
802  /// Optimizable.
803  ///
804  /// @param MI Select instruction to analyze.
805  /// @param Cond Condition controlling the select.
806  /// @param TrueOp Operand number of the value selected when Cond is true.
807  /// @param FalseOp Operand number of the value selected when Cond is false.
808  /// @param Optimizable Returned as true if MI is optimizable.
809  /// @returns False on success.
810  virtual bool analyzeSelect(const MachineInstr &MI,
812  unsigned &TrueOp, unsigned &FalseOp,
813  bool &Optimizable) const {
814  assert(MI.getDesc().isSelect() && "MI must be a select instruction");
815  return true;
816  }
817 
818  /// Given a select instruction that was understood by
819  /// analyzeSelect and returned Optimizable = true, attempt to optimize MI by
820  /// merging it with one of its operands. Returns NULL on failure.
821  ///
822  /// When successful, returns the new select instruction. The client is
823  /// responsible for deleting MI.
824  ///
825  /// If both sides of the select can be optimized, PreferFalse is used to pick
826  /// a side.
827  ///
828  /// @param MI Optimizable select instruction.
829  /// @param NewMIs Set that record all MIs in the basic block up to \p
830  /// MI. Has to be updated with any newly created MI or deleted ones.
831  /// @param PreferFalse Try to optimize FalseOp instead of TrueOp.
832  /// @returns Optimized instruction or NULL.
835  bool PreferFalse = false) const {
836  // This function must be implemented if Optimizable is ever set.
837  llvm_unreachable("Target must implement TargetInstrInfo::optimizeSelect!");
838  }
839 
840  /// Emit instructions to copy a pair of physical registers.
841  ///
842  /// This function should support copies within any legal register class as
843  /// well as any cross-class copies created during instruction selection.
844  ///
845  /// The source and destination registers may overlap, which may require a
846  /// careful implementation when multiple copy instructions are required for
847  /// large registers. See for example the ARM target.
848  virtual void copyPhysReg(MachineBasicBlock &MBB,
850  unsigned DestReg, unsigned SrcReg,
851  bool KillSrc) const {
852  llvm_unreachable("Target didn't implement TargetInstrInfo::copyPhysReg!");
853  }
854 
855 protected:
856  /// Target-dependent implemenation for IsCopyInstr.
857  /// If the specific machine instruction is a instruction that moves/copies
858  /// value from one register to another register return true along with
859  /// @Source machine operand and @Destination machine operand.
860  virtual bool isCopyInstrImpl(const MachineInstr &MI,
861  const MachineOperand *&Source,
862  const MachineOperand *&Destination) const {
863  return false;
864  }
865 
866 public:
867  /// If the specific machine instruction is a instruction that moves/copies
868  /// value from one register to another register return true along with
869  /// @Source machine operand and @Destination machine operand.
870  /// For COPY-instruction the method naturally returns true, for all other
871  /// instructions the method calls target-dependent implementation.
873  const MachineOperand *&Destination) const {
874  if (MI.isCopy()) {
875  Destination = &MI.getOperand(0);
876  Source = &MI.getOperand(1);
877  return true;
878  }
879  return isCopyInstrImpl(MI, Source, Destination);
880  }
881 
882  /// Store the specified register of the given register class to the specified
883  /// stack frame index. The store instruction is to be added to the given
884  /// machine basic block before the specified machine instruction. If isKill
885  /// is true, the register operand is the last use and must be marked kill.
888  unsigned SrcReg, bool isKill, int FrameIndex,
889  const TargetRegisterClass *RC,
890  const TargetRegisterInfo *TRI) const {
891  llvm_unreachable("Target didn't implement "
892  "TargetInstrInfo::storeRegToStackSlot!");
893  }
894 
895  /// Load the specified register of the given register class from the specified
896  /// stack frame index. The load instruction is to be added to the given
897  /// machine basic block before the specified machine instruction.
900  unsigned DestReg, int FrameIndex,
901  const TargetRegisterClass *RC,
902  const TargetRegisterInfo *TRI) const {
903  llvm_unreachable("Target didn't implement "
904  "TargetInstrInfo::loadRegFromStackSlot!");
905  }
906 
907  /// This function is called for all pseudo instructions
908  /// that remain after register allocation. Many pseudo instructions are
909  /// created to help register allocation. This is the place to convert them
910  /// into real instructions. The target can edit MI in place, or it can insert
911  /// new instructions and erase MI. The function should return true if
912  /// anything was changed.
913  virtual bool expandPostRAPseudo(MachineInstr &MI) const { return false; }
914 
915  /// Check whether the target can fold a load that feeds a subreg operand
916  /// (or a subreg operand that feeds a store).
917  /// For example, X86 may want to return true if it can fold
918  /// movl (%esp), %eax
919  /// subb, %al, ...
920  /// Into:
921  /// subb (%esp), ...
922  ///
923  /// Ideally, we'd like the target implementation of foldMemoryOperand() to
924  /// reject subregs - but since this behavior used to be enforced in the
925  /// target-independent code, moving this responsibility to the targets
926  /// has the potential of causing nasty silent breakage in out-of-tree targets.
927  virtual bool isSubregFoldable() const { return false; }
928 
929  /// Attempt to fold a load or store of the specified stack
930  /// slot into the specified machine instruction for the specified operand(s).
931  /// If this is possible, a new instruction is returned with the specified
932  /// operand folded, otherwise NULL is returned.
933  /// The new instruction is inserted before MI, and the client is responsible
934  /// for removing the old instruction.
936  int FI,
937  LiveIntervals *LIS = nullptr) const;
938 
939  /// Same as the previous version except it allows folding of any load and
940  /// store from / to any address, not just from a specific stack slot.
942  MachineInstr &LoadMI,
943  LiveIntervals *LIS = nullptr) const;
944 
945  /// Return true when there is potentially a faster code sequence
946  /// for an instruction chain ending in \p Root. All potential patterns are
947  /// returned in the \p Pattern vector. Pattern should be sorted in priority
948  /// order since the pattern evaluator stops checking as soon as it finds a
949  /// faster sequence.
950  /// \param Root - Instruction that could be combined with one of its operands
951  /// \param Patterns - Vector of possible combination patterns
952  virtual bool getMachineCombinerPatterns(
953  MachineInstr &Root,
955 
956  /// Return true when a code sequence can improve throughput. It
957  /// should be called only for instructions in loops.
958  /// \param Pattern - combiner pattern
959  virtual bool isThroughputPattern(MachineCombinerPattern Pattern) const;
960 
961  /// Return true if the input \P Inst is part of a chain of dependent ops
962  /// that are suitable for reassociation, otherwise return false.
963  /// If the instruction's operands must be commuted to have a previous
964  /// instruction of the same type define the first source operand, \P Commuted
965  /// will be set to true.
966  bool isReassociationCandidate(const MachineInstr &Inst, bool &Commuted) const;
967 
968  /// Return true when \P Inst is both associative and commutative.
969  virtual bool isAssociativeAndCommutative(const MachineInstr &Inst) const {
970  return false;
971  }
972 
973  /// Return true when \P Inst has reassociable operands in the same \P MBB.
974  virtual bool hasReassociableOperands(const MachineInstr &Inst,
975  const MachineBasicBlock *MBB) const;
976 
977  /// Return true when \P Inst has reassociable sibling.
978  bool hasReassociableSibling(const MachineInstr &Inst, bool &Commuted) const;
979 
980  /// When getMachineCombinerPatterns() finds patterns, this function generates
981  /// the instructions that could replace the original code sequence. The client
982  /// has to decide whether the actual replacement is beneficial or not.
983  /// \param Root - Instruction that could be combined with one of its operands
984  /// \param Pattern - Combination pattern for Root
985  /// \param InsInstrs - Vector of new instructions that implement P
986  /// \param DelInstrs - Old instructions, including Root, that could be
987  /// replaced by InsInstr
988  /// \param InstIdxForVirtReg - map of virtual register to instruction in
989  /// InsInstr that defines it
990  virtual void genAlternativeCodeSequence(
991  MachineInstr &Root, MachineCombinerPattern Pattern,
994  DenseMap<unsigned, unsigned> &InstIdxForVirtReg) const;
995 
996  /// Attempt to reassociate \P Root and \P Prev according to \P Pattern to
997  /// reduce critical path length.
998  void reassociateOps(MachineInstr &Root, MachineInstr &Prev,
999  MachineCombinerPattern Pattern,
1002  DenseMap<unsigned, unsigned> &InstrIdxForVirtReg) const;
1003 
1004  /// This is an architecture-specific helper function of reassociateOps.
1005  /// Set special operand attributes for new instructions after reassociation.
1006  virtual void setSpecialOperandAttr(MachineInstr &OldMI1, MachineInstr &OldMI2,
1007  MachineInstr &NewMI1,
1008  MachineInstr &NewMI2) const {}
1009 
1010  /// Return true when a target supports MachineCombiner.
1011  virtual bool useMachineCombiner() const { return false; }
1012 
1013  /// Return true if the given SDNode can be copied during scheduling
1014  /// even if it has glue.
1015  virtual bool canCopyGluedNodeDuringSchedule(SDNode *N) const { return false; }
1016 
1017 protected:
1018  /// Target-dependent implementation for foldMemoryOperand.
1019  /// Target-independent code in foldMemoryOperand will
1020  /// take care of adding a MachineMemOperand to the newly created instruction.
1021  /// The instruction and any auxiliary instructions necessary will be inserted
1022  /// at InsertPt.
1023  virtual MachineInstr *
1025  ArrayRef<unsigned> Ops,
1027  LiveIntervals *LIS = nullptr) const {
1028  return nullptr;
1029  }
1030 
1031  /// Target-dependent implementation for foldMemoryOperand.
1032  /// Target-independent code in foldMemoryOperand will
1033  /// take care of adding a MachineMemOperand to the newly created instruction.
1034  /// The instruction and any auxiliary instructions necessary will be inserted
1035  /// at InsertPt.
1038  MachineBasicBlock::iterator InsertPt, MachineInstr &LoadMI,
1039  LiveIntervals *LIS = nullptr) const {
1040  return nullptr;
1041  }
1042 
1043  /// Target-dependent implementation of getRegSequenceInputs.
1044  ///
1045  /// \returns true if it is possible to build the equivalent
1046  /// REG_SEQUENCE inputs with the pair \p MI, \p DefIdx. False otherwise.
1047  ///
1048  /// \pre MI.isRegSequenceLike().
1049  ///
1050  /// \see TargetInstrInfo::getRegSequenceInputs.
1052  const MachineInstr &MI, unsigned DefIdx,
1053  SmallVectorImpl<RegSubRegPairAndIdx> &InputRegs) const {
1054  return false;
1055  }
1056 
1057  /// Target-dependent implementation of getExtractSubregInputs.
1058  ///
1059  /// \returns true if it is possible to build the equivalent
1060  /// EXTRACT_SUBREG inputs with the pair \p MI, \p DefIdx. False otherwise.
1061  ///
1062  /// \pre MI.isExtractSubregLike().
1063  ///
1064  /// \see TargetInstrInfo::getExtractSubregInputs.
1066  unsigned DefIdx,
1067  RegSubRegPairAndIdx &InputReg) const {
1068  return false;
1069  }
1070 
1071  /// Target-dependent implementation of getInsertSubregInputs.
1072  ///
1073  /// \returns true if it is possible to build the equivalent
1074  /// INSERT_SUBREG inputs with the pair \p MI, \p DefIdx. False otherwise.
1075  ///
1076  /// \pre MI.isInsertSubregLike().
1077  ///
1078  /// \see TargetInstrInfo::getInsertSubregInputs.
1079  virtual bool
1080  getInsertSubregLikeInputs(const MachineInstr &MI, unsigned DefIdx,
1081  RegSubRegPair &BaseReg,
1082  RegSubRegPairAndIdx &InsertedReg) const {
1083  return false;
1084  }
1085 
1086 public:
1087  /// getAddressSpaceForPseudoSourceKind - Given the kind of memory
1088  /// (e.g. stack) the target returns the corresponding address space.
1089  virtual unsigned
1091  return 0;
1092  }
1093 
1094  /// unfoldMemoryOperand - Separate a single instruction which folded a load or
1095  /// a store or a load and a store into two or more instruction. If this is
1096  /// possible, returns true as well as the new instructions by reference.
1097  virtual bool
1099  bool UnfoldLoad, bool UnfoldStore,
1100  SmallVectorImpl<MachineInstr *> &NewMIs) const {
1101  return false;
1102  }
1103 
1105  SmallVectorImpl<SDNode *> &NewNodes) const {
1106  return false;
1107  }
1108 
1109  /// Returns the opcode of the would be new
1110  /// instruction after load / store are unfolded from an instruction of the
1111  /// specified opcode. It returns zero if the specified unfolding is not
1112  /// possible. If LoadRegIndex is non-null, it is filled in with the operand
1113  /// index of the operand which will hold the register holding the loaded
1114  /// value.
1115  virtual unsigned
1116  getOpcodeAfterMemoryUnfold(unsigned Opc, bool UnfoldLoad, bool UnfoldStore,
1117  unsigned *LoadRegIndex = nullptr) const {
1118  return 0;
1119  }
1120 
1121  /// This is used by the pre-regalloc scheduler to determine if two loads are
1122  /// loading from the same base address. It should only return true if the base
1123  /// pointers are the same and the only differences between the two addresses
1124  /// are the offset. It also returns the offsets by reference.
1125  virtual bool areLoadsFromSameBasePtr(SDNode *Load1, SDNode *Load2,
1126  int64_t &Offset1,
1127  int64_t &Offset2) const {
1128  return false;
1129  }
1130 
1131  /// This is a used by the pre-regalloc scheduler to determine (in conjunction
1132  /// with areLoadsFromSameBasePtr) if two loads should be scheduled together.
1133  /// On some targets if two loads are loading from
1134  /// addresses in the same cache line, it's better if they are scheduled
1135  /// together. This function takes two integers that represent the load offsets
1136  /// from the common base address. It returns true if it decides it's desirable
1137  /// to schedule the two loads together. "NumLoads" is the number of loads that
1138  /// have already been scheduled after Load1.
1139  virtual bool shouldScheduleLoadsNear(SDNode *Load1, SDNode *Load2,
1140  int64_t Offset1, int64_t Offset2,
1141  unsigned NumLoads) const {
1142  return false;
1143  }
1144 
1145  /// Get the base operand and byte offset of an instruction that reads/writes
1146  /// memory.
1148  MachineOperand *&BaseOp, int64_t &Offset,
1149  const TargetRegisterInfo *TRI) const {
1150  return false;
1151  }
1152 
1153  /// Return true if the instruction contains a base register and offset. If
1154  /// true, the function also sets the operand position in the instruction
1155  /// for the base register and offset.
1156  virtual bool getBaseAndOffsetPosition(const MachineInstr &MI,
1157  unsigned &BasePos,
1158  unsigned &OffsetPos) const {
1159  return false;
1160  }
1161 
1162  /// If the instruction is an increment of a constant value, return the amount.
1163  virtual bool getIncrementValue(const MachineInstr &MI, int &Value) const {
1164  return false;
1165  }
1166 
1167  /// Returns true if the two given memory operations should be scheduled
1168  /// adjacent. Note that you have to add:
1169  /// DAG->addMutation(createLoadClusterDAGMutation(DAG->TII, DAG->TRI));
1170  /// or
1171  /// DAG->addMutation(createStoreClusterDAGMutation(DAG->TII, DAG->TRI));
1172  /// to TargetPassConfig::createMachineScheduler() to have an effect.
1173  virtual bool shouldClusterMemOps(MachineOperand &BaseOp1,
1174  MachineOperand &BaseOp2,
1175  unsigned NumLoads) const {
1176  llvm_unreachable("target did not implement shouldClusterMemOps()");
1177  }
1178 
1179  /// Reverses the branch condition of the specified condition list,
1180  /// returning false on success and true if it cannot be reversed.
1181  virtual bool
1183  return true;
1184  }
1185 
1186  /// Insert a noop into the instruction stream at the specified point.
1187  virtual void insertNoop(MachineBasicBlock &MBB,
1188  MachineBasicBlock::iterator MI) const;
1189 
1190  /// Return the noop instruction to use for a noop.
1191  virtual void getNoop(MCInst &NopInst) const;
1192 
1193  /// Return true for post-incremented instructions.
1194  virtual bool isPostIncrement(const MachineInstr &MI) const { return false; }
1195 
1196  /// Returns true if the instruction is already predicated.
1197  virtual bool isPredicated(const MachineInstr &MI) const { return false; }
1198 
1199  /// Returns true if the instruction is a
1200  /// terminator instruction that has not been predicated.
1201  virtual bool isUnpredicatedTerminator(const MachineInstr &MI) const;
1202 
1203  /// Returns true if MI is an unconditional tail call.
1204  virtual bool isUnconditionalTailCall(const MachineInstr &MI) const {
1205  return false;
1206  }
1207 
1208  /// Returns true if the tail call can be made conditional on BranchCond.
1210  const MachineInstr &TailCall) const {
1211  return false;
1212  }
1213 
1214  /// Replace the conditional branch in MBB with a conditional tail call.
1217  const MachineInstr &TailCall) const {
1218  llvm_unreachable("Target didn't implement replaceBranchWithTailCall!");
1219  }
1220 
1221  /// Convert the instruction into a predicated instruction.
1222  /// It returns true if the operation was successful.
1223  virtual bool PredicateInstruction(MachineInstr &MI,
1224  ArrayRef<MachineOperand> Pred) const;
1225 
1226  /// Returns true if the first specified predicate
1227  /// subsumes the second, e.g. GE subsumes GT.
1229  ArrayRef<MachineOperand> Pred2) const {
1230  return false;
1231  }
1232 
1233  /// If the specified instruction defines any predicate
1234  /// or condition code register(s) used for predication, returns true as well
1235  /// as the definition predicate(s) by reference.
1236  virtual bool DefinesPredicate(MachineInstr &MI,
1237  std::vector<MachineOperand> &Pred) const {
1238  return false;
1239  }
1240 
1241  /// Return true if the specified instruction can be predicated.
1242  /// By default, this returns true for every instruction with a
1243  /// PredicateOperand.
1244  virtual bool isPredicable(const MachineInstr &MI) const {
1245  return MI.getDesc().isPredicable();
1246  }
1247 
1248  /// Return true if it's safe to move a machine
1249  /// instruction that defines the specified register class.
1250  virtual bool isSafeToMoveRegClassDefs(const TargetRegisterClass *RC) const {
1251  return true;
1252  }
1253 
1254  /// Test if the given instruction should be considered a scheduling boundary.
1255  /// This primarily includes labels and terminators.
1256  virtual bool isSchedulingBoundary(const MachineInstr &MI,
1257  const MachineBasicBlock *MBB,
1258  const MachineFunction &MF) const;
1259 
1260  /// Measure the specified inline asm to determine an approximation of its
1261  /// length.
1262  virtual unsigned getInlineAsmLength(const char *Str,
1263  const MCAsmInfo &MAI) const;
1264 
1265  /// Allocate and return a hazard recognizer to use for this target when
1266  /// scheduling the machine instructions before register allocation.
1267  virtual ScheduleHazardRecognizer *
1269  const ScheduleDAG *DAG) const;
1270 
1271  /// Allocate and return a hazard recognizer to use for this target when
1272  /// scheduling the machine instructions before register allocation.
1273  virtual ScheduleHazardRecognizer *
1275  const ScheduleDAG *DAG) const;
1276 
1277  /// Allocate and return a hazard recognizer to use for this target when
1278  /// scheduling the machine instructions after register allocation.
1279  virtual ScheduleHazardRecognizer *
1281  const ScheduleDAG *DAG) const;
1282 
1283  /// Allocate and return a hazard recognizer to use for by non-scheduling
1284  /// passes.
1285  virtual ScheduleHazardRecognizer *
1287  return nullptr;
1288  }
1289 
1290  /// Provide a global flag for disabling the PreRA hazard recognizer that
1291  /// targets may choose to honor.
1292  bool usePreRAHazardRecognizer() const;
1293 
1294  /// For a comparison instruction, return the source registers
1295  /// in SrcReg and SrcReg2 if having two register operands, and the value it
1296  /// compares against in CmpValue. Return true if the comparison instruction
1297  /// can be analyzed.
1298  virtual bool analyzeCompare(const MachineInstr &MI, unsigned &SrcReg,
1299  unsigned &SrcReg2, int &Mask, int &Value) const {
1300  return false;
1301  }
1302 
1303  /// See if the comparison instruction can be converted
1304  /// into something more efficient. E.g., on ARM most instructions can set the
1305  /// flags register, obviating the need for a separate CMP.
1306  virtual bool optimizeCompareInstr(MachineInstr &CmpInstr, unsigned SrcReg,
1307  unsigned SrcReg2, int Mask, int Value,
1308  const MachineRegisterInfo *MRI) const {
1309  return false;
1310  }
1311  virtual bool optimizeCondBranch(MachineInstr &MI) const { return false; }
1312 
1313  /// Try to remove the load by folding it to a register operand at the use.
1314  /// We fold the load instructions if and only if the
1315  /// def and use are in the same BB. We only look at one load and see
1316  /// whether it can be folded into MI. FoldAsLoadDefReg is the virtual register
1317  /// defined by the load we are trying to fold. DefMI returns the machine
1318  /// instruction that defines FoldAsLoadDefReg, and the function returns
1319  /// the machine instruction generated due to folding.
1321  const MachineRegisterInfo *MRI,
1322  unsigned &FoldAsLoadDefReg,
1323  MachineInstr *&DefMI) const {
1324  return nullptr;
1325  }
1326 
1327  /// 'Reg' is known to be defined by a move immediate instruction,
1328  /// try to fold the immediate into the use instruction.
1329  /// If MRI->hasOneNonDBGUse(Reg) is true, and this function returns true,
1330  /// then the caller may assume that DefMI has been erased from its parent
1331  /// block. The caller may assume that it will not be erased by this
1332  /// function otherwise.
1334  unsigned Reg, MachineRegisterInfo *MRI) const {
1335  return false;
1336  }
1337 
1338  /// Return the number of u-operations the given machine
1339  /// instruction will be decoded to on the target cpu. The itinerary's
1340  /// IssueWidth is the number of microops that can be dispatched each
1341  /// cycle. An instruction with zero microops takes no dispatch resources.
1342  virtual unsigned getNumMicroOps(const InstrItineraryData *ItinData,
1343  const MachineInstr &MI) const;
1344 
1345  /// Return true for pseudo instructions that don't consume any
1346  /// machine resources in their current form. These are common cases that the
1347  /// scheduler should consider free, rather than conservatively handling them
1348  /// as instructions with no itinerary.
1349  bool isZeroCost(unsigned Opcode) const {
1350  return Opcode <= TargetOpcode::COPY;
1351  }
1352 
1353  virtual int getOperandLatency(const InstrItineraryData *ItinData,
1354  SDNode *DefNode, unsigned DefIdx,
1355  SDNode *UseNode, unsigned UseIdx) const;
1356 
1357  /// Compute and return the use operand latency of a given pair of def and use.
1358  /// In most cases, the static scheduling itinerary was enough to determine the
1359  /// operand latency. But it may not be possible for instructions with variable
1360  /// number of defs / uses.
1361  ///
1362  /// This is a raw interface to the itinerary that may be directly overridden
1363  /// by a target. Use computeOperandLatency to get the best estimate of
1364  /// latency.
1365  virtual int getOperandLatency(const InstrItineraryData *ItinData,
1366  const MachineInstr &DefMI, unsigned DefIdx,
1367  const MachineInstr &UseMI,
1368  unsigned UseIdx) const;
1369 
1370  /// Compute the instruction latency of a given instruction.
1371  /// If the instruction has higher cost when predicated, it's returned via
1372  /// PredCost.
1373  virtual unsigned getInstrLatency(const InstrItineraryData *ItinData,
1374  const MachineInstr &MI,
1375  unsigned *PredCost = nullptr) const;
1376 
1377  virtual unsigned getPredicationCost(const MachineInstr &MI) const;
1378 
1379  virtual int getInstrLatency(const InstrItineraryData *ItinData,
1380  SDNode *Node) const;
1381 
1382  /// Return the default expected latency for a def based on its opcode.
1383  unsigned defaultDefLatency(const MCSchedModel &SchedModel,
1384  const MachineInstr &DefMI) const;
1385 
1386  int computeDefOperandLatency(const InstrItineraryData *ItinData,
1387  const MachineInstr &DefMI) const;
1388 
1389  /// Return true if this opcode has high latency to its result.
1390  virtual bool isHighLatencyDef(int opc) const { return false; }
1391 
1392  /// Compute operand latency between a def of 'Reg'
1393  /// and a use in the current loop. Return true if the target considered
1394  /// it 'high'. This is used by optimization passes such as machine LICM to
1395  /// determine whether it makes sense to hoist an instruction out even in a
1396  /// high register pressure situation.
1397  virtual bool hasHighOperandLatency(const TargetSchedModel &SchedModel,
1398  const MachineRegisterInfo *MRI,
1399  const MachineInstr &DefMI, unsigned DefIdx,
1400  const MachineInstr &UseMI,
1401  unsigned UseIdx) const {
1402  return false;
1403  }
1404 
1405  /// Compute operand latency of a def of 'Reg'. Return true
1406  /// if the target considered it 'low'.
1407  virtual bool hasLowDefLatency(const TargetSchedModel &SchedModel,
1408  const MachineInstr &DefMI,
1409  unsigned DefIdx) const;
1410 
1411  /// Perform target-specific instruction verification.
1412  virtual bool verifyInstruction(const MachineInstr &MI,
1413  StringRef &ErrInfo) const {
1414  return true;
1415  }
1416 
1417  /// Return the current execution domain and bit mask of
1418  /// possible domains for instruction.
1419  ///
1420  /// Some micro-architectures have multiple execution domains, and multiple
1421  /// opcodes that perform the same operation in different domains. For
1422  /// example, the x86 architecture provides the por, orps, and orpd
1423  /// instructions that all do the same thing. There is a latency penalty if a
1424  /// register is written in one domain and read in another.
1425  ///
1426  /// This function returns a pair (domain, mask) containing the execution
1427  /// domain of MI, and a bit mask of possible domains. The setExecutionDomain
1428  /// function can be used to change the opcode to one of the domains in the
1429  /// bit mask. Instructions whose execution domain can't be changed should
1430  /// return a 0 mask.
1431  ///
1432  /// The execution domain numbers don't have any special meaning except domain
1433  /// 0 is used for instructions that are not associated with any interesting
1434  /// execution domain.
1435  ///
1436  virtual std::pair<uint16_t, uint16_t>
1438  return std::make_pair(0, 0);
1439  }
1440 
1441  /// Change the opcode of MI to execute in Domain.
1442  ///
1443  /// The bit (1 << Domain) must be set in the mask returned from
1444  /// getExecutionDomain(MI).
1445  virtual void setExecutionDomain(MachineInstr &MI, unsigned Domain) const {}
1446 
1447  /// Returns the preferred minimum clearance
1448  /// before an instruction with an unwanted partial register update.
1449  ///
1450  /// Some instructions only write part of a register, and implicitly need to
1451  /// read the other parts of the register. This may cause unwanted stalls
1452  /// preventing otherwise unrelated instructions from executing in parallel in
1453  /// an out-of-order CPU.
1454  ///
1455  /// For example, the x86 instruction cvtsi2ss writes its result to bits
1456  /// [31:0] of the destination xmm register. Bits [127:32] are unaffected, so
1457  /// the instruction needs to wait for the old value of the register to become
1458  /// available:
1459  ///
1460  /// addps %xmm1, %xmm0
1461  /// movaps %xmm0, (%rax)
1462  /// cvtsi2ss %rbx, %xmm0
1463  ///
1464  /// In the code above, the cvtsi2ss instruction needs to wait for the addps
1465  /// instruction before it can issue, even though the high bits of %xmm0
1466  /// probably aren't needed.
1467  ///
1468  /// This hook returns the preferred clearance before MI, measured in
1469  /// instructions. Other defs of MI's operand OpNum are avoided in the last N
1470  /// instructions before MI. It should only return a positive value for
1471  /// unwanted dependencies. If the old bits of the defined register have
1472  /// useful values, or if MI is determined to otherwise read the dependency,
1473  /// the hook should return 0.
1474  ///
1475  /// The unwanted dependency may be handled by:
1476  ///
1477  /// 1. Allocating the same register for an MI def and use. That makes the
1478  /// unwanted dependency identical to a required dependency.
1479  ///
1480  /// 2. Allocating a register for the def that has no defs in the previous N
1481  /// instructions.
1482  ///
1483  /// 3. Calling breakPartialRegDependency() with the same arguments. This
1484  /// allows the target to insert a dependency breaking instruction.
1485  ///
1486  virtual unsigned
1487  getPartialRegUpdateClearance(const MachineInstr &MI, unsigned OpNum,
1488  const TargetRegisterInfo *TRI) const {
1489  // The default implementation returns 0 for no partial register dependency.
1490  return 0;
1491  }
1492 
1493  /// Return the minimum clearance before an instruction that reads an
1494  /// unused register.
1495  ///
1496  /// For example, AVX instructions may copy part of a register operand into
1497  /// the unused high bits of the destination register.
1498  ///
1499  /// vcvtsi2sdq %rax, undef %xmm0, %xmm14
1500  ///
1501  /// In the code above, vcvtsi2sdq copies %xmm0[127:64] into %xmm14 creating a
1502  /// false dependence on any previous write to %xmm0.
1503  ///
1504  /// This hook works similarly to getPartialRegUpdateClearance, except that it
1505  /// does not take an operand index. Instead sets \p OpNum to the index of the
1506  /// unused register.
1507  virtual unsigned getUndefRegClearance(const MachineInstr &MI, unsigned &OpNum,
1508  const TargetRegisterInfo *TRI) const {
1509  // The default implementation returns 0 for no undef register dependency.
1510  return 0;
1511  }
1512 
1513  /// Insert a dependency-breaking instruction
1514  /// before MI to eliminate an unwanted dependency on OpNum.
1515  ///
1516  /// If it wasn't possible to avoid a def in the last N instructions before MI
1517  /// (see getPartialRegUpdateClearance), this hook will be called to break the
1518  /// unwanted dependency.
1519  ///
1520  /// On x86, an xorps instruction can be used as a dependency breaker:
1521  ///
1522  /// addps %xmm1, %xmm0
1523  /// movaps %xmm0, (%rax)
1524  /// xorps %xmm0, %xmm0
1525  /// cvtsi2ss %rbx, %xmm0
1526  ///
1527  /// An <imp-kill> operand should be added to MI if an instruction was
1528  /// inserted. This ties the instructions together in the post-ra scheduler.
1529  ///
1530  virtual void breakPartialRegDependency(MachineInstr &MI, unsigned OpNum,
1531  const TargetRegisterInfo *TRI) const {}
1532 
1533  /// Create machine specific model for scheduling.
1534  virtual DFAPacketizer *
1536  return nullptr;
1537  }
1538 
1539  /// Sometimes, it is possible for the target
1540  /// to tell, even without aliasing information, that two MIs access different
1541  /// memory addresses. This function returns true if two MIs access different
1542  /// memory addresses and false otherwise.
1543  ///
1544  /// Assumes any physical registers used to compute addresses have the same
1545  /// value for both instructions. (This is the most useful assumption for
1546  /// post-RA scheduling.)
1547  ///
1548  /// See also MachineInstr::mayAlias, which is implemented on top of this
1549  /// function.
1550  virtual bool
1552  AliasAnalysis *AA = nullptr) const {
1553  assert((MIa.mayLoad() || MIa.mayStore()) &&
1554  "MIa must load from or modify a memory location");
1555  assert((MIb.mayLoad() || MIb.mayStore()) &&
1556  "MIb must load from or modify a memory location");
1557  return false;
1558  }
1559 
1560  /// Return the value to use for the MachineCSE's LookAheadLimit,
1561  /// which is a heuristic used for CSE'ing phys reg defs.
1562  virtual unsigned getMachineCSELookAheadLimit() const {
1563  // The default lookahead is small to prevent unprofitable quadratic
1564  // behavior.
1565  return 5;
1566  }
1567 
1568  /// Return an array that contains the ids of the target indices (used for the
1569  /// TargetIndex machine operand) and their names.
1570  ///
1571  /// MIR Serialization is able to serialize only the target indices that are
1572  /// defined by this method.
1575  return None;
1576  }
1577 
1578  /// Decompose the machine operand's target flags into two values - the direct
1579  /// target flag value and any of bit flags that are applied.
1580  virtual std::pair<unsigned, unsigned>
1581  decomposeMachineOperandsTargetFlags(unsigned /*TF*/) const {
1582  return std::make_pair(0u, 0u);
1583  }
1584 
1585  /// Return an array that contains the direct target flag values and their
1586  /// names.
1587  ///
1588  /// MIR Serialization is able to serialize only the target flags that are
1589  /// defined by this method.
1592  return None;
1593  }
1594 
1595  /// Return an array that contains the bitmask target flag values and their
1596  /// names.
1597  ///
1598  /// MIR Serialization is able to serialize only the target flags that are
1599  /// defined by this method.
1602  return None;
1603  }
1604 
1605  /// Return an array that contains the MMO target flag values and their
1606  /// names.
1607  ///
1608  /// MIR Serialization is able to serialize only the MMO target flags that are
1609  /// defined by this method.
1612  return None;
1613  }
1614 
1615  /// Determines whether \p Inst is a tail call instruction. Override this
1616  /// method on targets that do not properly set MCID::Return and MCID::Call on
1617  /// tail call instructions."
1618  virtual bool isTailCall(const MachineInstr &Inst) const {
1619  return Inst.isReturn() && Inst.isCall();
1620  }
1621 
1622  /// True if the instruction is bound to the top of its basic block and no
1623  /// other instructions shall be inserted before it. This can be implemented
1624  /// to prevent register allocator to insert spills before such instructions.
1625  virtual bool isBasicBlockPrologue(const MachineInstr &MI) const {
1626  return false;
1627  }
1628 
1629  /// Returns a \p outliner::OutlinedFunction struct containing target-specific
1630  /// information for a set of outlining candidates.
1632  std::vector<outliner::Candidate> &RepeatedSequenceLocs) const {
1634  "Target didn't implement TargetInstrInfo::getOutliningCandidateInfo!");
1635  }
1636 
1637  /// Returns how or if \p MI should be outlined.
1638  virtual outliner::InstrType
1639  getOutliningType(MachineBasicBlock::iterator &MIT, unsigned Flags) const {
1641  "Target didn't implement TargetInstrInfo::getOutliningType!");
1642  }
1643 
1644  /// Optional target hook that returns true if \p MBB is safe to outline from,
1645  /// and returns any target-specific information in \p Flags.
1647  unsigned &Flags) const {
1648  return true;
1649  }
1650 
1651  /// Insert a custom frame for outlined functions.
1653  const outliner::OutlinedFunction &OF) const {
1655  "Target didn't implement TargetInstrInfo::buildOutlinedFrame!");
1656  }
1657 
1658  /// Insert a call to an outlined function into the program.
1659  /// Returns an iterator to the spot where we inserted the call. This must be
1660  /// implemented by the target.
1664  const outliner::Candidate &C) const {
1666  "Target didn't implement TargetInstrInfo::insertOutlinedCall!");
1667  }
1668 
1669  /// Return true if the function can safely be outlined from.
1670  /// A function \p MF is considered safe for outlining if an outlined function
1671  /// produced from instructions in F will produce a program which produces the
1672  /// same output for any set of given inputs.
1674  bool OutlineFromLinkOnceODRs) const {
1675  llvm_unreachable("Target didn't implement "
1676  "TargetInstrInfo::isFunctionSafeToOutlineFrom!");
1677  }
1678 
1679  /// Return true if the function should be outlined from by default.
1681  return false;
1682  }
1683 
1684 private:
1685  unsigned CallFrameSetupOpcode, CallFrameDestroyOpcode;
1686  unsigned CatchRetOpcode;
1687  unsigned ReturnOpcode;
1688 };
1689 
1690 /// Provide DenseMapInfo for TargetInstrInfo::RegSubRegPair.
1693 
1695  return TargetInstrInfo::RegSubRegPair(RegInfo::getEmptyKey(),
1696  RegInfo::getEmptyKey());
1697  }
1698 
1700  return TargetInstrInfo::RegSubRegPair(RegInfo::getTombstoneKey(),
1701  RegInfo::getTombstoneKey());
1702  }
1703 
1704  /// Reuse getHashValue implementation from
1705  /// std::pair<unsigned, unsigned>.
1706  static unsigned getHashValue(const TargetInstrInfo::RegSubRegPair &Val) {
1707  std::pair<unsigned, unsigned> PairVal = std::make_pair(Val.Reg, Val.SubReg);
1708  return DenseMapInfo<std::pair<unsigned, unsigned>>::getHashValue(PairVal);
1709  }
1710 
1711  static bool isEqual(const TargetInstrInfo::RegSubRegPair &LHS,
1712  const TargetInstrInfo::RegSubRegPair &RHS) {
1713  return RegInfo::isEqual(LHS.Reg, RHS.Reg) &&
1714  RegInfo::isEqual(LHS.SubReg, RHS.SubReg);
1715  }
1716 };
1717 
1718 } // end namespace llvm
1719 
1720 #endif // LLVM_TARGET_TARGETINSTRINFO_H
virtual std::pair< unsigned, unsigned > decomposeMachineOperandsTargetFlags(unsigned) const
Decompose the machine operand&#39;s target flags into two values - the direct target flag value and any o...
uint64_t CallInst * C
This class is the base class for the comparison instructions.
Definition: InstrTypes.h:636
virtual bool expandPostRAPseudo(MachineInstr &MI) const
This function is called for all pseudo instructions that remain after register allocation.
static unsigned getHashValue(const TargetInstrInfo::RegSubRegPair &Val)
Reuse getHashValue implementation from std::pair<unsigned, unsigned>.
virtual bool areLoadsFromSameBasePtr(SDNode *Load1, SDNode *Load2, int64_t &Offset1, int64_t &Offset2) const
This is used by the pre-regalloc scheduler to determine if two loads are loading from the same base a...
virtual bool canInsertSelect(const MachineBasicBlock &MBB, ArrayRef< MachineOperand > Cond, unsigned TrueReg, unsigned FalseReg, int &CondCycles, int &TrueCycles, int &FalseCycles) const
Return true if it is possible to insert a select instruction that chooses between TrueReg and FalseRe...
bool isCall(QueryType Type=AnyInBundle) const
Definition: MachineInstr.h:632
static TargetInstrInfo::RegSubRegPair getTombstoneKey()
virtual void insertSelect(MachineBasicBlock &MBB, MachineBasicBlock::iterator I, const DebugLoc &DL, unsigned DstReg, ArrayRef< MachineOperand > Cond, unsigned TrueReg, unsigned FalseReg) const
Insert a select instruction into MBB before I that will copy TrueReg to DstReg when Cond is true...
virtual bool isSafeToMoveRegClassDefs(const TargetRegisterClass *RC) const
Return true if it&#39;s safe to move a machine instruction that defines the specified register class...
virtual bool isCopyInstrImpl(const MachineInstr &MI, const MachineOperand *&Source, const MachineOperand *&Destination) const
Target-dependent implemenation for IsCopyInstr.
This class represents lattice values for constants.
Definition: AllocatorList.h:23
MachineInstr * foldMemoryOperand(MachineInstr &MI, ArrayRef< unsigned > Ops, int FI, LiveIntervals *LIS=nullptr) const
Attempt to fold a load or store of the specified stack slot into the specified machine instruction fo...
RegSubRegPair(unsigned Reg=0, unsigned SubReg=0)
A Module instance is used to store all the information related to an LLVM module. ...
Definition: Module.h:64
virtual void setExecutionDomain(MachineInstr &MI, unsigned Domain) const
Change the opcode of MI to execute in Domain.
virtual bool DefinesPredicate(MachineInstr &MI, std::vector< MachineOperand > &Pred) const
If the specified instruction defines any predicate or condition code register(s) used for predication...
virtual bool optimizeCompareInstr(MachineInstr &CmpInstr, unsigned SrcReg, unsigned SrcReg2, int Mask, int Value, const MachineRegisterInfo *MRI) const
See if the comparison instruction can be converted into something more efficient. ...
virtual bool isThroughputPattern(MachineCombinerPattern Pattern) const
Return true when a code sequence can improve throughput.
virtual bool isReallyTriviallyReMaterializable(const MachineInstr &MI, AliasAnalysis *AA) const
For instructions with opcodes for which the M_REMATERIALIZABLE flag is set, this hook lets the target...
int64_t getFrameTotalSize(const MachineInstr &I) const
Returns the total frame size, which is made up of the space set up inside the pair of frame start-sto...
Describe properties that are true of each instruction in the target description file.
Definition: MCInstrDesc.h:163
virtual bool analyzeSelect(const MachineInstr &MI, SmallVectorImpl< MachineOperand > &Cond, unsigned &TrueOp, unsigned &FalseOp, bool &Optimizable) const
Analyze the given select instruction, returning true if it cannot be understood.
virtual bool getIncrementValue(const MachineInstr &MI, int &Value) const
If the instruction is an increment of a constant value, return the amount.
virtual MachineInstr & duplicate(MachineBasicBlock &MBB, MachineBasicBlock::iterator InsertBefore, const MachineInstr &Orig) const
Clones instruction or the whole instruction bundle Orig and insert into MBB before InsertBefore...
MachineInstr * commuteInstruction(MachineInstr &MI, bool NewMI=false, unsigned OpIdx1=CommuteAnyOperandIndex, unsigned OpIdx2=CommuteAnyOperandIndex) const
This method commutes the operands of the given machine instruction MI.
virtual bool hasReassociableOperands(const MachineInstr &Inst, const MachineBasicBlock *MBB) const
Return true when Inst has reassociable operands in the same MBB.
TargetInstrInfo(unsigned CFSetupOpcode=~0u, unsigned CFDestroyOpcode=~0u, unsigned CatchRetOpcode=~0u, unsigned ReturnOpcode=~0u)
virtual unsigned insertBranch(MachineBasicBlock &MBB, MachineBasicBlock *TBB, MachineBasicBlock *FBB, ArrayRef< MachineOperand > Cond, const DebugLoc &DL, int *BytesAdded=nullptr) const
Insert branch code into the end of the specified MachineBasicBlock.
virtual bool useMachineCombiner() const
Return true when a target supports MachineCombiner.
virtual bool getExtractSubregLikeInputs(const MachineInstr &MI, unsigned DefIdx, RegSubRegPairAndIdx &InputReg) const
Target-dependent implementation of getExtractSubregInputs.
unsigned const TargetRegisterInfo * TRI
A debug info location.
Definition: DebugLoc.h:33
unsigned getCallFrameDestroyOpcode() const
unsigned defaultDefLatency(const MCSchedModel &SchedModel, const MachineInstr &DefMI) const
Return the default expected latency for a def based on its opcode.
static bool isGenericOpcode(unsigned Opc)
virtual unsigned reduceLoopCount(MachineBasicBlock &MBB, MachineInstr *IndVar, MachineInstr &Cmp, SmallVectorImpl< MachineOperand > &Cond, SmallVectorImpl< MachineInstr *> &PrevInsts, unsigned Iter, unsigned MaxIter) const
Generate code to reduce the loop iteration by one and check if the loop is finished.
virtual unsigned removeBranch(MachineBasicBlock &MBB, int *BytesRemoved=nullptr) const
Remove the branching code at the end of the specific MBB.
An individual sequence of instructions to be replaced with a call to an outlined function.
virtual bool isHighLatencyDef(int opc) const
Return true if this opcode has high latency to its result.
Represents a predicate at the MachineFunction level.
virtual bool getMemOperandWithOffset(MachineInstr &MI, MachineOperand *&BaseOp, int64_t &Offset, const TargetRegisterInfo *TRI) const
Get the base operand and byte offset of an instruction that reads/writes memory.
virtual bool isUnpredicatedTerminator(const MachineInstr &MI) const
Returns true if the instruction is a terminator instruction that has not been predicated.
A templated base class for SmallPtrSet which provides the typesafe interface that is common across al...
Definition: SmallPtrSet.h:343
virtual unsigned getMachineCSELookAheadLimit() const
Return the value to use for the MachineCSE&#39;s LookAheadLimit, which is a heuristic used for CSE&#39;ing ph...
virtual unsigned isLoadFromStackSlot(const MachineInstr &MI, int &FrameIndex, unsigned &MemBytes) const
Optional extension of isLoadFromStackSlot that returns the number of bytes loaded from the stack...
virtual ArrayRef< std::pair< unsigned, const char * > > getSerializableBitmaskMachineOperandTargetFlags() const
Return an array that contains the bitmask target flag values and their names.
virtual unsigned getInstSizeInBytes(const MachineInstr &MI) const
Returns the size in bytes of the specified MachineInstr, or ~0U when this function is not implemented...
virtual void reMaterialize(MachineBasicBlock &MBB, MachineBasicBlock::iterator MI, unsigned DestReg, unsigned SubIdx, const MachineInstr &Orig, const TargetRegisterInfo &TRI) const
Re-issue the specified &#39;original&#39; instruction at the specific location targeting a new destination re...
virtual MachineBasicBlock::iterator insertOutlinedCall(Module &M, MachineBasicBlock &MBB, MachineBasicBlock::iterator &It, MachineFunction &MF, const outliner::Candidate &C) const
Insert a call to an outlined function into the program.
virtual unsigned getPartialRegUpdateClearance(const MachineInstr &MI, unsigned OpNum, const TargetRegisterInfo *TRI) const
Returns the preferred minimum clearance before an instruction with an unwanted partial register updat...
Provide an instruction scheduling machine model to CodeGen passes.
const TargetRegisterClass * getRegClass(const MCInstrDesc &MCID, unsigned OpNum, const TargetRegisterInfo *TRI, const MachineFunction &MF) const
Given a machine instruction descriptor, returns the register class constraint for OpNum...
virtual MachineInstr * foldMemoryOperandImpl(MachineFunction &MF, MachineInstr &MI, ArrayRef< unsigned > Ops, MachineBasicBlock::iterator InsertPt, MachineInstr &LoadMI, LiveIntervals *LIS=nullptr) const
Target-dependent implementation for foldMemoryOperand.
virtual bool FoldImmediate(MachineInstr &UseMI, MachineInstr &DefMI, unsigned Reg, MachineRegisterInfo *MRI) const
&#39;Reg&#39; is known to be defined by a move immediate instruction, try to fold the immediate into the use ...
virtual DFAPacketizer * CreateTargetScheduleState(const TargetSubtargetInfo &) const
Create machine specific model for scheduling.
virtual bool unfoldMemoryOperand(MachineFunction &MF, MachineInstr &MI, unsigned Reg, bool UnfoldLoad, bool UnfoldStore, SmallVectorImpl< MachineInstr *> &NewMIs) const
unfoldMemoryOperand - Separate a single instruction which folded a load or a store or a load and a st...
This class consists of common code factored out of the SmallVector class to reduce code duplication b...
Definition: APFloat.h:41
bool operator!=(const RegSubRegPair &P) const
virtual bool hasLowDefLatency(const TargetSchedModel &SchedModel, const MachineInstr &DefMI, unsigned DefIdx) const
Compute operand latency of a def of &#39;Reg&#39;.
unsigned getOpcode() const
Returns the opcode of this MachineInstr.
Definition: MachineInstr.h:408
virtual MachineInstr * convertToThreeAddress(MachineFunction::iterator &MFI, MachineInstr &MI, LiveVariables *LV) const
This method must be implemented by targets that set the M_CONVERTIBLE_TO_3_ADDR flag.
virtual bool isBasicBlockPrologue(const MachineInstr &MI) const
True if the instruction is bound to the top of its basic block and no other instructions shall be ins...
unsigned getCatchReturnOpcode() const
bool isSelect() const
Return true if this is a select instruction.
Definition: MCInstrDesc.h:320
virtual ArrayRef< std::pair< unsigned, const char * > > getSerializableDirectMachineOperandTargetFlags() const
Return an array that contains the direct target flag values and their names.
virtual void copyPhysReg(MachineBasicBlock &MBB, MachineBasicBlock::iterator MI, const DebugLoc &DL, unsigned DestReg, unsigned SrcReg, bool KillSrc) const
Emit instructions to copy a pair of physical registers.
const MCInstrDesc & getDesc() const
Returns the target instruction descriptor of this MachineInstr.
Definition: MachineInstr.h:405
virtual MachineInstr * foldMemoryOperandImpl(MachineFunction &MF, MachineInstr &MI, ArrayRef< unsigned > Ops, MachineBasicBlock::iterator InsertPt, int FrameIndex, LiveIntervals *LIS=nullptr) const
Target-dependent implementation for foldMemoryOperand.
static bool isEqual(const Function &Caller, const Function &Callee)
virtual unsigned getInstrLatency(const InstrItineraryData *ItinData, const MachineInstr &MI, unsigned *PredCost=nullptr) const
Compute the instruction latency of a given instruction.
virtual bool unfoldMemoryOperand(SelectionDAG &DAG, SDNode *N, SmallVectorImpl< SDNode *> &NewNodes) const
bool isRematerializable() const
Returns true if this instruction is a candidate for remat.
Definition: MCInstrDesc.h:480
virtual bool getMachineCombinerPatterns(MachineInstr &Root, SmallVectorImpl< MachineCombinerPattern > &Patterns) const
Return true when there is potentially a faster code sequence for an instruction chain ending in Root...
ArrayRef - Represent a constant reference to an array (0 or more elements consecutively in memory)...
Definition: APInt.h:32
virtual bool isProfitableToIfCvt(MachineBasicBlock &MBB, unsigned NumCycles, unsigned ExtraPredCycles, BranchProbability Probability) const
Return true if it&#39;s profitable to predicate instructions with accumulated instruction latency of "Num...
virtual unsigned isStoreToStackSlot(const MachineInstr &MI, int &FrameIndex) const
If the specified machine instruction is a direct store to a stack slot, return the virtual or physica...
Itinerary data supplied by a subtarget to be used by a target.
virtual bool getBaseAndOffsetPosition(const MachineInstr &MI, unsigned &BasePos, unsigned &OffsetPos) const
Return true if the instruction contains a base register and offset.
virtual ArrayRef< std::pair< MachineMemOperand::Flags, const char * > > getSerializableMachineMemOperandTargetFlags() const
Return an array that contains the MMO target flag values and their names.
RegSubRegPairAndIdx(unsigned Reg=0, unsigned SubReg=0, unsigned SubIdx=0)
TargetInstrInfo & operator=(const TargetInstrInfo &)=delete
virtual MachineInstr * optimizeSelect(MachineInstr &MI, SmallPtrSetImpl< MachineInstr *> &NewMIs, bool PreferFalse=false) const
Given a select instruction that was understood by analyzeSelect and returned Optimizable = true...
virtual bool areMemAccessesTriviallyDisjoint(MachineInstr &MIa, MachineInstr &MIb, AliasAnalysis *AA=nullptr) const
Sometimes, it is possible for the target to tell, even without aliasing information, that two MIs access different memory addresses.
Instances of this class represent a single low-level machine instruction.
Definition: MCInst.h:158
BasicBlockListType::iterator iterator
virtual bool isFunctionSafeToOutlineFrom(MachineFunction &MF, bool OutlineFromLinkOnceODRs) const
Return true if the function can safely be outlined from.
TargetInstrInfo - Interface to description of machine instruction set.
bool isReturn(QueryType Type=AnyInBundle) const
Definition: MachineInstr.h:622
virtual void getNoop(MCInst &NopInst) const
Return the noop instruction to use for a noop.
This class is intended to be used as a base class for asm properties and features specific to the tar...
Definition: MCAsmInfo.h:55
virtual unsigned isStoreToStackSlotPostFE(const MachineInstr &MI, int &FrameIndex) const
Check for post-frame ptr elimination stack locations as well.
TargetInstrInfo::RegSubRegPair RegSubRegPair
virtual bool shouldOutlineFromFunctionByDefault(MachineFunction &MF) const
Return true if the function should be outlined from by default.
bool usePreRAHazardRecognizer() const
Provide a global flag for disabling the PreRA hazard recognizer that targets may choose to honor...
bool operator==(const RegSubRegPair &P) const
static const unsigned CommuteAnyOperandIndex
bool mayStore(QueryType Type=AnyInBundle) const
Return true if this instruction could possibly modify memory.
Definition: MachineInstr.h:819
#define P(N)
virtual MachineInstr * commuteInstructionImpl(MachineInstr &MI, bool NewMI, unsigned OpIdx1, unsigned OpIdx2) const
This method commutes the operands of the given machine instruction MI.
virtual void ReplaceTailWithBranchTo(MachineBasicBlock::iterator Tail, MachineBasicBlock *NewDest) const
Delete the instruction OldInst and everything after it, replacing it with an unconditional branch to ...
unsigned const MachineRegisterInfo * MRI
virtual bool isBranchOffsetInRange(unsigned BranchOpc, int64_t BrOffset) const
virtual unsigned getPredicationCost(const MachineInstr &MI) const
HazardRecognizer - This determines whether or not an instruction can be issued this cycle...
InstrType
Represents how an instruction should be mapped by the outliner.
virtual bool getInsertSubregLikeInputs(const MachineInstr &MI, unsigned DefIdx, RegSubRegPair &BaseReg, RegSubRegPairAndIdx &InsertedReg) const
Target-dependent implementation of getInsertSubregInputs.
unsigned insertUnconditionalBranch(MachineBasicBlock &MBB, MachineBasicBlock *DestBB, const DebugLoc &DL, int *BytesAdded=nullptr) const
MachineInstrBuilder & UseMI
virtual bool isLegalToSplitMBBAt(MachineBasicBlock &MBB, MachineBasicBlock::iterator MBBI) const
Return true if it&#39;s legal to split the given basic block at the specified instruction (i...
virtual unsigned isStoreToStackSlot(const MachineInstr &MI, int &FrameIndex, unsigned &MemBytes) const
Optional extension of isStoreToStackSlot that returns the number of bytes stored to the stack...
The information necessary to create an outlined function for some class of candidate.
virtual outliner::InstrType getOutliningType(MachineBasicBlock::iterator &MIT, unsigned Flags) const
Returns how or if MI should be outlined.
virtual unsigned getNumMicroOps(const InstrItineraryData *ItinData, const MachineInstr &MI) const
Return the number of u-operations the given machine instruction will be decoded to on the target cpu...
Interface to description of machine instruction set.
Definition: MCInstrInfo.h:23
Contains all data structures shared between the outliner implemented in MachineOutliner.cpp and target implementations of the outliner.
virtual bool verifyInstruction(const MachineInstr &MI, StringRef &ErrInfo) const
Perform target-specific instruction verification.
unsigned getCallFrameSetupOpcode() const
These methods return the opcode of the frame setup/destroy instructions if they exist (-1 otherwise)...
virtual bool isMBBSafeToOutlineFrom(MachineBasicBlock &MBB, unsigned &Flags) const
Optional target hook that returns true if MBB is safe to outline from, and returns any target-specifi...
virtual ScheduleHazardRecognizer * CreateTargetPostRAHazardRecognizer(const MachineFunction &MF) const
Allocate and return a hazard recognizer to use for by non-scheduling passes.
bool isReassociationCandidate(const MachineInstr &Inst, bool &Commuted) const
Return true if the input Inst is part of a chain of dependent ops that are suitable for reassociatio...
virtual void setSpecialOperandAttr(MachineInstr &OldMI1, MachineInstr &OldMI2, MachineInstr &NewMI1, MachineInstr &NewMI2) const
This is an architecture-specific helper function of reassociateOps.
A set of register units.
bool isPredicable() const
Return true if this instruction has a predicate operand that controls execution.
Definition: MCInstrDesc.h:307
virtual bool shouldSink(const MachineInstr &MI) const
Return true if the instruction should be sunk by MachineSink.
virtual unsigned getInlineAsmLength(const char *Str, const MCAsmInfo &MAI) const
Measure the specified inline asm to determine an approximation of its length.
MachineCombinerPattern
These are instruction patterns matched by the machine combiner pass.
virtual bool getStackSlotRange(const TargetRegisterClass *RC, unsigned SubIdx, unsigned &Size, unsigned &Offset, const MachineFunction &MF) const
Compute the size in bytes and offset within a stack slot of a spilled register or subregister...
bool isCopy() const
virtual bool PredicateInstruction(MachineInstr &MI, ArrayRef< MachineOperand > Pred) const
Convert the instruction into a predicated instruction.
virtual bool analyzeBranch(MachineBasicBlock &MBB, MachineBasicBlock *&TBB, MachineBasicBlock *&FBB, SmallVectorImpl< MachineOperand > &Cond, bool AllowModify=false) const
Analyze the branching code at the end of MBB, returning true if it cannot be understood (e...
TargetRegisterInfo base class - We assume that the target defines a static array of TargetRegisterDes...
virtual bool canMakeTailCallConditional(SmallVectorImpl< MachineOperand > &Cond, const MachineInstr &TailCall) const
Returns true if the tail call can be made conditional on BranchCond.
virtual bool isPredicated(const MachineInstr &MI) const
Returns true if the instruction is already predicated.
#define llvm_unreachable(msg)
Marks that the current location is not supposed to be reachable.
virtual bool analyzeCompare(const MachineInstr &MI, unsigned &SrcReg, unsigned &SrcReg2, int &Mask, int &Value) const
For a comparison instruction, return the source registers in SrcReg and SrcReg2 if having two registe...
virtual bool isStackSlotCopy(const MachineInstr &MI, int &DestFrameIndex, int &SrcFrameIndex) const
Return true if the specified machine instruction is a copy of one stack slot to another and has no ot...
virtual bool isAsCheapAsAMove(const MachineInstr &MI) const
Return true if the instruction is as cheap as a move instruction.
virtual unsigned getAddressSpaceForPseudoSourceKind(unsigned Kind) const
getAddressSpaceForPseudoSourceKind - Given the kind of memory (e.g.
virtual bool hasLoadFromStackSlot(const MachineInstr &MI, SmallVectorImpl< const MachineMemOperand *> &Accesses) const
If the specified machine instruction has a load from a stack slot, return true along with the FrameIn...
virtual bool isSchedulingBoundary(const MachineInstr &MI, const MachineBasicBlock *MBB, const MachineFunction &MF) const
Test if the given instruction should be considered a scheduling boundary.
virtual bool optimizeCondBranch(MachineInstr &MI) const
virtual void buildOutlinedFrame(MachineBasicBlock &MBB, MachineFunction &MF, const outliner::OutlinedFunction &OF) const
Insert a custom frame for outlined functions.
void reassociateOps(MachineInstr &Root, MachineInstr &Prev, MachineCombinerPattern Pattern, SmallVectorImpl< MachineInstr *> &InsInstrs, SmallVectorImpl< MachineInstr *> &DelInstrs, DenseMap< unsigned, unsigned > &InstrIdxForVirtReg) const
Attempt to reassociate Root and Prev according to Pattern to reduce critical path length...
This is used to represent a portion of an LLVM function in a low-level Data Dependence DAG representa...
Definition: SelectionDAG.h:221
MachineOperand class - Representation of each machine instruction operand.
A pair composed of a register and a sub-register index.
virtual bool hasHighOperandLatency(const TargetSchedModel &SchedModel, const MachineRegisterInfo *MRI, const MachineInstr &DefMI, unsigned DefIdx, const MachineInstr &UseMI, unsigned UseIdx) const
Compute operand latency between a def of &#39;Reg&#39; and a use in the current loop.
MachineInstrBuilder MachineInstrBuilder & DefMI
Predicate
Predicate - These are "(BI << 5) | BO" for various predicates.
Definition: PPCPredicates.h:26
bool isFrameInstr(const MachineInstr &I) const
Returns true if the argument is a frame pseudo instruction.
virtual bool isProfitableToDupForIfCvt(MachineBasicBlock &MBB, unsigned NumCycles, BranchProbability Probability) const
Return true if it&#39;s profitable for if-converter to duplicate instructions of specified accumulated in...
virtual bool isCoalescableExtInstr(const MachineInstr &MI, unsigned &SrcReg, unsigned &DstReg, unsigned &SubIdx) const
Return true if the instruction is a "coalescable" extension instruction.
Represents one node in the SelectionDAG.
virtual unsigned insertIndirectBranch(MachineBasicBlock &MBB, MachineBasicBlock &NewDestBB, const DebugLoc &DL, int64_t BrOffset=0, RegScavenger *RS=nullptr) const
Insert an unconditional indirect branch at the end of MBB to NewDestBB.
virtual bool isTailCall(const MachineInstr &Inst) const
Determines whether Inst is a tail call instruction.
int64_t getFrameSize(const MachineInstr &I) const
Returns size of the frame associated with the given frame instruction.
int64_t getImm() const
bool getRegSequenceInputs(const MachineInstr &MI, unsigned DefIdx, SmallVectorImpl< RegSubRegPairAndIdx > &InputRegs) const
Build the equivalent inputs of a REG_SEQUENCE for the given MI and DefIdx.
virtual bool findCommutedOpIndices(MachineInstr &MI, unsigned &SrcOpIdx1, unsigned &SrcOpIdx2) const
Returns true iff the routine could find two commutable operands in the given machine instruction...
virtual bool isPredicable(const MachineInstr &MI) const
Return true if the specified instruction can be predicated.
unsigned getReturnOpcode() const
virtual void storeRegToStackSlot(MachineBasicBlock &MBB, MachineBasicBlock::iterator MI, unsigned SrcReg, bool isKill, int FrameIndex, const TargetRegisterClass *RC, const TargetRegisterInfo *TRI) const
Store the specified register of the given register class to the specified stack frame index...
MachineRegisterInfo - Keep track of information for virtual and physical registers, including vreg register classes, use/def chains for registers, etc.
TargetSubtargetInfo - Generic base class for all target subtargets.
virtual void insertNoop(MachineBasicBlock &MBB, MachineBasicBlock::iterator MI) const
Insert a noop into the instruction stream at the specified point.
Representation of each machine instruction.
Definition: MachineInstr.h:63
static TargetInstrInfo::RegSubRegPair getEmptyKey()
virtual ArrayRef< std::pair< int, const char * > > getSerializableTargetIndices() const
Return an array that contains the ids of the target indices (used for the TargetIndex machine operand...
virtual bool analyzeLoop(MachineLoop &L, MachineInstr *&IndVarInst, MachineInstr *&CmpInst) const
Analyze the loop code, return true if it cannot be understoo.
static bool fixCommutedOpIndices(unsigned &ResultIdx1, unsigned &ResultIdx2, unsigned CommutableOpIdx1, unsigned CommutableOpIdx2)
Assigns the (CommutableOpIdx1, CommutableOpIdx2) pair of commutable operand indices to (ResultIdx1...
bool isCopyInstr(const MachineInstr &MI, const MachineOperand *&Source, const MachineOperand *&Destination) const
If the specific machine instruction is a instruction that moves/copies value from one register to ano...
virtual bool hasStoreToStackSlot(const MachineInstr &MI, SmallVectorImpl< const MachineMemOperand *> &Accesses) const
If the specified machine instruction has a store to a stack slot, return true along with the FrameInd...
virtual bool SubsumesPredicate(ArrayRef< MachineOperand > Pred1, ArrayRef< MachineOperand > Pred2) const
Returns true if the first specified predicate subsumes the second, e.g.
virtual void breakPartialRegDependency(MachineInstr &MI, unsigned OpNum, const TargetRegisterInfo *TRI) const
Insert a dependency-breaking instruction before MI to eliminate an unwanted dependency on OpNum...
virtual ScheduleHazardRecognizer * CreateTargetHazardRecognizer(const TargetSubtargetInfo *STI, const ScheduleDAG *DAG) const
Allocate and return a hazard recognizer to use for this target when scheduling the machine instructio...
virtual unsigned isLoadFromStackSlotPostFE(const MachineInstr &MI, int &FrameIndex) const
Check for post-frame ptr elimination stack locations as well.
virtual bool analyzeBranchPredicate(MachineBasicBlock &MBB, MachineBranchPredicate &MBP, bool AllowModify=false) const
Analyze the branching code at the end of MBB and parse it into the MachineBranchPredicate structure i...
virtual void genAlternativeCodeSequence(MachineInstr &Root, MachineCombinerPattern Pattern, SmallVectorImpl< MachineInstr *> &InsInstrs, SmallVectorImpl< MachineInstr *> &DelInstrs, DenseMap< unsigned, unsigned > &InstIdxForVirtReg) const
When getMachineCombinerPatterns() finds patterns, this function generates the instructions that could...
virtual unsigned isLoadFromStackSlot(const MachineInstr &MI, int &FrameIndex) const
If the specified machine instruction is a direct load from a stack slot, return the virtual or physic...
static MachineOperand CreateImm(int64_t Val)
#define I(x, y, z)
Definition: MD5.cpp:58
#define N
virtual bool produceSameValue(const MachineInstr &MI0, const MachineInstr &MI1, const MachineRegisterInfo *MRI=nullptr) const
Return true if two machine instructions would produce identical values.
bool isZeroCost(unsigned Opcode) const
Return true for pseudo instructions that don&#39;t consume any machine resources in their current form...
virtual std::pair< uint16_t, uint16_t > getExecutionDomain(const MachineInstr &MI) const
Return the current execution domain and bit mask of possible domains for instruction.
uint32_t Size
Definition: Profile.cpp:46
virtual MachineInstr * optimizeLoadInstr(MachineInstr &MI, const MachineRegisterInfo *MRI, unsigned &FoldAsLoadDefReg, MachineInstr *&DefMI) const
Try to remove the load by folding it to a register operand at the use.
const unsigned Kind
virtual bool reverseBranchCondition(SmallVectorImpl< MachineOperand > &Cond) const
Reverses the branch condition of the specified condition list, returning false on success and true if...
virtual bool isSubregFoldable() const
Check whether the target can fold a load that feeds a subreg operand (or a subreg operand that feeds ...
bool mayLoad(QueryType Type=AnyInBundle) const
Return true if this instruction could possibly read memory.
Definition: MachineInstr.h:806
virtual bool shouldClusterMemOps(MachineOperand &BaseOp1, MachineOperand &BaseOp2, unsigned NumLoads) const
Returns true if the two given memory operations should be scheduled adjacent.
assert(ImpDefSCC.getReg()==AMDGPU::SCC &&ImpDefSCC.isDef())
bool isFrameSetup(const MachineInstr &I) const
Returns true if the argument is a frame setup pseudo instruction.
virtual bool isProfitableToIfCvt(MachineBasicBlock &TMBB, unsigned NumTCycles, unsigned ExtraTCycles, MachineBasicBlock &FMBB, unsigned NumFCycles, unsigned ExtraFCycles, BranchProbability Probability) const
Second variant of isProfitableToIfCvt.
LLVM Value Representation.
Definition: Value.h:72
virtual MachineBasicBlock * getBranchDestBlock(const MachineInstr &MI) const
bool hasReassociableSibling(const MachineInstr &Inst, bool &Commuted) const
Return true when Inst has reassociable sibling.
virtual ScheduleHazardRecognizer * CreateTargetMIHazardRecognizer(const InstrItineraryData *, const ScheduleDAG *DAG) const
Allocate and return a hazard recognizer to use for this target when scheduling the machine instructio...
virtual bool isAssociativeAndCommutative(const MachineInstr &Inst) const
Return true when Inst is both associative and commutative.
std::underlying_type< E >::type Mask()
Get a bitmask with 1s in all places up to the high-order bit of E&#39;s largest value.
Definition: BitmaskEnum.h:80
virtual void replaceBranchWithTailCall(MachineBasicBlock &MBB, SmallVectorImpl< MachineOperand > &Cond, const MachineInstr &TailCall) const
Replace the conditional branch in MBB with a conditional tail call.
IRTranslator LLVM IR MI
virtual bool isPostIncrement(const MachineInstr &MI) const
Return true for post-incremented instructions.
StringRef - Represent a constant reference to a string, i.e.
Definition: StringRef.h:48
virtual bool isUnconditionalTailCall(const MachineInstr &MI) const
Returns true if MI is an unconditional tail call.
virtual bool getRegSequenceLikeInputs(const MachineInstr &MI, unsigned DefIdx, SmallVectorImpl< RegSubRegPairAndIdx > &InputRegs) const
Target-dependent implementation of getRegSequenceInputs.
bool getExtractSubregInputs(const MachineInstr &MI, unsigned DefIdx, RegSubRegPairAndIdx &InputReg) const
Build the equivalent inputs of a EXTRACT_SUBREG for the given MI and DefIdx.
virtual bool canCopyGluedNodeDuringSchedule(SDNode *N) const
Return true if the given SDNode can be copied during scheduling even if it has glue.
const MachineOperand & getOperand(unsigned i) const
Definition: MachineInstr.h:413
bool isTriviallyReMaterializable(const MachineInstr &MI, AliasAnalysis *AA=nullptr) const
Return true if the instruction is trivially rematerializable, meaning it has no side effects and requ...
virtual outliner::OutlinedFunction getOutliningCandidateInfo(std::vector< outliner::Candidate > &RepeatedSequenceLocs) const
Returns a outliner::OutlinedFunction struct containing target-specific information for a set of outli...
Machine model for scheduling, bundling, and heuristics.
Definition: MCSchedule.h:244
static bool isEqual(const TargetInstrInfo::RegSubRegPair &LHS, const TargetInstrInfo::RegSubRegPair &RHS)
bool getInsertSubregInputs(const MachineInstr &MI, unsigned DefIdx, RegSubRegPair &BaseReg, RegSubRegPairAndIdx &InsertedReg) const
Build the equivalent inputs of a INSERT_SUBREG for the given MI and DefIdx.
virtual ScheduleHazardRecognizer * CreateTargetPostRAHazardRecognizer(const InstrItineraryData *, const ScheduleDAG *DAG) const
Allocate and return a hazard recognizer to use for this target when scheduling the machine instructio...
virtual bool isProfitableToUnpredicate(MachineBasicBlock &TMBB, MachineBasicBlock &FMBB) const
Return true if it&#39;s profitable to unpredicate one side of a &#39;diamond&#39;, i.e.
int computeDefOperandLatency(const InstrItineraryData *ItinData, const MachineInstr &DefMI) const
If we can determine the operand latency from the def only, without itinerary lookup, do so.
virtual unsigned getOpcodeAfterMemoryUnfold(unsigned Opc, bool UnfoldLoad, bool UnfoldStore, unsigned *LoadRegIndex=nullptr) const
Returns the opcode of the would be new instruction after load / store are unfolded from an instructio...
virtual int getOperandLatency(const InstrItineraryData *ItinData, SDNode *DefNode, unsigned DefIdx, SDNode *UseNode, unsigned UseIdx) const
virtual void loadRegFromStackSlot(MachineBasicBlock &MBB, MachineBasicBlock::iterator MI, unsigned DestReg, int FrameIndex, const TargetRegisterClass *RC, const TargetRegisterInfo *TRI) const
Load the specified register of the given register class from the specified stack frame index...
bool isAsCheapAsAMove(QueryType Type=AllInBundle) const
Returns true if this instruction has the same cost (or less) than a move instruction.
Definition: MachineInstr.h:905
virtual bool shouldScheduleLoadsNear(SDNode *Load1, SDNode *Load2, int64_t Offset1, int64_t Offset2, unsigned NumLoads) const
This is a used by the pre-regalloc scheduler to determine (in conjunction with areLoadsFromSameBasePt...
virtual int getSPAdjust(const MachineInstr &MI) const
Returns the actual stack pointer adjustment made by an instruction as part of a call sequence...
virtual unsigned getUndefRegClearance(const MachineInstr &MI, unsigned &OpNum, const TargetRegisterInfo *TRI) const
Return the minimum clearance before an instruction that reads an unused register. ...
A pair composed of a pair of a register and a sub-register index, and another sub-register index...