Line data Source code
1 : //===- llvm/Target/TargetInstrInfo.h - Instruction Info ---------*- C++ -*-===//
2 : //
3 : // The LLVM Compiler Infrastructure
4 : //
5 : // This file is distributed under the University of Illinois Open Source
6 : // License. See LICENSE.TXT for details.
7 : //
8 : //===----------------------------------------------------------------------===//
9 : //
10 : // This file describes the target machine instruction set to the code generator.
11 : //
12 : //===----------------------------------------------------------------------===//
13 :
14 : #ifndef LLVM_TARGET_TARGETINSTRINFO_H
15 : #define LLVM_TARGET_TARGETINSTRINFO_H
16 :
17 : #include "llvm/ADT/ArrayRef.h"
18 : #include "llvm/ADT/DenseMap.h"
19 : #include "llvm/ADT/DenseMapInfo.h"
20 : #include "llvm/ADT/None.h"
21 : #include "llvm/CodeGen/LiveIntervalAnalysis.h"
22 : #include "llvm/CodeGen/MachineBasicBlock.h"
23 : #include "llvm/CodeGen/MachineCombinerPattern.h"
24 : #include "llvm/CodeGen/MachineFunction.h"
25 : #include "llvm/CodeGen/MachineInstr.h"
26 : #include "llvm/CodeGen/MachineLoopInfo.h"
27 : #include "llvm/CodeGen/MachineOperand.h"
28 : #include "llvm/MC/MCInstrInfo.h"
29 : #include "llvm/Support/BranchProbability.h"
30 : #include "llvm/Support/ErrorHandling.h"
31 : #include <cassert>
32 : #include <cstddef>
33 : #include <cstdint>
34 : #include <utility>
35 : #include <vector>
36 :
37 : namespace llvm {
38 :
39 : class DFAPacketizer;
40 : class InstrItineraryData;
41 : class LiveVariables;
42 : class MachineMemOperand;
43 : class MachineRegisterInfo;
44 : class MCAsmInfo;
45 : class MCInst;
46 : struct MCSchedModel;
47 : class Module;
48 : class ScheduleDAG;
49 : class ScheduleHazardRecognizer;
50 : class SDNode;
51 : class SelectionDAG;
52 : class RegScavenger;
53 : class TargetRegisterClass;
54 : class TargetRegisterInfo;
55 : class TargetSchedModel;
56 : class TargetSubtargetInfo;
57 :
58 : template <class T> class SmallVectorImpl;
59 :
60 : //---------------------------------------------------------------------------
61 : ///
62 : /// TargetInstrInfo - Interface to description of machine instruction set
63 : ///
64 : class TargetInstrInfo : public MCInstrInfo {
65 : public:
66 : TargetInstrInfo(unsigned CFSetupOpcode = ~0u, unsigned CFDestroyOpcode = ~0u,
67 : unsigned CatchRetOpcode = ~0u, unsigned ReturnOpcode = ~0u)
68 28523 : : CallFrameSetupOpcode(CFSetupOpcode),
69 : CallFrameDestroyOpcode(CFDestroyOpcode), CatchRetOpcode(CatchRetOpcode),
70 28523 : ReturnOpcode(ReturnOpcode) {}
71 : TargetInstrInfo(const TargetInstrInfo &) = delete;
72 : TargetInstrInfo &operator=(const TargetInstrInfo &) = delete;
73 : virtual ~TargetInstrInfo();
74 :
/// Returns true if \p Opc is a target-independent (generic) opcode,
/// i.e. it is at or below TargetOpcode::GENERIC_OP_END.
75 : static bool isGenericOpcode(unsigned Opc) {
76 : return Opc <= TargetOpcode::GENERIC_OP_END;
77 : }
78 :
79 : /// Given a machine instruction descriptor, returns the register
80 : /// class constraint for OpNum, or NULL.
81 : const TargetRegisterClass *getRegClass(const MCInstrDesc &TID, unsigned OpNum,
82 : const TargetRegisterInfo *TRI,
83 : const MachineFunction &MF) const;
84 :
85 : /// Return true if the instruction is trivially rematerializable, meaning it
86 : /// has no side effects and requires no operands that aren't always available.
87 : /// This means the only allowed uses are constants and unallocatable physical
88 : /// registers so that the instruction's result is independent of the place
89 : /// in the function.
90 871336 : bool isTriviallyReMaterializable(const MachineInstr &MI,
91 : AliasAnalysis *AA = nullptr) const {
92 2613754 : return MI.getOpcode() == TargetOpcode::IMPLICIT_DEF ||
93 1426462 : (MI.getDesc().isRematerializable() &&
94 876793 : (isReallyTriviallyReMaterializable(MI, AA) ||
95 1192749 : isReallyTriviallyReMaterializableGeneric(MI, AA)));
96 : }
97 :
98 : protected:
99 : /// For instructions with opcodes for which the M_REMATERIALIZABLE flag is
100 : /// set, this hook lets the target specify whether the instruction is actually
101 : /// trivially rematerializable, taking into consideration its operands. This
102 : /// predicate must return false if the instruction has any side effects other
103 : /// than producing a value, or if it requires any address registers that are
104 : /// not always available.
105 : /// Requirements must be checked as stated in isTriviallyReMaterializable().
106 32036 : virtual bool isReallyTriviallyReMaterializable(const MachineInstr &MI,
107 : AliasAnalysis *AA) const {
108 32036 : return false;
109 : }
110 :
111 : /// This method commutes the operands of the given machine instruction MI.
112 : /// The operands to be commuted are specified by their indices OpIdx1 and
113 : /// OpIdx2.
114 : ///
115 : /// If a target has any instructions that are commutable but require
116 : /// converting to different instructions or making non-trivial changes
117 : /// to commute them, this method can be overloaded to do that.
118 : /// The default implementation simply swaps the commutable operands.
119 : ///
120 : /// If NewMI is false, MI is modified in place and returned; otherwise, a
121 : /// new machine instruction is created and returned.
122 : ///
123 : /// Do not call this method for a non-commutable instruction.
124 : /// Even though the instruction is commutable, the method may still
125 : /// fail to commute the operands, null pointer is returned in such cases.
126 : virtual MachineInstr *commuteInstructionImpl(MachineInstr &MI, bool NewMI,
127 : unsigned OpIdx1,
128 : unsigned OpIdx2) const;
129 :
130 : /// Assigns the (CommutableOpIdx1, CommutableOpIdx2) pair of commutable
131 : /// operand indices to (ResultIdx1, ResultIdx2).
132 : /// One or both input values of the pair: (ResultIdx1, ResultIdx2) may be
133 : /// predefined to some indices or be undefined (designated by the special
134 : /// value 'CommuteAnyOperandIndex').
135 : /// The predefined result indices cannot be re-defined.
136 : /// The function returns true iff after the result pair redefinition
137 : /// the fixed result pair is equal to or equivalent to the source pair of
138 : /// indices: (CommutableOpIdx1, CommutableOpIdx2). It is assumed here that
139 : /// the pairs (x,y) and (y,x) are equivalent.
140 : static bool fixCommutedOpIndices(unsigned &ResultIdx1, unsigned &ResultIdx2,
141 : unsigned CommutableOpIdx1,
142 : unsigned CommutableOpIdx2);
143 :
144 : private:
145 : /// For instructions with opcodes for which the M_REMATERIALIZABLE flag is
146 : /// set and the target hook isReallyTriviallyReMaterializable returns false,
147 : /// this function does target-independent tests to determine if the
148 : /// instruction is really trivially rematerializable.
149 : bool isReallyTriviallyReMaterializableGeneric(const MachineInstr &MI,
150 : AliasAnalysis *AA) const;
151 :
152 : public:
153 : /// These methods return the opcode of the frame setup/destroy instructions
154 : /// if they exist (-1 otherwise). Some targets use pseudo instructions in
155 : /// order to abstract away the difference between operating with a frame
156 : /// pointer and operating without, through the use of these two instructions.
157 : ///
158 : unsigned getCallFrameSetupOpcode() const { return CallFrameSetupOpcode; }
159 : unsigned getCallFrameDestroyOpcode() const { return CallFrameDestroyOpcode; }
160 :
161 : /// Returns true if the argument is a frame pseudo instruction.
162 : bool isFrameInstr(const MachineInstr &I) const {
163 11739091 : return I.getOpcode() == getCallFrameSetupOpcode() ||
164 5590784 : I.getOpcode() == getCallFrameDestroyOpcode();
165 : }
166 :
167 : /// Returns true if the argument is a frame setup pseudo instruction.
168 : bool isFrameSetup(const MachineInstr &I) const {
169 695940 : return I.getOpcode() == getCallFrameSetupOpcode();
170 : }
171 :
172 : /// Returns size of the frame associated with the given frame instruction.
173 : /// For a frame setup instruction this is the frame space set up after
174 : /// the instruction. For a frame destroy instruction this is the frame
175 : /// freed by the caller.
176 : /// Note, in some cases a call frame (or a part of it) may be prepared prior
177 : /// to the frame setup instruction. It occurs in the calls that involve
178 : /// inalloca arguments. This function reports only the size of the frame part
179 : /// that is set up between the frame setup and destroy pseudo instructions.
180 : int64_t getFrameSize(const MachineInstr &I) const {
181 : assert(isFrameInstr(I) && "Not a frame instruction");
182 : assert(I.getOperand(0).getImm() >= 0);
183 876676 : return I.getOperand(0).getImm();
184 : }
185 :
186 : /// Returns the total frame size, which is made up of the space set up inside
187 : /// the pair of frame start-stop instructions and the space that is set up
188 : /// prior to the pair.
189 : int64_t getFrameTotalSize(const MachineInstr &I) const {
190 189226 : if (isFrameSetup(I)) {
191 : assert(I.getOperand(1).getImm() >= 0 &&
192 : "Frame size must not be negative");
193 189226 : return getFrameSize(I) + I.getOperand(1).getImm();
194 : }
195 189226 : return getFrameSize(I);
196 : }
197 :
/// Returns the opcode of the catchret pseudo instruction, or ~0u if the
/// target did not provide one to the constructor.
198 : unsigned getCatchReturnOpcode() const { return CatchRetOpcode; }
/// Returns the opcode of the return instruction, or ~0u if the target
/// did not provide one to the constructor.
199 : unsigned getReturnOpcode() const { return ReturnOpcode; }
200 :
201 : /// Returns the actual stack pointer adjustment made by an instruction
202 : /// as part of a call sequence. By default, only call frame setup/destroy
203 : /// instructions adjust the stack, but targets may want to override this
204 : /// to enable more fine-grained adjustment, or adjust by a different value.
205 : virtual int getSPAdjust(const MachineInstr &MI) const;
206 :
207 : /// Return true if the instruction is a "coalescable" extension instruction.
208 : /// That is, it's like a copy where it's legal for the source to overlap the
209 : /// destination. e.g. X86::MOVSX64rr32. If this returns true, then it's
210 : /// expected the pre-extension value is available as a subreg of the result
211 : /// register. This also returns the sub-register index in SubIdx.
212 860446 : virtual bool isCoalescableExtInstr(const MachineInstr &MI, unsigned &SrcReg,
213 : unsigned &DstReg, unsigned &SubIdx) const {
214 860446 : return false;
215 : }
216 :
217 : /// If the specified machine instruction is a direct
218 : /// load from a stack slot, return the virtual or physical register number of
219 : /// the destination along with the FrameIndex of the loaded stack slot. If
220 : /// not, return 0. This predicate must return 0 if the instruction has
221 : /// any side effects other than loading from the stack slot.
222 1655 : virtual unsigned isLoadFromStackSlot(const MachineInstr &MI,
223 : int &FrameIndex) const {
224 1655 : return 0;
225 : }
226 :
227 : /// Check for post-frame ptr elimination stack locations as well.
228 : /// This uses a heuristic so it isn't reliable for correctness.
229 708896 : virtual unsigned isLoadFromStackSlotPostFE(const MachineInstr &MI,
230 : int &FrameIndex) const {
231 708896 : return 0;
232 : }
233 :
234 : /// If the specified machine instruction has a load from a stack slot,
235 : /// return true along with the FrameIndex of the loaded stack slot and the
236 : /// machine mem operand containing the reference.
237 : /// If not, return false. Unlike isLoadFromStackSlot, this returns true for
238 : /// any instructions that loads from the stack. This is just a hint, as some
239 : /// cases may be missed.
240 : virtual bool hasLoadFromStackSlot(const MachineInstr &MI,
241 : const MachineMemOperand *&MMO,
242 : int &FrameIndex) const;
243 :
244 : /// If the specified machine instruction is a direct
245 : /// store to a stack slot, return the virtual or physical register number of
246 : /// the source reg along with the FrameIndex of the loaded stack slot. If
247 : /// not, return 0. This predicate must return 0 if the instruction has
248 : /// any side effects other than storing to the stack slot.
249 999 : virtual unsigned isStoreToStackSlot(const MachineInstr &MI,
250 : int &FrameIndex) const {
251 999 : return 0;
252 : }
253 :
254 : /// Check for post-frame ptr elimination stack locations as well.
255 : /// This uses a heuristic, so it isn't reliable for correctness.
256 689312 : virtual unsigned isStoreToStackSlotPostFE(const MachineInstr &MI,
257 : int &FrameIndex) const {
258 689312 : return 0;
259 : }
260 :
261 : /// If the specified machine instruction has a store to a stack slot,
262 : /// return true along with the FrameIndex of the accessed stack slot and the
263 : /// machine mem operand containing the reference.
264 : /// If not, return false. Unlike isStoreToStackSlot,
265 : /// this returns true for any instructions that stores to the
266 : /// stack. This is just a hint, as some cases may be missed.
267 : virtual bool hasStoreToStackSlot(const MachineInstr &MI,
268 : const MachineMemOperand *&MMO,
269 : int &FrameIndex) const;
270 :
271 : /// Return true if the specified machine instruction
272 : /// is a copy of one stack slot to another and has no other effect.
273 : /// Provide the identity of the two frame indices.
274 784849 : virtual bool isStackSlotCopy(const MachineInstr &MI, int &DestFrameIndex,
275 : int &SrcFrameIndex) const {
276 784849 : return false;
277 : }
278 :
279 : /// Compute the size in bytes and offset within a stack slot of a spilled
280 : /// register or subregister.
281 : ///
282 : /// \param [out] Size in bytes of the spilled value.
283 : /// \param [out] Offset in bytes within the stack slot.
284 : /// \returns true if both Size and Offset are successfully computed.
285 : ///
286 : /// Not all subregisters have computable spill slots. For example,
287 : /// subregisters registers may not be byte-sized, and a pair of discontiguous
288 : /// subregisters has no single offset.
289 : ///
290 : /// Targets with nontrivial bigendian implementations may need to override
291 : /// this, particularly to support spilled vector registers.
292 : virtual bool getStackSlotRange(const TargetRegisterClass *RC, unsigned SubIdx,
293 : unsigned &Size, unsigned &Offset,
294 : const MachineFunction &MF) const;
295 :
296 : /// Returns the size in bytes of the specified MachineInstr, or ~0U
297 : /// when this function is not implemented by a target.
298 0 : virtual unsigned getInstSizeInBytes(const MachineInstr &MI) const {
299 0 : return ~0U;
300 : }
301 :
302 : /// Return true if the instruction is as cheap as a move instruction.
303 : ///
304 : /// Targets for different archs need to override this, and different
305 : /// micro-architectures can also be finely tuned inside.
306 667192 : virtual bool isAsCheapAsAMove(const MachineInstr &MI) const {
307 667192 : return MI.isAsCheapAsAMove();
308 : }
309 :
310 : /// Return true if the instruction should be sunk by MachineSink.
311 : ///
312 : /// MachineSink determines on its own whether the instruction is safe to sink;
313 : /// this gives the target a hook to override the default behavior with regards
314 : /// to which instructions should be sunk.
315 2735912 : virtual bool shouldSink(const MachineInstr &MI) const { return true; }
316 :
317 : /// Re-issue the specified 'original' instruction at the
318 : /// specific location targeting a new destination register.
319 : /// The register in Orig->getOperand(0).getReg() will be substituted by
320 : /// DestReg:SubIdx. Any existing subreg index is preserved or composed with
321 : /// SubIdx.
322 : virtual void reMaterialize(MachineBasicBlock &MBB,
323 : MachineBasicBlock::iterator MI, unsigned DestReg,
324 : unsigned SubIdx, const MachineInstr &Orig,
325 : const TargetRegisterInfo &TRI) const;
326 :
327 : /// \brief Clones instruction or the whole instruction bundle \p Orig and
328 : /// insert into \p MBB before \p InsertBefore. The target may update operands
329 : /// that are required to be unique.
330 : ///
331 : /// \p Orig must not return true for MachineInstr::isNotDuplicable().
332 : virtual MachineInstr &duplicate(MachineBasicBlock &MBB,
333 : MachineBasicBlock::iterator InsertBefore, const MachineInstr &Orig) const;
334 :
335 : /// This method must be implemented by targets that
336 : /// set the M_CONVERTIBLE_TO_3_ADDR flag. When this flag is set, the target
337 : /// may be able to convert a two-address instruction into one or more true
338 : /// three-address instructions on demand. This allows the X86 target (for
339 : /// example) to convert ADD and SHL instructions into LEA instructions if they
340 : /// would require register copies due to two-addressness.
341 : ///
342 : /// This method returns a null pointer if the transformation cannot be
343 : /// performed, otherwise it returns the last new instruction.
344 : ///
345 0 : virtual MachineInstr *convertToThreeAddress(MachineFunction::iterator &MFI,
346 : MachineInstr &MI,
347 : LiveVariables *LV) const {
348 0 : return nullptr;
349 : }
350 :
351 : // This constant can be used as an input value of operand index passed to
352 : // the method findCommutedOpIndices() to tell the method that the
353 : // corresponding operand index is not pre-defined and that the method
354 : // can pick any commutable operand.
355 : static const unsigned CommuteAnyOperandIndex = ~0U;
356 :
357 : /// This method commutes the operands of the given machine instruction MI.
358 : ///
359 : /// The operands to be commuted are specified by their indices OpIdx1 and
360 : /// OpIdx2. OpIdx1 and OpIdx2 arguments may be set to a special value
361 : /// 'CommuteAnyOperandIndex', which means that the method is free to choose
362 : /// any arbitrarily chosen commutable operand. If both arguments are set to
363 : /// 'CommuteAnyOperandIndex' then the method looks for 2 different commutable
364 : /// operands; then commutes them if such operands could be found.
365 : ///
366 : /// If NewMI is false, MI is modified in place and returned; otherwise, a
367 : /// new machine instruction is created and returned.
368 : ///
369 : /// Do not call this method for a non-commutable instruction or
370 : /// for non-commutable operands.
371 : /// Even though the instruction is commutable, the method may still
372 : /// fail to commute the operands, null pointer is returned in such cases.
373 : MachineInstr *
374 : commuteInstruction(MachineInstr &MI, bool NewMI = false,
375 : unsigned OpIdx1 = CommuteAnyOperandIndex,
376 : unsigned OpIdx2 = CommuteAnyOperandIndex) const;
377 :
378 : /// Returns true iff the routine could find two commutable operands in the
379 : /// given machine instruction.
380 : /// The 'SrcOpIdx1' and 'SrcOpIdx2' are INPUT and OUTPUT arguments.
381 : /// If any of the INPUT values is set to the special value
382 : /// 'CommuteAnyOperandIndex' then the method arbitrarily picks a commutable
383 : /// operand, then returns its index in the corresponding argument.
384 : /// If both of INPUT values are set to 'CommuteAnyOperandIndex' then method
385 : /// looks for 2 commutable operands.
386 : /// If INPUT values refer to some operands of MI, then the method simply
387 : /// returns true if the corresponding operands are commutable and returns
388 : /// false otherwise.
389 : ///
390 : /// For example, calling this method this way:
391 : /// unsigned Op1 = 1, Op2 = CommuteAnyOperandIndex;
392 : /// findCommutedOpIndices(MI, Op1, Op2);
393 : /// can be interpreted as a query asking to find an operand that would be
394 : /// commutable with the operand#1.
395 : virtual bool findCommutedOpIndices(MachineInstr &MI, unsigned &SrcOpIdx1,
396 : unsigned &SrcOpIdx2) const;
397 :
398 : /// A pair composed of a register and a sub-register index.
399 : /// Used to give some type checking when modeling Reg:SubReg.
400 : struct RegSubRegPair {
401 : unsigned Reg;
402 : unsigned SubReg;
403 :
404 : RegSubRegPair(unsigned Reg = 0, unsigned SubReg = 0)
405 2461666 : : Reg(Reg), SubReg(SubReg) {}
406 : };
407 :
408 : /// A pair composed of a pair of a register and a sub-register index,
409 : /// and another sub-register index.
410 : /// Used to give some type checking when modeling Reg:SubReg1, SubReg2.
411 : struct RegSubRegPairAndIdx : RegSubRegPair {
412 : unsigned SubIdx;
413 :
414 : RegSubRegPairAndIdx(unsigned Reg = 0, unsigned SubReg = 0,
415 : unsigned SubIdx = 0)
416 1073532 : : RegSubRegPair(Reg, SubReg), SubIdx(SubIdx) {}
417 : };
418 :
419 : /// Build the equivalent inputs of a REG_SEQUENCE for the given \p MI
420 : /// and \p DefIdx.
421 : /// \p [out] InputRegs of the equivalent REG_SEQUENCE. Each element of
422 : /// the list is modeled as <Reg:SubReg, SubIdx>.
423 : /// E.g., REG_SEQUENCE vreg1:sub1, sub0, vreg2, sub1 would produce
424 : /// two elements:
425 : /// - vreg1:sub1, sub0
426 : /// - vreg2<:0>, sub1
427 : ///
428 : /// \returns true if it is possible to build such an input sequence
429 : /// with the pair \p MI, \p DefIdx. False otherwise.
430 : ///
431 : /// \pre MI.isRegSequence() or MI.isRegSequenceLike().
432 : ///
433 : /// \note The generic implementation does not provide any support for
434 : /// MI.isRegSequenceLike(). In other words, one has to override
435 : /// getRegSequenceLikeInputs for target specific instructions.
436 : bool
437 : getRegSequenceInputs(const MachineInstr &MI, unsigned DefIdx,
438 : SmallVectorImpl<RegSubRegPairAndIdx> &InputRegs) const;
439 :
440 : /// Build the equivalent inputs of a EXTRACT_SUBREG for the given \p MI
441 : /// and \p DefIdx.
442 : /// \p [out] InputReg of the equivalent EXTRACT_SUBREG.
443 : /// E.g., EXTRACT_SUBREG vreg1:sub1, sub0, sub1 would produce:
444 : /// - vreg1:sub1, sub0
445 : ///
446 : /// \returns true if it is possible to build such an input sequence
447 : /// with the pair \p MI, \p DefIdx. False otherwise.
448 : ///
449 : /// \pre MI.isExtractSubreg() or MI.isExtractSubregLike().
450 : ///
451 : /// \note The generic implementation does not provide any support for
452 : /// MI.isExtractSubregLike(). In other words, one has to override
453 : /// getExtractSubregLikeInputs for target specific instructions.
454 : bool getExtractSubregInputs(const MachineInstr &MI, unsigned DefIdx,
455 : RegSubRegPairAndIdx &InputReg) const;
456 :
457 : /// Build the equivalent inputs of a INSERT_SUBREG for the given \p MI
458 : /// and \p DefIdx.
459 : /// \p [out] BaseReg and \p [out] InsertedReg contain
460 : /// the equivalent inputs of INSERT_SUBREG.
461 : /// E.g., INSERT_SUBREG vreg0:sub0, vreg1:sub1, sub3 would produce:
462 : /// - BaseReg: vreg0:sub0
463 : /// - InsertedReg: vreg1:sub1, sub3
464 : ///
465 : /// \returns true if it is possible to build such an input sequence
466 : /// with the pair \p MI, \p DefIdx. False otherwise.
467 : ///
468 : /// \pre MI.isInsertSubreg() or MI.isInsertSubregLike().
469 : ///
470 : /// \note The generic implementation does not provide any support for
471 : /// MI.isInsertSubregLike(). In other words, one has to override
472 : /// getInsertSubregLikeInputs for target specific instructions.
473 : bool getInsertSubregInputs(const MachineInstr &MI, unsigned DefIdx,
474 : RegSubRegPair &BaseReg,
475 : RegSubRegPairAndIdx &InsertedReg) const;
476 :
477 : /// Return true if two machine instructions would produce identical values.
478 : /// By default, this is only true when the two instructions
479 : /// are deemed identical except for defs. If this function is called when the
480 : /// IR is still in SSA form, the caller can pass the MachineRegisterInfo for
481 : /// aggressive checks.
482 : virtual bool produceSameValue(const MachineInstr &MI0,
483 : const MachineInstr &MI1,
484 : const MachineRegisterInfo *MRI = nullptr) const;
485 :
486 : /// \returns true if a branch from an instruction with opcode \p BranchOpc
487 : /// bytes is capable of jumping to a position \p BrOffset bytes away.
488 0 : virtual bool isBranchOffsetInRange(unsigned BranchOpc,
489 : int64_t BrOffset) const {
490 0 : llvm_unreachable("target did not implement");
491 : }
492 :
493 : /// \returns The block that branch instruction \p MI jumps to.
494 0 : virtual MachineBasicBlock *getBranchDestBlock(const MachineInstr &MI) const {
495 0 : llvm_unreachable("target did not implement");
496 : }
497 :
498 : /// Insert an unconditional indirect branch at the end of \p MBB to \p
499 : /// NewDestBB. \p BrOffset indicates the offset of \p NewDestBB relative to
500 : /// the offset of the position to insert the new branch.
501 : ///
502 : /// \returns The number of bytes added to the block.
503 0 : virtual unsigned insertIndirectBranch(MachineBasicBlock &MBB,
504 : MachineBasicBlock &NewDestBB,
505 : const DebugLoc &DL,
506 : int64_t BrOffset = 0,
507 : RegScavenger *RS = nullptr) const {
508 0 : llvm_unreachable("target did not implement");
509 : }
510 :
511 : /// Analyze the branching code at the end of MBB, returning
512 : /// true if it cannot be understood (e.g. it's a switch dispatch or isn't
513 : /// implemented for a target). Upon success, this returns false and returns
514 : /// with the following information in various cases:
515 : ///
516 : /// 1. If this block ends with no branches (it just falls through to its succ)
517 : /// just return false, leaving TBB/FBB null.
518 : /// 2. If this block ends with only an unconditional branch, it sets TBB to be
519 : /// the destination block.
520 : /// 3. If this block ends with a conditional branch and it falls through to a
521 : /// successor block, it sets TBB to be the branch destination block and a
522 : /// list of operands that evaluate the condition. These operands can be
523 : /// passed to other TargetInstrInfo methods to create new branches.
524 : /// 4. If this block ends with a conditional branch followed by an
525 : /// unconditional branch, it returns the 'true' destination in TBB, the
526 : /// 'false' destination in FBB, and a list of operands that evaluate the
527 : /// condition. These operands can be passed to other TargetInstrInfo
528 : /// methods to create new branches.
529 : ///
530 : /// Note that removeBranch and insertBranch must be implemented to support
531 : /// cases where this method returns success.
532 : ///
533 : /// If AllowModify is true, then this routine is allowed to modify the basic
534 : /// block (e.g. delete instructions after the unconditional branch).
535 : ///
536 : /// The CFG information in MBB.Predecessors and MBB.Successors must be valid
537 : /// before calling this function.
538 0 : virtual bool analyzeBranch(MachineBasicBlock &MBB, MachineBasicBlock *&TBB,
539 : MachineBasicBlock *&FBB,
540 : SmallVectorImpl<MachineOperand> &Cond,
541 : bool AllowModify = false) const {
542 0 : return true;
543 : }
544 :
545 : /// Represents a predicate at the MachineFunction level. The control flow a
546 : /// MachineBranchPredicate represents is:
547 : ///
548 : /// Reg <def>= LHS `Predicate` RHS == ConditionDef
549 : /// if Reg then goto TrueDest else goto FalseDest
550 : ///
551 : struct MachineBranchPredicate {
552 : enum ComparePredicate {
553 : PRED_EQ, // True if two values are equal
554 : PRED_NE, // True if two values are not equal
555 : PRED_INVALID // Sentinel value
556 : };
557 :
558 : ComparePredicate Predicate = PRED_INVALID;
559 : MachineOperand LHS = MachineOperand::CreateImm(0);
560 : MachineOperand RHS = MachineOperand::CreateImm(0);
561 : MachineBasicBlock *TrueDest = nullptr;
562 : MachineBasicBlock *FalseDest = nullptr;
563 : MachineInstr *ConditionDef = nullptr;
564 :
565 : /// SingleUseCondition is true if ConditionDef is dead except for the
566 : /// branch(es) at the end of the basic block.
567 : ///
568 : bool SingleUseCondition = false;
569 :
570 171 : explicit MachineBranchPredicate() = default;
571 : };
572 :
573 : /// Analyze the branching code at the end of MBB and parse it into the
574 : /// MachineBranchPredicate structure if possible. Returns false on success
575 : /// and true on failure.
576 : ///
577 : /// If AllowModify is true, then this routine is allowed to modify the basic
578 : /// block (e.g. delete instructions after the unconditional branch).
579 : ///
580 0 : virtual bool analyzeBranchPredicate(MachineBasicBlock &MBB,
581 : MachineBranchPredicate &MBP,
582 : bool AllowModify = false) const {
583 0 : return true;
584 : }
585 :
586 : /// Remove the branching code at the end of the specific MBB.
587 : /// This is only invoked in cases where AnalyzeBranch returns success. It
588 : /// returns the number of instructions that were removed.
589 : /// If \p BytesRemoved is non-null, report the change in code size from the
590 : /// removed instructions.
591 0 : virtual unsigned removeBranch(MachineBasicBlock &MBB,
592 : int *BytesRemoved = nullptr) const {
593 0 : llvm_unreachable("Target didn't implement TargetInstrInfo::removeBranch!");
594 : }
595 :
596 : /// Insert branch code into the end of the specified MachineBasicBlock. The
597 : /// operands to this method are the same as those returned by AnalyzeBranch.
598 : /// This is only invoked in cases where AnalyzeBranch returns success. It
599 : /// returns the number of instructions inserted. If \p BytesAdded is non-null,
600 : /// report the change in code size from the added instructions.
601 : ///
602 : /// It is also invoked by tail merging to add unconditional branches in
603 : /// cases where AnalyzeBranch doesn't apply because there was no original
604 : /// branch to analyze. At least this much must be implemented, else tail
605 : /// merging needs to be disabled.
606 : ///
607 : /// The CFG information in MBB.Predecessors and MBB.Successors must be valid
608 : /// before calling this function.
609 0 : virtual unsigned insertBranch(MachineBasicBlock &MBB, MachineBasicBlock *TBB,
610 : MachineBasicBlock *FBB,
611 : ArrayRef<MachineOperand> Cond,
612 : const DebugLoc &DL,
613 : int *BytesAdded = nullptr) const {
614 0 : llvm_unreachable("Target didn't implement TargetInstrInfo::insertBranch!");
615 : }
616 :
/// Convenience wrapper around insertBranch that emits a single
/// unconditional branch from \p MBB to \p DestBB (no condition operands,
/// no false block). Returns the number of instructions inserted, and
/// reports the code-size change in \p BytesAdded if it is non-null.
617 : unsigned insertUnconditionalBranch(MachineBasicBlock &MBB,
618 : MachineBasicBlock *DestBB,
619 : const DebugLoc &DL,
620 : int *BytesAdded = nullptr) const {
621 3 : return insertBranch(MBB, DestBB, nullptr, ArrayRef<MachineOperand>(), DL,
622 3 : BytesAdded);
623 : }
624 :
625 : /// Analyze the loop code, return true if it cannot be understood. Upon
626 : /// success, this function returns false and returns information about the
627 : /// induction variable and compare instruction used at the end.
628 0 : virtual bool analyzeLoop(MachineLoop &L, MachineInstr *&IndVarInst,
629 : MachineInstr *&CmpInst) const {
630 0 : return true;
631 : }
632 :
633 : /// Generate code to reduce the loop iteration by one and check if the loop is
634 : /// finished. Return the value/register of the new loop count. We need
635 : /// this function when peeling off one or more iterations of a loop. This
636 : /// function assumes the nth iteration is peeled first.
637 0 : virtual unsigned reduceLoopCount(MachineBasicBlock &MBB, MachineInstr *IndVar,
638 : MachineInstr &Cmp,
639 : SmallVectorImpl<MachineOperand> &Cond,
640 : SmallVectorImpl<MachineInstr *> &PrevInsts,
641 : unsigned Iter, unsigned MaxIter) const {
642 0 : llvm_unreachable("Target didn't implement ReduceLoopCount");
643 : }
644 :
645 : /// Delete the instruction OldInst and everything after it, replacing it with
646 : /// an unconditional branch to NewDest. This is used by the tail merging pass.
647 : virtual void ReplaceTailWithBranchTo(MachineBasicBlock::iterator Tail,
648 : MachineBasicBlock *NewDest) const;
649 :
650 : /// Return true if it's legal to split the given basic
651 : /// block at the specified instruction (i.e. instruction would be the start
652 : /// of a new basic block).
653 3071 : virtual bool isLegalToSplitMBBAt(MachineBasicBlock &MBB,
654 : MachineBasicBlock::iterator MBBI) const {
655 3071 : return true;
656 : }
657 :
658 : /// Return true if it's profitable to predicate
659 : /// instructions with accumulated instruction latency of "NumCycles"
660 : /// of the specified basic block, where the probability of the instructions
661 : /// being executed is given by Probability, and Confidence is a measure
662 : /// of our confidence that it will be properly predicted.
663 0 : virtual bool isProfitableToIfCvt(MachineBasicBlock &MBB, unsigned NumCycles,
664 : unsigned ExtraPredCycles,
665 : BranchProbability Probability) const {
666 0 : return false;
667 : }
668 :
  /// Second variant of isProfitableToIfCvt. This one
  /// checks for the case where two basic blocks from true and false path
  /// of an if-then-else (diamond) are predicated on mutually exclusive
  /// predicates, where the probability of the true path being taken is given
  /// by Probability, and Confidence is a measure of our confidence that it
  /// will be properly predicted.
  virtual bool isProfitableToIfCvt(MachineBasicBlock &TMBB, unsigned NumTCycles,
                                   unsigned ExtraTCycles,
                                   MachineBasicBlock &FMBB, unsigned NumFCycles,
                                   unsigned ExtraFCycles,
                                   BranchProbability Probability) const {
    return false;
  }
682 :
683 : /// Return true if it's profitable for if-converter to duplicate instructions
684 : /// of specified accumulated instruction latencies in the specified MBB to
685 : /// enable if-conversion.
686 : /// The probability of the instructions being executed is given by
687 : /// Probability, and Confidence is a measure of our confidence that it
688 : /// will be properly predicted.
689 0 : virtual bool isProfitableToDupForIfCvt(MachineBasicBlock &MBB,
690 : unsigned NumCycles,
691 : BranchProbability Probability) const {
692 0 : return false;
693 : }
694 :
  /// Return true if it's profitable to unpredicate
  /// one side of a 'diamond', i.e. two sides of if-else predicated on mutually
  /// exclusive predicates.
  /// e.g.
  ///   subeq  r0, r1, #1
  ///   addne  r0, r1, #1
  /// =>
  ///   sub    r0, r1, #1
  ///   addne  r0, r1, #1
  ///
  /// This may be profitable if conditional instructions are always executed.
  virtual bool isProfitableToUnpredicate(MachineBasicBlock &TMBB,
                                         MachineBasicBlock &FMBB) const {
    return false;
  }
710 :
711 : /// Return true if it is possible to insert a select
712 : /// instruction that chooses between TrueReg and FalseReg based on the
713 : /// condition code in Cond.
714 : ///
715 : /// When successful, also return the latency in cycles from TrueReg,
716 : /// FalseReg, and Cond to the destination register. In most cases, a select
717 : /// instruction will be 1 cycle, so CondCycles = TrueCycles = FalseCycles = 1
718 : ///
719 : /// Some x86 implementations have 2-cycle cmov instructions.
720 : ///
721 : /// @param MBB Block where select instruction would be inserted.
722 : /// @param Cond Condition returned by AnalyzeBranch.
723 : /// @param TrueReg Virtual register to select when Cond is true.
724 : /// @param FalseReg Virtual register to select when Cond is false.
725 : /// @param CondCycles Latency from Cond+Branch to select output.
726 : /// @param TrueCycles Latency from TrueReg to select output.
727 : /// @param FalseCycles Latency from FalseReg to select output.
728 0 : virtual bool canInsertSelect(const MachineBasicBlock &MBB,
729 : ArrayRef<MachineOperand> Cond, unsigned TrueReg,
730 : unsigned FalseReg, int &CondCycles,
731 : int &TrueCycles, int &FalseCycles) const {
732 0 : return false;
733 : }
734 :
  /// Insert a select instruction into MBB before I that will copy TrueReg to
  /// DstReg when Cond is true, and FalseReg to DstReg when Cond is false.
  ///
  /// This function can only be called after canInsertSelect() returned true.
  /// The condition in Cond comes from AnalyzeBranch, and it can be assumed
  /// that the same flags or registers required by Cond are available at the
  /// insertion point.
  ///
  /// @param MBB      Block where select instruction should be inserted.
  /// @param I        Insertion point.
  /// @param DL       Source location for debugging.
  /// @param DstReg   Virtual register to be defined by select instruction.
  /// @param Cond     Condition as computed by AnalyzeBranch.
  /// @param TrueReg  Virtual register to copy when Cond is true.
  /// @param FalseReg Virtual register to copy when Cond is false.
  virtual void insertSelect(MachineBasicBlock &MBB,
                            MachineBasicBlock::iterator I, const DebugLoc &DL,
                            unsigned DstReg, ArrayRef<MachineOperand> Cond,
                            unsigned TrueReg, unsigned FalseReg) const {
    llvm_unreachable("Target didn't implement TargetInstrInfo::insertSelect!");
  }
756 :
757 : /// Analyze the given select instruction, returning true if
758 : /// it cannot be understood. It is assumed that MI->isSelect() is true.
759 : ///
760 : /// When successful, return the controlling condition and the operands that
761 : /// determine the true and false result values.
762 : ///
763 : /// Result = SELECT Cond, TrueOp, FalseOp
764 : ///
765 : /// Some targets can optimize select instructions, for example by predicating
766 : /// the instruction defining one of the operands. Such targets should set
767 : /// Optimizable.
768 : ///
769 : /// @param MI Select instruction to analyze.
770 : /// @param Cond Condition controlling the select.
771 : /// @param TrueOp Operand number of the value selected when Cond is true.
772 : /// @param FalseOp Operand number of the value selected when Cond is false.
773 : /// @param Optimizable Returned as true if MI is optimizable.
774 : /// @returns False on success.
775 334 : virtual bool analyzeSelect(const MachineInstr &MI,
776 : SmallVectorImpl<MachineOperand> &Cond,
777 : unsigned &TrueOp, unsigned &FalseOp,
778 : bool &Optimizable) const {
779 : assert(MI.getDesc().isSelect() && "MI must be a select instruction");
780 334 : return true;
781 : }
782 :
783 : /// Given a select instruction that was understood by
784 : /// analyzeSelect and returned Optimizable = true, attempt to optimize MI by
785 : /// merging it with one of its operands. Returns NULL on failure.
786 : ///
787 : /// When successful, returns the new select instruction. The client is
788 : /// responsible for deleting MI.
789 : ///
790 : /// If both sides of the select can be optimized, PreferFalse is used to pick
791 : /// a side.
792 : ///
793 : /// @param MI Optimizable select instruction.
794 : /// @param NewMIs Set that record all MIs in the basic block up to \p
795 : /// MI. Has to be updated with any newly created MI or deleted ones.
796 : /// @param PreferFalse Try to optimize FalseOp instead of TrueOp.
797 : /// @returns Optimized instruction or NULL.
798 0 : virtual MachineInstr *optimizeSelect(MachineInstr &MI,
799 : SmallPtrSetImpl<MachineInstr *> &NewMIs,
800 : bool PreferFalse = false) const {
801 : // This function must be implemented if Optimizable is ever set.
802 0 : llvm_unreachable("Target must implement TargetInstrInfo::optimizeSelect!");
803 : }
804 :
805 : /// Emit instructions to copy a pair of physical registers.
806 : ///
807 : /// This function should support copies within any legal register class as
808 : /// well as any cross-class copies created during instruction selection.
809 : ///
810 : /// The source and destination registers may overlap, which may require a
811 : /// careful implementation when multiple copy instructions are required for
812 : /// large registers. See for example the ARM target.
813 0 : virtual void copyPhysReg(MachineBasicBlock &MBB,
814 : MachineBasicBlock::iterator MI, const DebugLoc &DL,
815 : unsigned DestReg, unsigned SrcReg,
816 : bool KillSrc) const {
817 0 : llvm_unreachable("Target didn't implement TargetInstrInfo::copyPhysReg!");
818 : }
819 :
820 : /// Store the specified register of the given register class to the specified
821 : /// stack frame index. The store instruction is to be added to the given
822 : /// machine basic block before the specified machine instruction. If isKill
823 : /// is true, the register operand is the last use and must be marked kill.
824 0 : virtual void storeRegToStackSlot(MachineBasicBlock &MBB,
825 : MachineBasicBlock::iterator MI,
826 : unsigned SrcReg, bool isKill, int FrameIndex,
827 : const TargetRegisterClass *RC,
828 : const TargetRegisterInfo *TRI) const {
829 0 : llvm_unreachable("Target didn't implement "
830 : "TargetInstrInfo::storeRegToStackSlot!");
831 : }
832 :
833 : /// Load the specified register of the given register class from the specified
834 : /// stack frame index. The load instruction is to be added to the given
835 : /// machine basic block before the specified machine instruction.
836 0 : virtual void loadRegFromStackSlot(MachineBasicBlock &MBB,
837 : MachineBasicBlock::iterator MI,
838 : unsigned DestReg, int FrameIndex,
839 : const TargetRegisterClass *RC,
840 : const TargetRegisterInfo *TRI) const {
841 0 : llvm_unreachable("Target didn't implement "
842 : "TargetInstrInfo::loadRegFromStackSlot!");
843 : }
844 :
845 : /// This function is called for all pseudo instructions
846 : /// that remain after register allocation. Many pseudo instructions are
847 : /// created to help register allocation. This is the place to convert them
848 : /// into real instructions. The target can edit MI in place, or it can insert
849 : /// new instructions and erase MI. The function should return true if
850 : /// anything was changed.
851 804 : virtual bool expandPostRAPseudo(MachineInstr &MI) const { return false; }
852 :
853 : /// Check whether the target can fold a load that feeds a subreg operand
854 : /// (or a subreg operand that feeds a store).
855 : /// For example, X86 may want to return true if it can fold
856 : /// movl (%esp), %eax
857 : /// subb, %al, ...
858 : /// Into:
859 : /// subb (%esp), ...
860 : ///
861 : /// Ideally, we'd like the target implementation of foldMemoryOperand() to
862 : /// reject subregs - but since this behavior used to be enforced in the
863 : /// target-independent code, moving this responsibility to the targets
864 : /// has the potential of causing nasty silent breakage in out-of-tree targets.
865 6444 : virtual bool isSubregFoldable() const { return false; }
866 :
867 : /// Attempt to fold a load or store of the specified stack
868 : /// slot into the specified machine instruction for the specified operand(s).
869 : /// If this is possible, a new instruction is returned with the specified
870 : /// operand folded, otherwise NULL is returned.
871 : /// The new instruction is inserted before MI, and the client is responsible
872 : /// for removing the old instruction.
873 : MachineInstr *foldMemoryOperand(MachineInstr &MI, ArrayRef<unsigned> Ops,
874 : int FrameIndex,
875 : LiveIntervals *LIS = nullptr) const;
876 :
877 : /// Same as the previous version except it allows folding of any load and
878 : /// store from / to any address, not just from a specific stack slot.
879 : MachineInstr *foldMemoryOperand(MachineInstr &MI, ArrayRef<unsigned> Ops,
880 : MachineInstr &LoadMI,
881 : LiveIntervals *LIS = nullptr) const;
882 :
883 : /// Return true when there is potentially a faster code sequence
884 : /// for an instruction chain ending in \p Root. All potential patterns are
885 : /// returned in the \p Pattern vector. Pattern should be sorted in priority
886 : /// order since the pattern evaluator stops checking as soon as it finds a
887 : /// faster sequence.
888 : /// \param Root - Instruction that could be combined with one of its operands
889 : /// \param Patterns - Vector of possible combination patterns
890 : virtual bool getMachineCombinerPatterns(
891 : MachineInstr &Root,
892 : SmallVectorImpl<MachineCombinerPattern> &Patterns) const;
893 :
894 : /// Return true when a code sequence can improve throughput. It
895 : /// should be called only for instructions in loops.
896 : /// \param Pattern - combiner pattern
897 : virtual bool isThroughputPattern(MachineCombinerPattern Pattern) const;
898 :
899 : /// Return true if the input \P Inst is part of a chain of dependent ops
900 : /// that are suitable for reassociation, otherwise return false.
901 : /// If the instruction's operands must be commuted to have a previous
902 : /// instruction of the same type define the first source operand, \P Commuted
903 : /// will be set to true.
904 : bool isReassociationCandidate(const MachineInstr &Inst, bool &Commuted) const;
905 :
906 : /// Return true when \P Inst is both associative and commutative.
907 0 : virtual bool isAssociativeAndCommutative(const MachineInstr &Inst) const {
908 0 : return false;
909 : }
910 :
911 : /// Return true when \P Inst has reassociable operands in the same \P MBB.
912 : virtual bool hasReassociableOperands(const MachineInstr &Inst,
913 : const MachineBasicBlock *MBB) const;
914 :
915 : /// Return true when \P Inst has reassociable sibling.
916 : bool hasReassociableSibling(const MachineInstr &Inst, bool &Commuted) const;
917 :
918 : /// When getMachineCombinerPatterns() finds patterns, this function generates
919 : /// the instructions that could replace the original code sequence. The client
920 : /// has to decide whether the actual replacement is beneficial or not.
921 : /// \param Root - Instruction that could be combined with one of its operands
922 : /// \param Pattern - Combination pattern for Root
923 : /// \param InsInstrs - Vector of new instructions that implement P
924 : /// \param DelInstrs - Old instructions, including Root, that could be
925 : /// replaced by InsInstr
926 : /// \param InstrIdxForVirtReg - map of virtual register to instruction in
927 : /// InsInstr that defines it
928 : virtual void genAlternativeCodeSequence(
929 : MachineInstr &Root, MachineCombinerPattern Pattern,
930 : SmallVectorImpl<MachineInstr *> &InsInstrs,
931 : SmallVectorImpl<MachineInstr *> &DelInstrs,
932 : DenseMap<unsigned, unsigned> &InstrIdxForVirtReg) const;
933 :
934 : /// Attempt to reassociate \P Root and \P Prev according to \P Pattern to
935 : /// reduce critical path length.
936 : void reassociateOps(MachineInstr &Root, MachineInstr &Prev,
937 : MachineCombinerPattern Pattern,
938 : SmallVectorImpl<MachineInstr *> &InsInstrs,
939 : SmallVectorImpl<MachineInstr *> &DelInstrs,
940 : DenseMap<unsigned, unsigned> &InstrIdxForVirtReg) const;
941 :
942 : /// This is an architecture-specific helper function of reassociateOps.
943 : /// Set special operand attributes for new instructions after reassociation.
944 205 : virtual void setSpecialOperandAttr(MachineInstr &OldMI1, MachineInstr &OldMI2,
945 : MachineInstr &NewMI1,
946 205 : MachineInstr &NewMI2) const {}
947 :
948 : /// Return true when a target supports MachineCombiner.
949 0 : virtual bool useMachineCombiner() const { return false; }
950 :
951 : protected:
952 : /// Target-dependent implementation for foldMemoryOperand.
953 : /// Target-independent code in foldMemoryOperand will
954 : /// take care of adding a MachineMemOperand to the newly created instruction.
955 : /// The instruction and any auxiliary instructions necessary will be inserted
956 : /// at InsertPt.
957 : virtual MachineInstr *
958 5608 : foldMemoryOperandImpl(MachineFunction &MF, MachineInstr &MI,
959 : ArrayRef<unsigned> Ops,
960 : MachineBasicBlock::iterator InsertPt, int FrameIndex,
961 : LiveIntervals *LIS = nullptr) const {
962 5608 : return nullptr;
963 : }
964 :
965 : /// Target-dependent implementation for foldMemoryOperand.
966 : /// Target-independent code in foldMemoryOperand will
967 : /// take care of adding a MachineMemOperand to the newly created instruction.
968 : /// The instruction and any auxiliary instructions necessary will be inserted
969 : /// at InsertPt.
970 24 : virtual MachineInstr *foldMemoryOperandImpl(
971 : MachineFunction &MF, MachineInstr &MI, ArrayRef<unsigned> Ops,
972 : MachineBasicBlock::iterator InsertPt, MachineInstr &LoadMI,
973 : LiveIntervals *LIS = nullptr) const {
974 24 : return nullptr;
975 : }
976 :
977 : /// \brief Target-dependent implementation of getRegSequenceInputs.
978 : ///
979 : /// \returns true if it is possible to build the equivalent
980 : /// REG_SEQUENCE inputs with the pair \p MI, \p DefIdx. False otherwise.
981 : ///
982 : /// \pre MI.isRegSequenceLike().
983 : ///
984 : /// \see TargetInstrInfo::getRegSequenceInputs.
985 5 : virtual bool getRegSequenceLikeInputs(
986 : const MachineInstr &MI, unsigned DefIdx,
987 : SmallVectorImpl<RegSubRegPairAndIdx> &InputRegs) const {
988 5 : return false;
989 : }
990 :
991 : /// \brief Target-dependent implementation of getExtractSubregInputs.
992 : ///
993 : /// \returns true if it is possible to build the equivalent
994 : /// EXTRACT_SUBREG inputs with the pair \p MI, \p DefIdx. False otherwise.
995 : ///
996 : /// \pre MI.isExtractSubregLike().
997 : ///
998 : /// \see TargetInstrInfo::getExtractSubregInputs.
999 0 : virtual bool getExtractSubregLikeInputs(const MachineInstr &MI,
1000 : unsigned DefIdx,
1001 : RegSubRegPairAndIdx &InputReg) const {
1002 0 : return false;
1003 : }
1004 :
1005 : /// \brief Target-dependent implementation of getInsertSubregInputs.
1006 : ///
1007 : /// \returns true if it is possible to build the equivalent
1008 : /// INSERT_SUBREG inputs with the pair \p MI, \p DefIdx. False otherwise.
1009 : ///
1010 : /// \pre MI.isInsertSubregLike().
1011 : ///
1012 : /// \see TargetInstrInfo::getInsertSubregInputs.
1013 : virtual bool
1014 0 : getInsertSubregLikeInputs(const MachineInstr &MI, unsigned DefIdx,
1015 : RegSubRegPair &BaseReg,
1016 : RegSubRegPairAndIdx &InsertedReg) const {
1017 0 : return false;
1018 : }
1019 :
1020 : public:
  /// unfoldMemoryOperand - Separate a single instruction which folded a load
  /// or a store or a load and a store into two or more instructions. If this
  /// is possible, returns true as well as the new instructions by reference.
  virtual bool
  unfoldMemoryOperand(MachineFunction &MF, MachineInstr &MI, unsigned Reg,
                      bool UnfoldLoad, bool UnfoldStore,
                      SmallVectorImpl<MachineInstr *> &NewMIs) const {
    return false;
  }
1030 :
1031 0 : virtual bool unfoldMemoryOperand(SelectionDAG &DAG, SDNode *N,
1032 : SmallVectorImpl<SDNode *> &NewNodes) const {
1033 0 : return false;
1034 : }
1035 :
1036 : /// Returns the opcode of the would be new
1037 : /// instruction after load / store are unfolded from an instruction of the
1038 : /// specified opcode. It returns zero if the specified unfolding is not
1039 : /// possible. If LoadRegIndex is non-null, it is filled in with the operand
1040 : /// index of the operand which will hold the register holding the loaded
1041 : /// value.
1042 : virtual unsigned
1043 466 : getOpcodeAfterMemoryUnfold(unsigned Opc, bool UnfoldLoad, bool UnfoldStore,
1044 : unsigned *LoadRegIndex = nullptr) const {
1045 466 : return 0;
1046 : }
1047 :
1048 : /// This is used by the pre-regalloc scheduler to determine if two loads are
1049 : /// loading from the same base address. It should only return true if the base
1050 : /// pointers are the same and the only differences between the two addresses
1051 : /// are the offset. It also returns the offsets by reference.
1052 282735 : virtual bool areLoadsFromSameBasePtr(SDNode *Load1, SDNode *Load2,
1053 : int64_t &Offset1,
1054 : int64_t &Offset2) const {
1055 282735 : return false;
1056 : }
1057 :
  /// This is used by the pre-regalloc scheduler to determine (in conjunction
  /// with areLoadsFromSameBasePtr) if two loads should be scheduled together.
  /// On some targets if two loads are loading from
  /// addresses in the same cache line, it's better if they are scheduled
  /// together. This function takes two integers that represent the load offsets
  /// from the common base address. It returns true if it decides it's desirable
  /// to schedule the two loads together. "NumLoads" is the number of loads that
  /// have already been scheduled after Load1.
  virtual bool shouldScheduleLoadsNear(SDNode *Load1, SDNode *Load2,
                                       int64_t Offset1, int64_t Offset2,
                                       unsigned NumLoads) const {
    return false;
  }
1071 :
1072 : /// Get the base register and byte offset of an instruction that reads/writes
1073 : /// memory.
1074 0 : virtual bool getMemOpBaseRegImmOfs(MachineInstr &MemOp, unsigned &BaseReg,
1075 : int64_t &Offset,
1076 : const TargetRegisterInfo *TRI) const {
1077 0 : return false;
1078 : }
1079 :
1080 : /// Return true if the instruction contains a base register and offset. If
1081 : /// true, the function also sets the operand position in the instruction
1082 : /// for the base register and offset.
1083 0 : virtual bool getBaseAndOffsetPosition(const MachineInstr &MI,
1084 : unsigned &BasePos,
1085 : unsigned &OffsetPos) const {
1086 0 : return false;
1087 : }
1088 :
  /// If the instruction is an increment of a constant value, return true and
  /// set \p Value to the amount.
  virtual bool getIncrementValue(const MachineInstr &MI, int &Value) const {
    return false;
  }
1093 :
1094 : /// Returns true if the two given memory operations should be scheduled
1095 : /// adjacent. Note that you have to add:
1096 : /// DAG->addMutation(createLoadClusterDAGMutation(DAG->TII, DAG->TRI));
1097 : /// or
1098 : /// DAG->addMutation(createStoreClusterDAGMutation(DAG->TII, DAG->TRI));
1099 : /// to TargetPassConfig::createMachineScheduler() to have an effect.
1100 0 : virtual bool shouldClusterMemOps(MachineInstr &FirstLdSt,
1101 : MachineInstr &SecondLdSt,
1102 : unsigned NumLoads) const {
1103 0 : llvm_unreachable("target did not implement shouldClusterMemOps()");
1104 : }
1105 :
1106 : /// Reverses the branch condition of the specified condition list,
1107 : /// returning false on success and true if it cannot be reversed.
1108 : virtual bool
1109 94 : reverseBranchCondition(SmallVectorImpl<MachineOperand> &Cond) const {
1110 94 : return true;
1111 : }
1112 :
1113 : /// Insert a noop into the instruction stream at the specified point.
1114 : virtual void insertNoop(MachineBasicBlock &MBB,
1115 : MachineBasicBlock::iterator MI) const;
1116 :
1117 : /// Return the noop instruction to use for a noop.
1118 : virtual void getNoop(MCInst &NopInst) const;
1119 :
1120 : /// Return true for post-incremented instructions.
1121 0 : virtual bool isPostIncrement(const MachineInstr &MI) const { return false; }
1122 :
1123 : /// Returns true if the instruction is already predicated.
1124 3903411 : virtual bool isPredicated(const MachineInstr &MI) const { return false; }
1125 :
1126 : /// Returns true if the instruction is a
1127 : /// terminator instruction that has not been predicated.
1128 : virtual bool isUnpredicatedTerminator(const MachineInstr &MI) const;
1129 :
1130 : /// Returns true if MI is an unconditional tail call.
1131 710 : virtual bool isUnconditionalTailCall(const MachineInstr &MI) const {
1132 710 : return false;
1133 : }
1134 :
1135 : /// Returns true if the tail call can be made conditional on BranchCond.
1136 0 : virtual bool canMakeTailCallConditional(SmallVectorImpl<MachineOperand> &Cond,
1137 : const MachineInstr &TailCall) const {
1138 0 : return false;
1139 : }
1140 :
1141 : /// Replace the conditional branch in MBB with a conditional tail call.
1142 0 : virtual void replaceBranchWithTailCall(MachineBasicBlock &MBB,
1143 : SmallVectorImpl<MachineOperand> &Cond,
1144 : const MachineInstr &TailCall) const {
1145 0 : llvm_unreachable("Target didn't implement replaceBranchWithTailCall!");
1146 : }
1147 :
1148 : /// Convert the instruction into a predicated instruction.
1149 : /// It returns true if the operation was successful.
1150 : virtual bool PredicateInstruction(MachineInstr &MI,
1151 : ArrayRef<MachineOperand> Pred) const;
1152 :
1153 : /// Returns true if the first specified predicate
1154 : /// subsumes the second, e.g. GE subsumes GT.
1155 0 : virtual bool SubsumesPredicate(ArrayRef<MachineOperand> Pred1,
1156 : ArrayRef<MachineOperand> Pred2) const {
1157 0 : return false;
1158 : }
1159 :
1160 : /// If the specified instruction defines any predicate
1161 : /// or condition code register(s) used for predication, returns true as well
1162 : /// as the definition predicate(s) by reference.
1163 10046 : virtual bool DefinesPredicate(MachineInstr &MI,
1164 : std::vector<MachineOperand> &Pred) const {
1165 10046 : return false;
1166 : }
1167 :
1168 : /// Return true if the specified instruction can be predicated.
1169 : /// By default, this returns true for every instruction with a
1170 : /// PredicateOperand.
1171 0 : virtual bool isPredicable(const MachineInstr &MI) const {
1172 812 : return MI.getDesc().isPredicable();
1173 : }
1174 :
1175 : /// Return true if it's safe to move a machine
1176 : /// instruction that defines the specified register class.
1177 56780 : virtual bool isSafeToMoveRegClassDefs(const TargetRegisterClass *RC) const {
1178 56780 : return true;
1179 : }
1180 :
1181 : /// Test if the given instruction should be considered a scheduling boundary.
1182 : /// This primarily includes labels and terminators.
1183 : virtual bool isSchedulingBoundary(const MachineInstr &MI,
1184 : const MachineBasicBlock *MBB,
1185 : const MachineFunction &MF) const;
1186 :
1187 : /// Measure the specified inline asm to determine an approximation of its
1188 : /// length.
1189 : virtual unsigned getInlineAsmLength(const char *Str,
1190 : const MCAsmInfo &MAI) const;
1191 :
1192 : /// Allocate and return a hazard recognizer to use for this target when
1193 : /// scheduling the machine instructions before register allocation.
1194 : virtual ScheduleHazardRecognizer *
1195 : CreateTargetHazardRecognizer(const TargetSubtargetInfo *STI,
1196 : const ScheduleDAG *DAG) const;
1197 :
1198 : /// Allocate and return a hazard recognizer to use for this target when
1199 : /// scheduling the machine instructions before register allocation.
1200 : virtual ScheduleHazardRecognizer *
1201 : CreateTargetMIHazardRecognizer(const InstrItineraryData *,
1202 : const ScheduleDAG *DAG) const;
1203 :
1204 : /// Allocate and return a hazard recognizer to use for this target when
1205 : /// scheduling the machine instructions after register allocation.
1206 : virtual ScheduleHazardRecognizer *
1207 : CreateTargetPostRAHazardRecognizer(const InstrItineraryData *,
1208 : const ScheduleDAG *DAG) const;
1209 :
  /// Allocate and return a hazard recognizer to use by non-scheduling
  /// passes.
  virtual ScheduleHazardRecognizer *
  CreateTargetPostRAHazardRecognizer(const MachineFunction &MF) const {
    return nullptr;
  }
1216 :
1217 : /// Provide a global flag for disabling the PreRA hazard recognizer that
1218 : /// targets may choose to honor.
1219 : bool usePreRAHazardRecognizer() const;
1220 :
1221 : /// For a comparison instruction, return the source registers
1222 : /// in SrcReg and SrcReg2 if having two register operands, and the value it
1223 : /// compares against in CmpValue. Return true if the comparison instruction
1224 : /// can be analyzed.
1225 5259 : virtual bool analyzeCompare(const MachineInstr &MI, unsigned &SrcReg,
1226 : unsigned &SrcReg2, int &Mask, int &Value) const {
1227 5259 : return false;
1228 : }
1229 :
1230 : /// See if the comparison instruction can be converted
1231 : /// into something more efficient. E.g., on ARM most instructions can set the
1232 : /// flags register, obviating the need for a separate CMP.
1233 540 : virtual bool optimizeCompareInstr(MachineInstr &CmpInstr, unsigned SrcReg,
1234 : unsigned SrcReg2, int Mask, int Value,
1235 : const MachineRegisterInfo *MRI) const {
1236 540 : return false;
1237 : }
1238 41490 : virtual bool optimizeCondBranch(MachineInstr &MI) const { return false; }
1239 :
1240 : /// Try to remove the load by folding it to a register operand at the use.
1241 : /// We fold the load instructions if and only if the
1242 : /// def and use are in the same BB. We only look at one load and see
1243 : /// whether it can be folded into MI. FoldAsLoadDefReg is the virtual register
1244 : /// defined by the load we are trying to fold. DefMI returns the machine
1245 : /// instruction that defines FoldAsLoadDefReg, and the function returns
1246 : /// the machine instruction generated due to folding.
1247 12682 : virtual MachineInstr *optimizeLoadInstr(MachineInstr &MI,
1248 : const MachineRegisterInfo *MRI,
1249 : unsigned &FoldAsLoadDefReg,
1250 : MachineInstr *&DefMI) const {
1251 12682 : return nullptr;
1252 : }
1253 :
1254 : /// 'Reg' is known to be defined by a move immediate instruction,
1255 : /// try to fold the immediate into the use instruction.
1256 : /// If MRI->hasOneNonDBGUse(Reg) is true, and this function returns true,
1257 : /// then the caller may assume that DefMI has been erased from its parent
1258 : /// block. The caller may assume that it will not be erased by this
1259 : /// function otherwise.
1260 2702 : virtual bool FoldImmediate(MachineInstr &UseMI, MachineInstr &DefMI,
1261 : unsigned Reg, MachineRegisterInfo *MRI) const {
1262 2702 : return false;
1263 : }
1264 :
1265 : /// Return the number of u-operations the given machine
1266 : /// instruction will be decoded to on the target cpu. The itinerary's
1267 : /// IssueWidth is the number of microops that can be dispatched each
1268 : /// cycle. An instruction with zero microops takes no dispatch resources.
1269 : virtual unsigned getNumMicroOps(const InstrItineraryData *ItinData,
1270 : const MachineInstr &MI) const;
1271 :
1272 : /// Return true for pseudo instructions that don't consume any
1273 : /// machine resources in their current form. These are common cases that the
1274 : /// scheduler should consider free, rather than conservatively handling them
1275 : /// as instructions with no itinerary.
1276 : bool isZeroCost(unsigned Opcode) const {
1277 : return Opcode <= TargetOpcode::COPY;
1278 : }
1279 :
1280 : virtual int getOperandLatency(const InstrItineraryData *ItinData,
1281 : SDNode *DefNode, unsigned DefIdx,
1282 : SDNode *UseNode, unsigned UseIdx) const;
1283 :
1284 : /// Compute and return the use operand latency of a given pair of def and use.
1285 : /// In most cases, the static scheduling itinerary was enough to determine the
1286 : /// operand latency. But it may not be possible for instructions with variable
1287 : /// number of defs / uses.
1288 : ///
1289 : /// This is a raw interface to the itinerary that may be directly overridden
1290 : /// by a target. Use computeOperandLatency to get the best estimate of
1291 : /// latency.
1292 : virtual int getOperandLatency(const InstrItineraryData *ItinData,
1293 : const MachineInstr &DefMI, unsigned DefIdx,
1294 : const MachineInstr &UseMI,
1295 : unsigned UseIdx) const;
1296 :
1297 : /// Compute the instruction latency of a given instruction.
1298 : /// If the instruction has higher cost when predicated, it's returned via
1299 : /// PredCost.
1300 : virtual unsigned getInstrLatency(const InstrItineraryData *ItinData,
1301 : const MachineInstr &MI,
1302 : unsigned *PredCost = nullptr) const;
1303 :
1304 : virtual unsigned getPredicationCost(const MachineInstr &MI) const;
1305 :
1306 : virtual int getInstrLatency(const InstrItineraryData *ItinData,
1307 : SDNode *Node) const;
1308 :
1309 : /// Return the default expected latency for a def based on its opcode.
1310 : unsigned defaultDefLatency(const MCSchedModel &SchedModel,
1311 : const MachineInstr &DefMI) const;
1312 :
1313 : int computeDefOperandLatency(const InstrItineraryData *ItinData,
1314 : const MachineInstr &DefMI) const;
1315 :
1316 : /// Return true if this opcode has high latency to its result.
1317 474437 : virtual bool isHighLatencyDef(int opc) const { return false; }
1318 :
  /// Compute operand latency between a def of 'Reg'
  /// and a use in the current loop. Return true if the target considered
  /// it 'high'. This is used by optimization passes such as machine LICM to
  /// determine whether it makes sense to hoist an instruction out even in a
  /// high register pressure situation.
  /// The default implementation never reports a high operand latency.
  virtual bool hasHighOperandLatency(const TargetSchedModel &SchedModel,
                                     const MachineRegisterInfo *MRI,
                                     const MachineInstr &DefMI, unsigned DefIdx,
                                     const MachineInstr &UseMI,
                                     unsigned UseIdx) const {
    return false;
  }
1331 :
  /// Compute operand latency of a def of 'Reg'. Return true
  /// if the target considered it 'low'.
  /// (Default implementation is out-of-line; targets may override.)
  virtual bool hasLowDefLatency(const TargetSchedModel &SchedModel,
                                const MachineInstr &DefMI,
                                unsigned DefIdx) const;
1337 :
  /// Perform target-specific instruction verification.
  /// \returns true if \p MI is well-formed; overrides that reject an
  /// instruction should describe the problem via \p ErrInfo. The default
  /// accepts every instruction.
  virtual bool verifyInstruction(const MachineInstr &MI,
                                 StringRef &ErrInfo) const {
    return true;
  }
1343 :
  /// Return the current execution domain and bit mask of
  /// possible domains for instruction.
  ///
  /// Some micro-architectures have multiple execution domains, and multiple
  /// opcodes that perform the same operation in different domains. For
  /// example, the x86 architecture provides the por, orps, and orpd
  /// instructions that all do the same thing. There is a latency penalty if a
  /// register is written in one domain and read in another.
  ///
  /// This function returns a pair (domain, mask) containing the execution
  /// domain of MI, and a bit mask of possible domains. The setExecutionDomain
  /// function can be used to change the opcode to one of the domains in the
  /// bit mask. Instructions whose execution domain can't be changed should
  /// return a 0 mask.
  ///
  /// The execution domain numbers don't have any special meaning except domain
  /// 0 is used for instructions that are not associated with any interesting
  /// execution domain.
  ///
  /// The default reports domain 0 with a zero mask, i.e. no interesting
  /// domain and no possibility of conversion.
  virtual std::pair<uint16_t, uint16_t>
  getExecutionDomain(const MachineInstr &MI) const {
    return std::make_pair(0, 0);
  }
1367 :
  /// Change the opcode of MI to execute in Domain.
  ///
  /// The bit (1 << Domain) must be set in the mask returned from
  /// getExecutionDomain(MI).
  /// The default implementation is a no-op, matching the empty mask returned
  /// by the default getExecutionDomain.
  virtual void setExecutionDomain(MachineInstr &MI, unsigned Domain) const {}
1373 :
  /// Returns the preferred minimum clearance
  /// before an instruction with an unwanted partial register update.
  ///
  /// Some instructions only write part of a register, and implicitly need to
  /// read the other parts of the register. This may cause unwanted stalls
  /// preventing otherwise unrelated instructions from executing in parallel in
  /// an out-of-order CPU.
  ///
  /// For example, the x86 instruction cvtsi2ss writes its result to bits
  /// [31:0] of the destination xmm register. Bits [127:32] are unaffected, so
  /// the instruction needs to wait for the old value of the register to become
  /// available:
  ///
  ///   addps %xmm1, %xmm0
  ///   movaps %xmm0, (%rax)
  ///   cvtsi2ss %rbx, %xmm0
  ///
  /// In the code above, the cvtsi2ss instruction needs to wait for the addps
  /// instruction before it can issue, even though the high bits of %xmm0
  /// probably aren't needed.
  ///
  /// This hook returns the preferred clearance before MI, measured in
  /// instructions. Other defs of MI's operand OpNum are avoided in the last N
  /// instructions before MI. It should only return a positive value for
  /// unwanted dependencies. If the old bits of the defined register have
  /// useful values, or if MI is determined to otherwise read the dependency,
  /// the hook should return 0.
  ///
  /// The unwanted dependency may be handled by:
  ///
  /// 1. Allocating the same register for an MI def and use. That makes the
  ///    unwanted dependency identical to a required dependency.
  ///
  /// 2. Allocating a register for the def that has no defs in the previous N
  ///    instructions.
  ///
  /// 3. Calling breakPartialRegDependency() with the same arguments. This
  ///    allows the target to insert a dependency breaking instruction.
  ///
  /// \returns the preferred clearance in instructions; 0 (the default) means
  /// no partial-register-update hazard for this operand.
  virtual unsigned
  getPartialRegUpdateClearance(const MachineInstr &MI, unsigned OpNum,
                               const TargetRegisterInfo *TRI) const {
    // The default implementation returns 0 for no partial register dependency.
    return 0;
  }
1419 :
  /// \brief Return the minimum clearance before an instruction that reads an
  /// unused register.
  ///
  /// For example, AVX instructions may copy part of a register operand into
  /// the unused high bits of the destination register.
  ///
  /// vcvtsi2sdq %rax, %xmm0<undef>, %xmm14
  ///
  /// In the code above, vcvtsi2sdq copies %xmm0[127:64] into %xmm14 creating a
  /// false dependence on any previous write to %xmm0.
  ///
  /// This hook works similarly to getPartialRegUpdateClearance, except that it
  /// does not take an operand index. Instead sets \p OpNum to the index of the
  /// unused register.
  /// The default reports no hazard and leaves \p OpNum untouched; overrides
  /// returning a nonzero clearance are expected to set \p OpNum.
  virtual unsigned getUndefRegClearance(const MachineInstr &MI, unsigned &OpNum,
                                        const TargetRegisterInfo *TRI) const {
    // The default implementation returns 0 for no undef register dependency.
    return 0;
  }
1439 :
  /// Insert a dependency-breaking instruction
  /// before MI to eliminate an unwanted dependency on OpNum.
  ///
  /// If it wasn't possible to avoid a def in the last N instructions before MI
  /// (see getPartialRegUpdateClearance), this hook will be called to break the
  /// unwanted dependency.
  ///
  /// On x86, an xorps instruction can be used as a dependency breaker:
  ///
  ///   addps %xmm1, %xmm0
  ///   movaps %xmm0, (%rax)
  ///   xorps %xmm0, %xmm0
  ///   cvtsi2ss %rbx, %xmm0
  ///
  /// An <imp-kill> operand should be added to MI if an instruction was
  /// inserted. This ties the instructions together in the post-ra scheduler.
  ///
  /// The default does nothing; targets that return nonzero clearances from
  /// the hooks above should override it.
  virtual void breakPartialRegDependency(MachineInstr &MI, unsigned OpNum,
                                         const TargetRegisterInfo *TRI) const {}
1459 :
  /// Create machine specific model for scheduling.
  /// \returns a DFAPacketizer for the given subtarget, or nullptr (the
  /// default) when the target provides no DFA-based scheduling model.
  virtual DFAPacketizer *
  CreateTargetScheduleState(const TargetSubtargetInfo &) const {
    return nullptr;
  }
1465 :
  /// Sometimes, it is possible for the target
  /// to tell, even without aliasing information, that two MIs access different
  /// memory addresses. This function returns true if two MIs access different
  /// memory addresses and false otherwise.
  ///
  /// Assumes any physical registers used to compute addresses have the same
  /// value for both instructions. (This is the most useful assumption for
  /// post-RA scheduling.)
  ///
  /// See also MachineInstr::mayAlias, which is implemented on top of this
  /// function.
  ///
  /// The default conservatively answers false (the accesses may alias); both
  /// instructions must actually touch memory.
  virtual bool
  areMemAccessesTriviallyDisjoint(MachineInstr &MIa, MachineInstr &MIb,
                                  AliasAnalysis *AA = nullptr) const {
    assert((MIa.mayLoad() || MIa.mayStore()) &&
           "MIa must load from or modify a memory location");
    assert((MIb.mayLoad() || MIb.mayStore()) &&
           "MIb must load from or modify a memory location");
    return false;
  }
1486 :
1487 : /// \brief Return the value to use for the MachineCSE's LookAheadLimit,
1488 : /// which is a heuristic used for CSE'ing phys reg defs.
1489 121031 : virtual unsigned getMachineCSELookAheadLimit() const {
1490 : // The default lookahead is small to prevent unprofitable quadratic
1491 : // behavior.
1492 121031 : return 5;
1493 : }
1494 :
  /// Return an array that contains the ids of the target indices (used for the
  /// TargetIndex machine operand) and their names.
  ///
  /// MIR Serialization is able to serialize only the target indices that are
  /// defined by this method.
  /// The default exposes no target indices (returns an empty array).
  virtual ArrayRef<std::pair<int, const char *>>
  getSerializableTargetIndices() const {
    return None;
  }
1504 :
1505 : /// Decompose the machine operand's target flags into two values - the direct
1506 : /// target flag value and any of bit flags that are applied.
1507 : virtual std::pair<unsigned, unsigned>
1508 12 : decomposeMachineOperandsTargetFlags(unsigned /*TF*/) const {
1509 12 : return std::make_pair(0u, 0u);
1510 : }
1511 :
  /// Return an array that contains the direct target flag values and their
  /// names.
  ///
  /// MIR Serialization is able to serialize only the target flags that are
  /// defined by this method.
  /// The default exposes no direct target flags (returns an empty array).
  virtual ArrayRef<std::pair<unsigned, const char *>>
  getSerializableDirectMachineOperandTargetFlags() const {
    return None;
  }
1521 :
  /// Return an array that contains the bitmask target flag values and their
  /// names.
  ///
  /// MIR Serialization is able to serialize only the target flags that are
  /// defined by this method.
  /// The default exposes no bitmask target flags (returns an empty array).
  virtual ArrayRef<std::pair<unsigned, const char *>>
  getSerializableBitmaskMachineOperandTargetFlags() const {
    return None;
  }
1531 :
  /// Return an array that contains the MMO target flag values and their
  /// names.
  ///
  /// MIR Serialization is able to serialize only the MMO target flags that are
  /// defined by this method.
  /// The default exposes no MMO target flags (returns an empty array).
  virtual ArrayRef<std::pair<MachineMemOperand::Flags, const char *>>
  getSerializableMachineMemOperandTargetFlags() const {
    return None;
  }
1541 :
1542 : /// Determines whether \p Inst is a tail call instruction. Override this
1543 : /// method on targets that do not properly set MCID::Return and MCID::Call on
1544 : /// tail call instructions."
1545 148 : virtual bool isTailCall(const MachineInstr &Inst) const {
1546 184 : return Inst.isReturn() && Inst.isCall();
1547 : }
1548 :
  /// True if the instruction is bound to the top of its basic block and no
  /// other instructions shall be inserted before it. This can be implemented
  /// to prevent register allocator to insert spills before such instructions.
  /// By default no instruction is pinned to the top of its block.
  virtual bool isBasicBlockPrologue(const MachineInstr &MI) const {
    return false;
  }
1555 :
  /// \brief Returns the number of instructions that will be taken to call a
  /// function defined by the sequence on the closed interval [ \p StartIt, \p
  /// EndIt].
  ///
  /// \returns The number of instructions for the call in the first member,
  /// and a target-defined unsigned describing what type of call to emit in the
  /// second member.
  /// Targets that enable the MachineOutliner must override this; the default
  /// aborts.
  virtual std::pair<size_t, unsigned>
  getOutliningCallOverhead(MachineBasicBlock::iterator &StartIt,
                           MachineBasicBlock::iterator &EndIt) const {
    llvm_unreachable(
        "Target didn't implement TargetInstrInfo::getOutliningCallOverhead!");
  }
1569 :
  /// \brief Returns the number of instructions that will be taken to construct
  /// an outlined function frame for the candidates in \p CandidateClass, each
  /// given as a [start, end] iterator pair delimiting one occurrence of the
  /// sequence to outline.
  ///
  /// \returns The number of instructions for the frame in the first member
  /// and a target-defined unsigned describing what type of frame to construct
  /// in the second member.
  /// Targets that enable the MachineOutliner must override this; the default
  /// aborts.
  virtual std::pair<size_t, unsigned> getOutliningFrameOverhead(
      std::vector<
          std::pair<MachineBasicBlock::iterator, MachineBasicBlock::iterator>>
          &CandidateClass) const {
    llvm_unreachable(
        "Target didn't implement TargetInstrInfo::getOutliningFrameOverhead!");
  }
1584 :
  /// Represents how an instruction should be mapped by the outliner.
  /// \p Legal instructions are those which are safe to outline.
  /// \p Illegal instructions are those which cannot be outlined.
  /// \p Invisible instructions are instructions which can be outlined, but
  /// shouldn't actually impact the outlining result.
  /// Returned by getOutliningType below.
  enum MachineOutlinerInstrType { Legal, Illegal, Invisible };
1591 :
  /// Returns how or if \p MI should be outlined.
  /// Targets that enable the MachineOutliner must override this; the default
  /// aborts.
  virtual MachineOutlinerInstrType getOutliningType(MachineInstr &MI) const {
    llvm_unreachable(
        "Target didn't implement TargetInstrInfo::getOutliningType!");
  }
1597 :
  /// Insert a custom epilogue for outlined functions.
  /// This may be empty, in which case no epilogue or return statement will be
  /// emitted.
  /// Targets that enable the MachineOutliner must override this; the default
  /// aborts.
  virtual void insertOutlinerEpilogue(MachineBasicBlock &MBB,
                                      MachineFunction &MF,
                                      unsigned FrameClass) const {
    llvm_unreachable(
        "Target didn't implement TargetInstrInfo::insertOutlinerEpilogue!");
  }
1607 :
  /// Insert a call to an outlined function into the program.
  /// Returns an iterator to the spot where we inserted the call. This must be
  /// implemented by the target.
  /// The default aborts; there is no generic way to emit such a call.
  virtual MachineBasicBlock::iterator
  insertOutlinedCall(Module &M, MachineBasicBlock &MBB,
                     MachineBasicBlock::iterator &It, MachineFunction &MF,
                     unsigned CallClass) const {
    llvm_unreachable(
        "Target didn't implement TargetInstrInfo::insertOutlinedCall!");
  }
1618 :
  /// Insert a custom prologue for outlined functions.
  /// This may be empty, in which case no prologue will be emitted.
  /// Targets that enable the MachineOutliner must override this; the default
  /// aborts.
  virtual void insertOutlinerPrologue(MachineBasicBlock &MBB,
                                      MachineFunction &MF,
                                      unsigned FrameClass) const {
    llvm_unreachable(
        "Target didn't implement TargetInstrInfo::insertOutlinerPrologue!");
  }
1627 :
  /// Return true if the function can safely be outlined from.
  /// By default, this means that the function has no red zone.
  /// Targets that enable the MachineOutliner must override this; the default
  /// aborts.
  virtual bool isFunctionSafeToOutlineFrom(MachineFunction &MF) const {
    llvm_unreachable("Target didn't implement "
                     "TargetInstrInfo::isFunctionSafeToOutlineFrom!");
  }
1634 :
private:
  // Cached opcode numbers for target frame setup/teardown and special
  // control-flow pseudo instructions.
  // NOTE(review): presumably initialized by the class constructor, which is
  // not visible in this chunk -- confirm against the full class definition.
  unsigned CallFrameSetupOpcode, CallFrameDestroyOpcode;
  unsigned CatchRetOpcode;
  unsigned ReturnOpcode;
1639 : };
1640 :
1641 : /// \brief Provide DenseMapInfo for TargetInstrInfo::RegSubRegPair.
1642 : template <> struct DenseMapInfo<TargetInstrInfo::RegSubRegPair> {
1643 : using RegInfo = DenseMapInfo<unsigned>;
1644 :
1645 : static inline TargetInstrInfo::RegSubRegPair getEmptyKey() {
1646 4162441 : return TargetInstrInfo::RegSubRegPair(RegInfo::getEmptyKey(),
1647 : RegInfo::getEmptyKey());
1648 : }
1649 :
1650 : static inline TargetInstrInfo::RegSubRegPair getTombstoneKey() {
1651 2651431 : return TargetInstrInfo::RegSubRegPair(RegInfo::getTombstoneKey(),
1652 : RegInfo::getTombstoneKey());
1653 : }
1654 :
1655 : /// \brief Reuse getHashValue implementation from
1656 : /// std::pair<unsigned, unsigned>.
1657 : static unsigned getHashValue(const TargetInstrInfo::RegSubRegPair &Val) {
1658 3870912 : std::pair<unsigned, unsigned> PairVal = std::make_pair(Val.Reg, Val.SubReg);
1659 1935456 : return DenseMapInfo<std::pair<unsigned, unsigned>>::getHashValue(PairVal);
1660 : }
1661 :
1662 : static bool isEqual(const TargetInstrInfo::RegSubRegPair &LHS,
1663 : const TargetInstrInfo::RegSubRegPair &RHS) {
1664 13745764 : return RegInfo::isEqual(LHS.Reg, RHS.Reg) &&
1665 5023078 : RegInfo::isEqual(LHS.SubReg, RHS.SubReg);
1666 : }
1667 : };
1668 :
1669 : } // end namespace llvm
1670 :
1671 : #endif // LLVM_TARGET_TARGETINSTRINFO_H
|