LLVM 22.0.0git
TargetInstrInfo.h
Go to the documentation of this file.
1//===- llvm/CodeGen/TargetInstrInfo.h - Instruction Info --------*- C++ -*-===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8//
9// This file describes the target machine instruction set to the code generator.
10//
11//===----------------------------------------------------------------------===//
12
13#ifndef LLVM_CODEGEN_TARGETINSTRINFO_H
14#define LLVM_CODEGEN_TARGETINSTRINFO_H
15
16#include "llvm/ADT/ArrayRef.h"
17#include "llvm/ADT/DenseMap.h"
19#include "llvm/ADT/Uniformity.h"
31#include "llvm/MC/MCInstrInfo.h"
36#include <array>
37#include <cassert>
38#include <cstddef>
39#include <cstdint>
40#include <utility>
41#include <vector>
42
43namespace llvm {
44
45class DFAPacketizer;
47class LiveIntervals;
48class LiveVariables;
49class MachineLoop;
53class MCAsmInfo;
54class MCInst;
55struct MCSchedModel;
56class Module;
57class ScheduleDAG;
58class ScheduleDAGMI;
60class SDNode;
61class SelectionDAG;
62class SMSchedule;
64class RegScavenger;
69enum class MachineTraceStrategy;
70
71template <class T> class SmallVectorImpl;
72
73using ParamLoadedValue = std::pair<MachineOperand, DIExpression*>;
74
78
80 : Destination(&Dest), Source(&Src) {}
81};
82
83/// Used to describe a register and immediate addition.
84struct RegImmPair {
86 int64_t Imm;
87
88 RegImmPair(Register Reg, int64_t Imm) : Reg(Reg), Imm(Imm) {}
89};
90
91/// Used to describe addressing mode similar to ExtAddrMode in CodeGenPrepare.
92/// It holds the register values, the scale value and the displacement.
93/// It also holds a descriptor for the expression used to calculate the address
94/// from the operands.
96 enum class Formula {
97 Basic = 0, // BaseReg + ScaledReg * Scale + Displacement
98 SExtScaledReg = 1, // BaseReg + sext(ScaledReg) * Scale + Displacement
99 ZExtScaledReg = 2 // BaseReg + zext(ScaledReg) * Scale + Displacement
100 };
101
104 int64_t Scale = 0;
105 int64_t Displacement = 0;
107 ExtAddrMode() = default;
108};
109
110//---------------------------------------------------------------------------
111///
112/// TargetInstrInfo - Interface to description of machine instruction set
113///
115protected:
117
118 /// Subtarget specific sub-array of MCInstrInfo's RegClassByHwModeTables
119 /// (i.e. the table for the active HwMode). This should be indexed by
120 /// MCOperandInfo's RegClass field for LookupRegClassByHwMode operands.
121 const int16_t *const RegClassByHwMode;
122
123 TargetInstrInfo(const TargetRegisterInfo &TRI, unsigned CFSetupOpcode = ~0u,
124 unsigned CFDestroyOpcode = ~0u, unsigned CatchRetOpcode = ~0u,
125 unsigned ReturnOpcode = ~0u,
126 const int16_t *const RegClassByHwModeTable = nullptr)
127 : TRI(TRI), RegClassByHwMode(RegClassByHwModeTable),
128 CallFrameSetupOpcode(CFSetupOpcode),
129 CallFrameDestroyOpcode(CFDestroyOpcode), CatchRetOpcode(CatchRetOpcode),
130 ReturnOpcode(ReturnOpcode) {}
131
132public:
136
137 const TargetRegisterInfo &getRegisterInfo() const { return TRI; }
138
/// Returns true if \p Opc is a target-independent (generic) opcode,
/// i.e. it is numbered at or below TargetOpcode::GENERIC_OP_END.
static bool isGenericOpcode(unsigned Opc) {
  return Opc <= TargetOpcode::GENERIC_OP_END;
}
142
/// Returns true if \p Opc is one of the generic atomic read-modify-write
/// opcodes, i.e. it lies in the inclusive range
/// [GENERIC_ATOMICRMW_OP_START, GENERIC_ATOMICRMW_OP_END].
static bool isGenericAtomicRMWOpcode(unsigned Opc) {
  return Opc >= TargetOpcode::GENERIC_ATOMICRMW_OP_START &&
         Opc <= TargetOpcode::GENERIC_ATOMICRMW_OP_END;
}
147
148 /// \returns the subtarget appropriate RegClassID for \p OpInfo
149 ///
150 /// Note this shadows a version of getOpRegClassID in MCInstrInfo which takes
151 /// an additional argument for the subtarget's HwMode, since TargetInstrInfo
152 /// is owned by a subtarget in CodeGen but MCInstrInfo is a TargetMachine
153 /// constant.
154 int16_t getOpRegClassID(const MCOperandInfo &OpInfo) const {
155 if (OpInfo.isLookupRegClassByHwMode())
156 return RegClassByHwMode[OpInfo.RegClass];
157 return OpInfo.RegClass;
158 }
159
160 /// Given a machine instruction descriptor, returns the register
161 /// class constraint for OpNum, or NULL.
162 virtual const TargetRegisterClass *getRegClass(const MCInstrDesc &MCID,
163 unsigned OpNum) const;
164
165 /// Returns true if MI is an instruction we are unable to reason about
166 /// (like a call or something with unmodeled side effects).
167 virtual bool isGlobalMemoryObject(const MachineInstr *MI) const;
168
169 /// Return true if the instruction is trivially rematerializable, meaning it
170 /// has no side effects and requires no operands that aren't always available.
171 /// This means the only allowed uses are constants and unallocatable physical
172 /// registers so that the instructions result is independent of the place
173 /// in the function.
176 return false;
177 for (const MachineOperand &MO : MI.all_uses()) {
178 if (MO.getReg().isVirtual())
179 return false;
180 }
181 return true;
182 }
183
184 /// Return true if the instruction would be materializable at a point
185 /// in the containing function where all virtual register uses were
186 /// known to be live and available in registers.
187 bool isReMaterializable(const MachineInstr &MI) const {
188 return (MI.getOpcode() == TargetOpcode::IMPLICIT_DEF &&
189 MI.getNumOperands() == 1) ||
190 (MI.getDesc().isRematerializable() && isReMaterializableImpl(MI));
191 }
192
/// Given \p MO is a PhysReg use return if it can be ignored for the purpose
/// of instruction rematerialization or sinking.
virtual bool isIgnorableUse(const MachineOperand &MO) const {
  // Conservative target-independent default: no physreg use is ignorable.
  return false;
}
198
/// Hook for targets to veto sinking \p MI into \p SuccToSinkTo given the
/// cycle structure in \p CI. The default allows every sink.
virtual bool isSafeToSink(MachineInstr &MI, MachineBasicBlock *SuccToSinkTo,
                          MachineCycleInfo *CI) const {
  return true;
}
203
204 /// For a "cheap" instruction which doesn't enable additional sinking,
205 /// should MachineSink break a critical edge to sink it anyways?
207 return false;
208 }
209
210protected:
211 /// For instructions with opcodes for which the M_REMATERIALIZABLE flag is
212 /// set, this hook lets the target specify whether the instruction is actually
213 /// rematerializable, taking into consideration its operands. This
214 /// predicate must return false if the instruction has any side effects other
215 /// than producing a value.
216 virtual bool isReMaterializableImpl(const MachineInstr &MI) const;
217
218 /// This method commutes the operands of the given machine instruction MI.
219 /// The operands to be commuted are specified by their indices OpIdx1 and
220 /// OpIdx2.
221 ///
222 /// If a target has any instructions that are commutable but require
223 /// converting to different instructions or making non-trivial changes
224 /// to commute them, this method can be overloaded to do that.
225 /// The default implementation simply swaps the commutable operands.
226 ///
227 /// If NewMI is false, MI is modified in place and returned; otherwise, a
228 /// new machine instruction is created and returned.
229 ///
230 /// Do not call this method for a non-commutable instruction.
231 /// Even though the instruction is commutable, the method may still
232 /// fail to commute the operands, null pointer is returned in such cases.
233 virtual MachineInstr *commuteInstructionImpl(MachineInstr &MI, bool NewMI,
234 unsigned OpIdx1,
235 unsigned OpIdx2) const;
236
237 /// Assigns the (CommutableOpIdx1, CommutableOpIdx2) pair of commutable
238 /// operand indices to (ResultIdx1, ResultIdx2).
239 /// One or both input values of the pair: (ResultIdx1, ResultIdx2) may be
240 /// predefined to some indices or be undefined (designated by the special
241 /// value 'CommuteAnyOperandIndex').
242 /// The predefined result indices cannot be re-defined.
243 /// The function returns true iff after the result pair redefinition
244 /// the fixed result pair is equal to or equivalent to the source pair of
245 /// indices: (CommutableOpIdx1, CommutableOpIdx2). It is assumed here that
246 /// the pairs (x,y) and (y,x) are equivalent.
247 static bool fixCommutedOpIndices(unsigned &ResultIdx1, unsigned &ResultIdx2,
248 unsigned CommutableOpIdx1,
249 unsigned CommutableOpIdx2);
250
251public:
/// These methods return the opcode of the frame setup/destroy instructions
/// if they exist (-1 otherwise). Some targets use pseudo instructions in
/// order to abstract away the difference between operating with a frame
/// pointer and operating without, through the use of these two instructions.
/// A FrameSetup MI in MF implies MFI::AdjustsStack.
///
// Both default to ~0u (i.e. -1) when the target does not define them.
unsigned getCallFrameSetupOpcode() const { return CallFrameSetupOpcode; }
unsigned getCallFrameDestroyOpcode() const { return CallFrameDestroyOpcode; }
260
261 /// Returns true if the argument is a frame pseudo instruction.
262 bool isFrameInstr(const MachineInstr &I) const {
263 return I.getOpcode() == getCallFrameSetupOpcode() ||
264 I.getOpcode() == getCallFrameDestroyOpcode();
265 }
266
/// Returns true if the argument is a frame setup pseudo instruction.
/// (Frame destroy pseudos return false; see isFrameInstr for both.)
bool isFrameSetup(const MachineInstr &I) const {
  return I.getOpcode() == getCallFrameSetupOpcode();
}
271
/// Returns the size of the call frame associated with the given frame
/// pseudo instruction. For a frame setup instruction this is the space set
/// up after the instruction; for a frame destroy instruction this is the
/// frame freed by the caller.
/// Note, in some cases a call frame (or a part of it) may be prepared prior
/// to the frame setup instruction. It occurs in the calls that involve
/// inalloca arguments. This function reports only the size of the frame part
/// that is set up between the frame setup and destroy pseudo instructions.
int64_t getFrameSize(const MachineInstr &I) const {
  assert(isFrameInstr(I) && "Not a frame instruction");
  // Operand 0 of the frame pseudo carries the (non-negative) frame size.
  assert(I.getOperand(0).getImm() >= 0);
  return I.getOperand(0).getImm();
}
285
286 /// Returns the total frame size, which is made up of the space set up inside
287 /// the pair of frame start-stop instructions and the space that is set up
288 /// prior to the pair.
289 int64_t getFrameTotalSize(const MachineInstr &I) const {
290 if (isFrameSetup(I)) {
291 assert(I.getOperand(1).getImm() >= 0 &&
292 "Frame size must not be negative");
293 return getFrameSize(I) + I.getOperand(1).getImm();
294 }
295 return getFrameSize(I);
296 }
297
/// Returns the opcode of the catch-return pseudo instruction (-1 if unset).
unsigned getCatchReturnOpcode() const { return CatchRetOpcode; }
/// Returns the opcode of the target's return instruction (-1 if unset).
unsigned getReturnOpcode() const { return ReturnOpcode; }
300
301 /// Returns the actual stack pointer adjustment made by an instruction
302 /// as part of a call sequence. By default, only call frame setup/destroy
303 /// instructions adjust the stack, but targets may want to override this
304 /// to enable more fine-grained adjustment, or adjust by a different value.
305 virtual int getSPAdjust(const MachineInstr &MI) const;
306
/// Return true if the instruction is a "coalescable" extension instruction.
/// That is, it's like a copy where it's legal for the source to overlap the
/// destination. e.g. X86::MOVSX64rr32. If this returns true, then it's
/// expected the pre-extension value is available as a subreg of the result
/// register. This also returns the sub-register index in SubIdx.
virtual bool isCoalescableExtInstr(const MachineInstr &MI, Register &SrcReg,
                                   Register &DstReg, unsigned &SubIdx) const {
  // Target-independent default: no coalescable extensions are recognized.
  return false;
}
316
317 /// If the specified machine instruction is a direct
318 /// load from a stack slot, return the virtual or physical register number of
319 /// the destination along with the FrameIndex of the loaded stack slot. If
320 /// not, return 0. This predicate must return 0 if the instruction has
321 /// any side effects other than loading from the stack slot.
323 int &FrameIndex) const {
324 return 0;
325 }
326
327 /// Optional extension of isLoadFromStackSlot that returns the number of
328 /// bytes loaded from the stack. This must be implemented if a backend
329 /// supports partial stack slot spills/loads to further disambiguate
330 /// what the load does.
332 int &FrameIndex,
333 TypeSize &MemBytes) const {
334 MemBytes = TypeSize::getZero();
335 return isLoadFromStackSlot(MI, FrameIndex);
336 }
337
338 /// Check for post-frame ptr elimination stack locations as well.
339 /// This uses a heuristic so it isn't reliable for correctness.
341 int &FrameIndex) const {
342 return 0;
343 }
344
345 /// If the specified machine instruction has a load from a stack slot,
346 /// return true along with the FrameIndices of the loaded stack slot and the
347 /// machine mem operands containing the reference.
348 /// If not, return false. Unlike isLoadFromStackSlot, this returns true for
349 /// any instructions that loads from the stack. This is just a hint, as some
350 /// cases may be missed.
351 virtual bool hasLoadFromStackSlot(
352 const MachineInstr &MI,
354
355 /// If the specified machine instruction is a direct
356 /// store to a stack slot, return the virtual or physical register number of
357 /// the source reg along with the FrameIndex of the loaded stack slot. If
358 /// not, return 0. This predicate must return 0 if the instruction has
359 /// any side effects other than storing to the stack slot.
361 int &FrameIndex) const {
362 return 0;
363 }
364
365 /// Optional extension of isStoreToStackSlot that returns the number of
366 /// bytes stored to the stack. This must be implemented if a backend
367 /// supports partial stack slot spills/loads to further disambiguate
368 /// what the store does.
370 int &FrameIndex,
371 TypeSize &MemBytes) const {
372 MemBytes = TypeSize::getZero();
373 return isStoreToStackSlot(MI, FrameIndex);
374 }
375
376 /// Check for post-frame ptr elimination stack locations as well.
377 /// This uses a heuristic, so it isn't reliable for correctness.
379 int &FrameIndex) const {
380 return 0;
381 }
382
383 /// If the specified machine instruction has a store to a stack slot,
384 /// return true along with the FrameIndices of the loaded stack slot and the
385 /// machine mem operands containing the reference.
386 /// If not, return false. Unlike isStoreToStackSlot,
387 /// this returns true for any instructions that stores to the
388 /// stack. This is just a hint, as some cases may be missed.
389 virtual bool hasStoreToStackSlot(
390 const MachineInstr &MI,
392
/// Return true if the specified machine instruction
/// is a copy of one stack slot to another and has no other effect.
/// Provide the identity of the two frame indices.
virtual bool isStackSlotCopy(const MachineInstr &MI, int &DestFrameIndex,
                             int &SrcFrameIndex) const {
  // Default: targets that do not override this recognize no slot copies.
  return false;
}
400
401 /// Compute the size in bytes and offset within a stack slot of a spilled
402 /// register or subregister.
403 ///
404 /// \param [out] Size in bytes of the spilled value.
405 /// \param [out] Offset in bytes within the stack slot.
406 /// \returns true if both Size and Offset are successfully computed.
407 ///
408 /// Not all subregisters have computable spill slots. For example,
409 /// subregisters registers may not be byte-sized, and a pair of discontiguous
410 /// subregisters has no single offset.
411 ///
412 /// Targets with nontrivial bigendian implementations may need to override
413 /// this, particularly to support spilled vector registers.
414 virtual bool getStackSlotRange(const TargetRegisterClass *RC, unsigned SubIdx,
415 unsigned &Size, unsigned &Offset,
416 const MachineFunction &MF) const;
417
418 /// Return true if the given instruction is terminator that is unspillable,
419 /// according to isUnspillableTerminatorImpl.
421 return MI->isTerminator() && isUnspillableTerminatorImpl(MI);
422 }
423
/// Returns the size in bytes of the specified MachineInstr, or ~0U
/// when this function is not implemented by a target.
virtual unsigned getInstSizeInBytes(const MachineInstr &MI) const {
  return ~0U; // Sentinel: "unknown size" unless the target overrides.
}
429
/// Return true if the instruction is as cheap as a move instruction.
///
/// Targets for different archs need to override this, and different
/// micro-architectures can also be finely tuned inside.
virtual bool isAsCheapAsAMove(const MachineInstr &MI) const {
  // Default simply forwards to the instruction's MCInstrDesc flag.
  return MI.isAsCheapAsAMove();
}
437
/// Return true if the instruction should be sunk by MachineSink.
///
/// MachineSink determines on its own whether the instruction is safe to sink;
/// this gives the target a hook to override the default behavior with regards
/// to which instructions should be sunk.
///
/// shouldPostRASink() is used by PostRAMachineSink.
// Both hooks default to "yes": sinking is allowed unless overridden.
virtual bool shouldSink(const MachineInstr &MI) const { return true; }
virtual bool shouldPostRASink(const MachineInstr &MI) const { return true; }
447
/// Return false if the instruction should not be hoisted by MachineLICM.
///
/// MachineLICM determines on its own whether the instruction is safe to
/// hoist; this gives the target a hook to extend this assessment and prevent
/// an instruction being hoisted from a given loop for target specific
/// reasons.
virtual bool shouldHoist(const MachineInstr &MI,
                         const MachineLoop *FromLoop) const {
  return true; // Default: no target-specific objection to hoisting.
}
458
459 /// Re-issue the specified 'original' instruction at the
460 /// specific location targeting a new destination register.
461 /// The register in Orig->getOperand(0).getReg() will be substituted by
462 /// DestReg:SubIdx. Any existing subreg index is preserved or composed with
463 /// SubIdx.
464 virtual void reMaterialize(MachineBasicBlock &MBB,
466 unsigned SubIdx, const MachineInstr &Orig) const;
467
468 /// Clones instruction or the whole instruction bundle \p Orig and
469 /// insert into \p MBB before \p InsertBefore. The target may update operands
470 /// that are required to be unique.
471 ///
472 /// \p Orig must not return true for MachineInstr::isNotDuplicable().
473 virtual MachineInstr &duplicate(MachineBasicBlock &MBB,
474 MachineBasicBlock::iterator InsertBefore,
475 const MachineInstr &Orig) const;
476
477 /// This method must be implemented by targets that
478 /// set the M_CONVERTIBLE_TO_3_ADDR flag. When this flag is set, the target
479 /// may be able to convert a two-address instruction into one or more true
480 /// three-address instructions on demand. This allows the X86 target (for
481 /// example) to convert ADD and SHL instructions into LEA instructions if they
482 /// would require register copies due to two-addressness.
483 ///
484 /// This method returns a null pointer if the transformation cannot be
485 /// performed, otherwise it returns the last new instruction.
486 ///
487 /// If \p LIS is not nullptr, the LiveIntervals info should be updated for
488 /// replacing \p MI with new instructions, even though this function does not
489 /// remove MI.
491 LiveVariables *LV,
492 LiveIntervals *LIS) const {
493 return nullptr;
494 }
495
496 // This constant can be used as an input value of operand index passed to
497 // the method findCommutedOpIndices() to tell the method that the
498 // corresponding operand index is not pre-defined and that the method
499 // can pick any commutable operand.
500 static const unsigned CommuteAnyOperandIndex = ~0U;
501
502 /// This method commutes the operands of the given machine instruction MI.
503 ///
504 /// The operands to be commuted are specified by their indices OpIdx1 and
505 /// OpIdx2. OpIdx1 and OpIdx2 arguments may be set to a special value
506 /// 'CommuteAnyOperandIndex', which means that the method is free to choose
507 /// any arbitrarily chosen commutable operand. If both arguments are set to
508 /// 'CommuteAnyOperandIndex' then the method looks for 2 different commutable
509 /// operands; then commutes them if such operands could be found.
510 ///
511 /// If NewMI is false, MI is modified in place and returned; otherwise, a
512 /// new machine instruction is created and returned.
513 ///
514 /// Do not call this method for a non-commutable instruction or
515 /// for non-commuable operands.
516 /// Even though the instruction is commutable, the method may still
517 /// fail to commute the operands, null pointer is returned in such cases.
519 commuteInstruction(MachineInstr &MI, bool NewMI = false,
520 unsigned OpIdx1 = CommuteAnyOperandIndex,
521 unsigned OpIdx2 = CommuteAnyOperandIndex) const;
522
523 /// Returns true iff the routine could find two commutable operands in the
524 /// given machine instruction.
525 /// The 'SrcOpIdx1' and 'SrcOpIdx2' are INPUT and OUTPUT arguments.
526 /// If any of the INPUT values is set to the special value
527 /// 'CommuteAnyOperandIndex' then the method arbitrarily picks a commutable
528 /// operand, then returns its index in the corresponding argument.
529 /// If both of INPUT values are set to 'CommuteAnyOperandIndex' then method
530 /// looks for 2 commutable operands.
531 /// If INPUT values refer to some operands of MI, then the method simply
532 /// returns true if the corresponding operands are commutable and returns
533 /// false otherwise.
534 ///
535 /// For example, calling this method this way:
536 /// unsigned Op1 = 1, Op2 = CommuteAnyOperandIndex;
537 /// findCommutedOpIndices(MI, Op1, Op2);
538 /// can be interpreted as a query asking to find an operand that would be
539 /// commutable with the operand#1.
540 virtual bool findCommutedOpIndices(const MachineInstr &MI,
541 unsigned &SrcOpIdx1,
542 unsigned &SrcOpIdx2) const;
543
/// Returns true if the target has a preference on the operands order of
/// the given machine instruction. And specify if \p Commute is required to
/// get the desired operands order.
virtual bool hasCommutePreference(MachineInstr &MI, bool &Commute) const {
  return false; // Default: no preference; \p Commute is left untouched.
}
550
/// If possible, converts the instruction to a simplified/canonical form.
/// Returns true if the instruction was modified.
///
/// This function is only called after register allocation. The MI will be
/// modified in place. This is called by passes such as
/// MachineCopyPropagation, where their mutation of the MI operands may
/// expose opportunities to convert the instruction to a simpler form (e.g.
/// a load of 0).
virtual bool simplifyInstruction(MachineInstr &MI) const { return false; }
560
561 /// A pair composed of a register and a sub-register index.
562 /// Used to give some type checking when modeling Reg:SubReg.
565 unsigned SubReg;
566
568 : Reg(Reg), SubReg(SubReg) {}
569
570 bool operator==(const RegSubRegPair& P) const {
571 return Reg == P.Reg && SubReg == P.SubReg;
572 }
573 bool operator!=(const RegSubRegPair& P) const {
574 return !(*this == P);
575 }
576 };
577
578 /// A pair composed of a pair of a register and a sub-register index,
579 /// and another sub-register index.
580 /// Used to give some type checking when modeling Reg:SubReg1, SubReg2.
582 unsigned SubIdx;
583
585 unsigned SubIdx = 0)
587 };
588
589 /// Build the equivalent inputs of a REG_SEQUENCE for the given \p MI
590 /// and \p DefIdx.
591 /// \p [out] InputRegs of the equivalent REG_SEQUENCE. Each element of
592 /// the list is modeled as <Reg:SubReg, SubIdx>. Operands with the undef
593 /// flag are not added to this list.
594 /// E.g., REG_SEQUENCE %1:sub1, sub0, %2, sub1 would produce
595 /// two elements:
596 /// - %1:sub1, sub0
597 /// - %2<:0>, sub1
598 ///
599 /// \returns true if it is possible to build such an input sequence
600 /// with the pair \p MI, \p DefIdx. False otherwise.
601 ///
602 /// \pre MI.isRegSequence() or MI.isRegSequenceLike().
603 ///
604 /// \note The generic implementation does not provide any support for
605 /// MI.isRegSequenceLike(). In other words, one has to override
606 /// getRegSequenceLikeInputs for target specific instructions.
607 bool
608 getRegSequenceInputs(const MachineInstr &MI, unsigned DefIdx,
609 SmallVectorImpl<RegSubRegPairAndIdx> &InputRegs) const;
610
611 /// Build the equivalent inputs of a EXTRACT_SUBREG for the given \p MI
612 /// and \p DefIdx.
613 /// \p [out] InputReg of the equivalent EXTRACT_SUBREG.
614 /// E.g., EXTRACT_SUBREG %1:sub1, sub0, sub1 would produce:
615 /// - %1:sub1, sub0
616 ///
617 /// \returns true if it is possible to build such an input sequence
618 /// with the pair \p MI, \p DefIdx and the operand has no undef flag set.
619 /// False otherwise.
620 ///
621 /// \pre MI.isExtractSubreg() or MI.isExtractSubregLike().
622 ///
623 /// \note The generic implementation does not provide any support for
624 /// MI.isExtractSubregLike(). In other words, one has to override
625 /// getExtractSubregLikeInputs for target specific instructions.
626 bool getExtractSubregInputs(const MachineInstr &MI, unsigned DefIdx,
627 RegSubRegPairAndIdx &InputReg) const;
628
629 /// Build the equivalent inputs of a INSERT_SUBREG for the given \p MI
630 /// and \p DefIdx.
631 /// \p [out] BaseReg and \p [out] InsertedReg contain
632 /// the equivalent inputs of INSERT_SUBREG.
633 /// E.g., INSERT_SUBREG %0:sub0, %1:sub1, sub3 would produce:
634 /// - BaseReg: %0:sub0
635 /// - InsertedReg: %1:sub1, sub3
636 ///
637 /// \returns true if it is possible to build such an input sequence
638 /// with the pair \p MI, \p DefIdx and the operand has no undef flag set.
639 /// False otherwise.
640 ///
641 /// \pre MI.isInsertSubreg() or MI.isInsertSubregLike().
642 ///
643 /// \note The generic implementation does not provide any support for
644 /// MI.isInsertSubregLike(). In other words, one has to override
645 /// getInsertSubregLikeInputs for target specific instructions.
646 bool getInsertSubregInputs(const MachineInstr &MI, unsigned DefIdx,
647 RegSubRegPair &BaseReg,
648 RegSubRegPairAndIdx &InsertedReg) const;
649
650 /// Return true if two machine instructions would produce identical values.
651 /// By default, this is only true when the two instructions
652 /// are deemed identical except for defs. If this function is called when the
653 /// IR is still in SSA form, the caller can pass the MachineRegisterInfo for
654 /// aggressive checks.
655 virtual bool produceSameValue(const MachineInstr &MI0,
656 const MachineInstr &MI1,
657 const MachineRegisterInfo *MRI = nullptr) const;
658
/// \returns true if a branch from an instruction with opcode \p BranchOpc
/// bytes is capable of jumping to a position \p BrOffset bytes away.
virtual bool isBranchOffsetInRange(unsigned BranchOpc,
                                   int64_t BrOffset) const {
  // Required only by passes that relax out-of-range branches.
  llvm_unreachable("target did not implement");
}
665
666 /// \returns The block that branch instruction \p MI jumps to.
668 llvm_unreachable("target did not implement");
669 }
670
671 /// Insert an unconditional indirect branch at the end of \p MBB to \p
672 /// NewDestBB. Optionally, insert the clobbered register restoring in \p
673 /// RestoreBB. \p BrOffset indicates the offset of \p NewDestBB relative to
674 /// the offset of the position to insert the new branch.
676 MachineBasicBlock &NewDestBB,
677 MachineBasicBlock &RestoreBB,
678 const DebugLoc &DL, int64_t BrOffset = 0,
679 RegScavenger *RS = nullptr) const {
680 llvm_unreachable("target did not implement");
681 }
682
683 /// Analyze the branching code at the end of MBB, returning
684 /// true if it cannot be understood (e.g. it's a switch dispatch or isn't
685 /// implemented for a target). Upon success, this returns false and returns
686 /// with the following information in various cases:
687 ///
688 /// 1. If this block ends with no branches (it just falls through to its succ)
689 /// just return false, leaving TBB/FBB null.
690 /// 2. If this block ends with only an unconditional branch, it sets TBB to be
691 /// the destination block.
692 /// 3. If this block ends with a conditional branch and it falls through to a
693 /// successor block, it sets TBB to be the branch destination block and a
694 /// list of operands that evaluate the condition. These operands can be
695 /// passed to other TargetInstrInfo methods to create new branches.
696 /// 4. If this block ends with a conditional branch followed by an
697 /// unconditional branch, it returns the 'true' destination in TBB, the
698 /// 'false' destination in FBB, and a list of operands that evaluate the
699 /// condition. These operands can be passed to other TargetInstrInfo
700 /// methods to create new branches.
701 ///
702 /// Note that removeBranch and insertBranch must be implemented to support
703 /// cases where this method returns success.
704 ///
705 /// If AllowModify is true, then this routine is allowed to modify the basic
706 /// block (e.g. delete instructions after the unconditional branch).
707 ///
708 /// The CFG information in MBB.Predecessors and MBB.Successors must be valid
709 /// before calling this function.
711 MachineBasicBlock *&FBB,
713 bool AllowModify = false) const {
714 return true;
715 }
716
717 /// Represents a predicate at the MachineFunction level. The control flow a
718 /// MachineBranchPredicate represents is:
719 ///
720 /// Reg = LHS `Predicate` RHS == ConditionDef
721 /// if Reg then goto TrueDest else goto FalseDest
722 ///
725 PRED_EQ, // True if two values are equal
726 PRED_NE, // True if two values are not equal
727 PRED_INVALID // Sentinel value
728 };
729
736
737 /// SingleUseCondition is true if ConditionDef is dead except for the
738 /// branch(es) at the end of the basic block.
739 ///
740 bool SingleUseCondition = false;
741
742 explicit MachineBranchPredicate() = default;
743 };
744
745 /// Analyze the branching code at the end of MBB and parse it into the
746 /// MachineBranchPredicate structure if possible. Returns false on success
747 /// and true on failure.
748 ///
749 /// If AllowModify is true, then this routine is allowed to modify the basic
750 /// block (e.g. delete instructions after the unconditional branch).
751 ///
754 bool AllowModify = false) const {
755 return true;
756 }
757
758 /// Remove the branching code at the end of the specific MBB.
759 /// This is only invoked in cases where analyzeBranch returns success. It
760 /// returns the number of instructions that were removed.
761 /// If \p BytesRemoved is non-null, report the change in code size from the
762 /// removed instructions.
764 int *BytesRemoved = nullptr) const {
765 llvm_unreachable("Target didn't implement TargetInstrInfo::removeBranch!");
766 }
767
768 /// Insert branch code into the end of the specified MachineBasicBlock. The
769 /// operands to this method are the same as those returned by analyzeBranch.
770 /// This is only invoked in cases where analyzeBranch returns success. It
771 /// returns the number of instructions inserted. If \p BytesAdded is non-null,
772 /// report the change in code size from the added instructions.
773 ///
774 /// It is also invoked by tail merging to add unconditional branches in
775 /// cases where analyzeBranch doesn't apply because there was no original
776 /// branch to analyze. At least this much must be implemented, else tail
777 /// merging needs to be disabled.
778 ///
779 /// The CFG information in MBB.Predecessors and MBB.Successors must be valid
780 /// before calling this function.
784 const DebugLoc &DL,
785 int *BytesAdded = nullptr) const {
786 llvm_unreachable("Target didn't implement TargetInstrInfo::insertBranch!");
787 }
788
790 MachineBasicBlock *DestBB,
791 const DebugLoc &DL,
792 int *BytesAdded = nullptr) const {
793 return insertBranch(MBB, DestBB, nullptr, ArrayRef<MachineOperand>(), DL,
794 BytesAdded);
795 }
796
797 /// Object returned by analyzeLoopForPipelining. Allows software pipelining
798 /// implementations to query attributes of the loop being pipelined and to
799 /// apply target-specific updates to the loop once pipelining is complete.
801 public:
803 /// Return true if the given instruction should not be pipelined and should
804 /// be ignored. An example could be a loop comparison, or induction variable
805 /// update with no users being pipelined.
806 virtual bool shouldIgnoreForPipelining(const MachineInstr *MI) const = 0;
807
808 /// Return true if the proposed schedule should used. Otherwise return
809 /// false to not pipeline the loop. This function should be used to ensure
810 /// that pipelined loops meet target-specific quality heuristics.
812 return true;
813 }
814
815 /// Create a condition to determine if the trip count of the loop is greater
816 /// than TC, where TC is always one more than for the previous prologue or
817 /// 0 if this is being called for the outermost prologue.
818 ///
819 /// If the trip count is statically known to be greater than TC, return
820 /// true. If the trip count is statically known to be not greater than TC,
821 /// return false. Otherwise return nullopt and fill out Cond with the test
822 /// condition.
823 ///
824 /// Note: This hook is guaranteed to be called from the innermost to the
825 /// outermost prologue of the loop being software pipelined.
826 virtual std::optional<bool>
829
830 /// Create a condition to determine if the remaining trip count for a phase
831 /// is greater than TC. Some instructions such as comparisons may be
832 /// inserted at the bottom of MBB. All instructions expanded for the
833 /// phase must be inserted in MBB before calling this function.
834 /// LastStage0Insts is the map from the original instructions scheduled at
835 /// stage#0 to the expanded instructions for the last iteration of the
836 /// kernel. LastStage0Insts is intended to obtain the instruction that
837 /// refers the latest loop counter value.
838 ///
839 /// MBB can also be a predecessor of the prologue block. Then
840 /// LastStage0Insts must be empty and the compared value is the initial
841 /// value of the trip count.
846 "Target didn't implement "
847 "PipelinerLoopInfo::createRemainingIterationsGreaterCondition!");
848 }
849
850 /// Modify the loop such that the trip count is
851 /// OriginalTC + TripCountAdjust.
852 virtual void adjustTripCount(int TripCountAdjust) = 0;
853
854 /// Called when the loop's preheader has been modified to NewPreheader.
855 virtual void setPreheader(MachineBasicBlock *NewPreheader) = 0;
856
857 /// Called when the loop is being removed. Any instructions in the preheader
858 /// should be removed.
859 ///
860 /// Once this function is called, no other functions on this object are
861 /// valid; the loop has been removed.
862 virtual void disposed(LiveIntervals *LIS = nullptr) {}
863
864 /// Return true if the target can expand pipelined schedule with modulo
865 /// variable expansion.
866 virtual bool isMVEExpanderSupported() { return false; }
867 };
868
869 /// Analyze loop L, which must be a single-basic-block loop, and if the
870 /// conditions can be understood enough produce a PipelinerLoopInfo object.
871 virtual std::unique_ptr<PipelinerLoopInfo>
873 return nullptr;
874 }
875
876 /// Analyze the loop code, return true if it cannot be understood. Upon
877 /// success, this function returns false and returns information about the
878 /// induction variable and compare instruction used at the end.
879 virtual bool analyzeLoop(MachineLoop &L, MachineInstr *&IndVarInst,
880 MachineInstr *&CmpInst) const {
881 return true;
882 }
883
884 /// Generate code to reduce the loop iteration by one and check if the loop
885 /// is finished. Return the value/register of the new loop count. We need
886 /// this function when peeling off one or more iterations of a loop. This
887 /// function assumes the nth iteration is peeled first.
889 MachineBasicBlock &PreHeader,
890 MachineInstr *IndVar, MachineInstr &Cmp,
893 unsigned Iter, unsigned MaxIter) const {
894 llvm_unreachable("Target didn't implement ReduceLoopCount");
895 }
896
897 /// Delete the instruction OldInst and everything after it, replacing it with
898 /// an unconditional branch to NewDest. This is used by the tail merging pass.
899 virtual void ReplaceTailWithBranchTo(MachineBasicBlock::iterator Tail,
900 MachineBasicBlock *NewDest) const;
901
902 /// Return true if it's legal to split the given basic
903 /// block at the specified instruction (i.e. instruction would be the start
904 /// of a new basic block).
907 return true;
908 }
909
910 /// Return true if it's profitable to predicate
911 /// instructions with accumulated instruction latency of "NumCycles"
912 /// of the specified basic block, where the probability of the instructions
913 /// being executed is given by Probability, and Confidence is a measure
914 /// of our confidence that it will be properly predicted.
915 virtual bool isProfitableToIfCvt(MachineBasicBlock &MBB, unsigned NumCycles,
916 unsigned ExtraPredCycles,
917 BranchProbability Probability) const {
918 return false;
919 }
920
921 /// Second variant of isProfitableToIfCvt. This one
922 /// checks for the case where two basic blocks from true and false path
923 /// of a if-then-else (diamond) are predicated on mutually exclusive
924 /// predicates, where the probability of the true path being taken is given
925 /// by Probability, and Confidence is a measure of our confidence that it
926 /// will be properly predicted.
927 virtual bool isProfitableToIfCvt(MachineBasicBlock &TMBB, unsigned NumTCycles,
928 unsigned ExtraTCycles,
929 MachineBasicBlock &FMBB, unsigned NumFCycles,
930 unsigned ExtraFCycles,
931 BranchProbability Probability) const {
932 return false;
933 }
934
935 /// Return true if it's profitable for if-converter to duplicate instructions
936 /// of specified accumulated instruction latencies in the specified MBB to
937 /// enable if-conversion.
938 /// The probability of the instructions being executed is given by
939 /// Probability, and Confidence is a measure of our confidence that it
940 /// will be properly predicted.
942 unsigned NumCycles,
943 BranchProbability Probability) const {
944 return false;
945 }
946
947 /// Return the increase in code size needed to predicate a contiguous run of
948 /// NumInsts instructions.
950 unsigned NumInsts) const {
951 return 0;
952 }
953
954 /// Return an estimate for the code size reduction (in bytes) which will be
955 /// caused by removing the given branch instruction during if-conversion.
956 virtual unsigned predictBranchSizeForIfCvt(MachineInstr &MI) const {
957 return getInstSizeInBytes(MI);
958 }
959
960 /// Return true if it's profitable to unpredicate
961 /// one side of a 'diamond', i.e. two sides of if-else predicated on mutually
962 /// exclusive predicates.
963 /// e.g.
964 /// subeq r0, r1, #1
965 /// addne r0, r1, #1
966 /// =>
967 /// sub r0, r1, #1
968 /// addne r0, r1, #1
969 ///
970 /// This may be profitable is conditional instructions are always executed.
972 MachineBasicBlock &FMBB) const {
973 return false;
974 }
975
976 /// Return true if it is possible to insert a select
977 /// instruction that chooses between TrueReg and FalseReg based on the
978 /// condition code in Cond.
979 ///
980 /// When successful, also return the latency in cycles from TrueReg,
981 /// FalseReg, and Cond to the destination register. In most cases, a select
982 /// instruction will be 1 cycle, so CondCycles = TrueCycles = FalseCycles = 1
983 ///
984 /// Some x86 implementations have 2-cycle cmov instructions.
985 ///
986 /// @param MBB Block where select instruction would be inserted.
987 /// @param Cond Condition returned by analyzeBranch.
988 /// @param DstReg Virtual dest register that the result should write to.
989 /// @param TrueReg Virtual register to select when Cond is true.
990 /// @param FalseReg Virtual register to select when Cond is false.
991 /// @param CondCycles Latency from Cond+Branch to select output.
992 /// @param TrueCycles Latency from TrueReg to select output.
993 /// @param FalseCycles Latency from FalseReg to select output.
996 Register TrueReg, Register FalseReg,
997 int &CondCycles, int &TrueCycles,
998 int &FalseCycles) const {
999 return false;
1000 }
1001
1002 /// Insert a select instruction into MBB before I that will copy TrueReg to
1003 /// DstReg when Cond is true, and FalseReg to DstReg when Cond is false.
1004 ///
1005 /// This function can only be called after canInsertSelect() returned true.
1006 /// The condition in Cond comes from analyzeBranch, and it can be assumed
1007 /// that the same flags or registers required by Cond are available at the
1008 /// insertion point.
1009 ///
1010 /// @param MBB Block where select instruction should be inserted.
1011 /// @param I Insertion point.
1012 /// @param DL Source location for debugging.
1013 /// @param DstReg Virtual register to be defined by select instruction.
1014 /// @param Cond Condition as computed by analyzeBranch.
1015 /// @param TrueReg Virtual register to copy when Cond is true.
1016 /// @param FalseReg Virtual register to copy when Cons is false.
1020 Register TrueReg, Register FalseReg) const {
1021 llvm_unreachable("Target didn't implement TargetInstrInfo::insertSelect!");
1022 }
1023
1024 /// Analyze the given select instruction, returning true if
1025 /// it cannot be understood. It is assumed that MI->isSelect() is true.
1026 ///
1027 /// When successful, return the controlling condition and the operands that
1028 /// determine the true and false result values.
1029 ///
1030 /// Result = SELECT Cond, TrueOp, FalseOp
1031 ///
1032 /// Some targets can optimize select instructions, for example by predicating
1033 /// the instruction defining one of the operands. Such targets should set
1034 /// Optimizable.
1035 ///
1036 /// @param MI Select instruction to analyze.
1037 /// @param Cond Condition controlling the select.
1038 /// @param TrueOp Operand number of the value selected when Cond is true.
1039 /// @param FalseOp Operand number of the value selected when Cond is false.
1040 /// @param Optimizable Returned as true if MI is optimizable.
1041 /// @returns False on success.
1042 virtual bool analyzeSelect(const MachineInstr &MI,
1044 unsigned &TrueOp, unsigned &FalseOp,
1045 bool &Optimizable) const {
1046 assert(MI.getDesc().isSelect() && "MI must be a select instruction");
1047 return true;
1048 }
1049
1050 /// Given a select instruction that was understood by
1051 /// analyzeSelect and returned Optimizable = true, attempt to optimize MI by
1052 /// merging it with one of its operands. Returns NULL on failure.
1053 ///
1054 /// When successful, returns the new select instruction. The client is
1055 /// responsible for deleting MI.
1056 ///
1057 /// If both sides of the select can be optimized, PreferFalse is used to pick
1058 /// a side.
1059 ///
1060 /// @param MI Optimizable select instruction.
1061 /// @param NewMIs Set that record all MIs in the basic block up to \p
1062 /// MI. Has to be updated with any newly created MI or deleted ones.
1063 /// @param PreferFalse Try to optimize FalseOp instead of TrueOp.
1064 /// @returns Optimized instruction or NULL.
1067 bool PreferFalse = false) const {
1068 // This function must be implemented if Optimizable is ever set.
1069 llvm_unreachable("Target must implement TargetInstrInfo::optimizeSelect!");
1070 }
1071
1072 /// Emit instructions to copy a pair of physical registers.
1073 ///
1074 /// This function should support copies within any legal register class as
1075 /// well as any cross-class copies created during instruction selection.
1076 ///
1077 /// The source and destination registers may overlap, which may require a
1078 /// careful implementation when multiple copy instructions are required for
1079 /// large registers. See for example the ARM target.
1080 ///
1081 /// If RenamableDest is true, the copy instruction's destination operand is
1082 /// marked renamable.
1083 /// If RenamableSrc is true, the copy instruction's source operand is
1084 /// marked renamable.
1087 Register DestReg, Register SrcReg, bool KillSrc,
1088 bool RenamableDest = false,
1089 bool RenamableSrc = false) const {
1090 llvm_unreachable("Target didn't implement TargetInstrInfo::copyPhysReg!");
1091 }
1092
1093 /// Allow targets to tell MachineVerifier whether a specific register
1094 /// MachineOperand can be used as part of PC-relative addressing.
1095 /// PC-relative addressing modes in many CISC architectures contain
1096 /// (non-PC) registers as offsets or scaling values, which inherently
1097 /// tags the corresponding MachineOperand with OPERAND_PCREL.
1098 ///
1099 /// @param MO The MachineOperand in question. MO.isReg() should always
1100 /// be true.
1101 /// @return Whether this operand is allowed to be used PC-relatively.
1102 virtual bool isPCRelRegisterOperandLegal(const MachineOperand &MO) const {
1103 return false;
1104 }
1105
1106 /// Return an index for MachineJumpTableInfo if \p insn is an indirect jump
1107 /// using a jump table, otherwise -1.
1108 virtual int getJumpTableIndex(const MachineInstr &MI) const { return -1; }
1109
1110protected:
1111 /// Target-dependent implementation for IsCopyInstr.
1112 /// If the specific machine instruction is a instruction that moves/copies
1113 /// value from one register to another register return destination and source
1114 /// registers as machine operands.
1115 virtual std::optional<DestSourcePair>
1117 return std::nullopt;
1118 }
1119
1120 virtual std::optional<DestSourcePair>
1122 return std::nullopt;
1123 }
1124
1125 /// Return true if the given terminator MI is not expected to spill. This
1126 /// sets the live interval as not spillable and adjusts phi node lowering to
1127 /// not introduce copies after the terminator. Use with care, these are
1128 /// currently used for hardware loop intrinsics in very controlled situations,
1129 /// created prior to registry allocation in loops that only have single phi
1130 /// users for the terminators value. They may run out of registers if not used
1131 /// carefully.
1132 virtual bool isUnspillableTerminatorImpl(const MachineInstr *MI) const {
1133 return false;
1134 }
1135
1136public:
1137 /// If the specific machine instruction is a instruction that moves/copies
1138 /// value from one register to another register return destination and source
1139 /// registers as machine operands.
1140 /// For COPY-instruction the method naturally returns destination and source
1141 /// registers as machine operands, for all other instructions the method calls
1142 /// target-dependent implementation.
1143 std::optional<DestSourcePair> isCopyInstr(const MachineInstr &MI) const {
1144 if (MI.isCopy()) {
1145 return DestSourcePair{MI.getOperand(0), MI.getOperand(1)};
1146 }
1147 return isCopyInstrImpl(MI);
1148 }
1149
1150 // Similar to `isCopyInstr`, but adds non-copy semantics on MIR, but
1151 // ultimately generates a copy instruction.
1152 std::optional<DestSourcePair> isCopyLikeInstr(const MachineInstr &MI) const {
1153 if (auto IsCopyInstr = isCopyInstr(MI))
1154 return IsCopyInstr;
1155 return isCopyLikeInstrImpl(MI);
1156 }
1157
1158 bool isFullCopyInstr(const MachineInstr &MI) const {
1159 auto DestSrc = isCopyInstr(MI);
1160 if (!DestSrc)
1161 return false;
1162
1163 const MachineOperand *DestRegOp = DestSrc->Destination;
1164 const MachineOperand *SrcRegOp = DestSrc->Source;
1165 return !DestRegOp->getSubReg() && !SrcRegOp->getSubReg();
1166 }
1167
1168 /// If the specific machine instruction is an instruction that adds an
1169 /// immediate value and a register, and stores the result in the given
1170 /// register \c Reg, return a pair of the source register and the offset
1171 /// which has been added.
1172 virtual std::optional<RegImmPair> isAddImmediate(const MachineInstr &MI,
1173 Register Reg) const {
1174 return std::nullopt;
1175 }
1176
1177 /// Returns true if MI is an instruction that defines Reg to have a constant
1178 /// value and the value is recorded in ImmVal. The ImmVal is a result that
1179 /// should be interpreted as modulo size of Reg.
1181 const Register Reg,
1182 int64_t &ImmVal) const {
1183 return false;
1184 }
1185
1186 /// Store the specified register of the given register class to the specified
1187 /// stack frame index. The store instruction is to be added to the given
1188 /// machine basic block before the specified machine instruction. If isKill
1189 /// is true, the register operand is the last use and must be marked kill. If
1190 /// \p SrcReg is being directly spilled as part of assigning a virtual
1191 /// register, \p VReg is the register being assigned. This additional register
1192 /// argument is needed for certain targets when invoked from RegAllocFast to
1193 /// map the spilled physical register to its virtual register. A null register
1194 /// can be passed elsewhere. The \p Flags is used to set appropriate machine
1195 /// flags on the spill instruction e.g. FrameSetup flag on a callee saved
1196 /// register spill instruction, part of prologue, during the frame lowering.
1199 bool isKill, int FrameIndex, const TargetRegisterClass *RC, Register VReg,
1201 llvm_unreachable("Target didn't implement "
1202 "TargetInstrInfo::storeRegToStackSlot!");
1203 }
1204
1205 /// Load the specified register of the given register class from the specified
1206 /// stack frame index. The load instruction is to be added to the given
1207 /// machine basic block before the specified machine instruction. If \p
1208 /// DestReg is being directly reloaded as part of assigning a virtual
1209 /// register, \p VReg is the register being assigned. This additional register
1210 /// argument is needed for certain targets when invoked from RegAllocFast to
1211 /// map the loaded physical register to its virtual register. A null register
1212 /// can be passed elsewhere. \p SubReg is required for partial reload of
1213 /// tuples if the target supports it. The \p Flags is used to set appropriate
1214 /// machine flags on the spill instruction e.g. FrameDestroy flag on a callee
1215 /// saved register reload instruction, part of epilogue, during the frame
1216 /// lowering.
1219 int FrameIndex, const TargetRegisterClass *RC, Register VReg,
1220 unsigned SubReg = 0,
1222 llvm_unreachable("Target didn't implement "
1223 "TargetInstrInfo::loadRegFromStackSlot!");
1224 }
1225
1226 /// This function is called for all pseudo instructions
1227 /// that remain after register allocation. Many pseudo instructions are
1228 /// created to help register allocation. This is the place to convert them
1229 /// into real instructions. The target can edit MI in place, or it can insert
1230 /// new instructions and erase MI. The function should return true if
1231 /// anything was changed.
1232 virtual bool expandPostRAPseudo(MachineInstr &MI) const { return false; }
1233
1234 /// Check whether the target can fold a load that feeds a subreg operand
1235 /// (or a subreg operand that feeds a store).
1236 /// For example, X86 may want to return true if it can fold
1237 /// movl (%esp), %eax
1238 /// subb, %al, ...
1239 /// Into:
1240 /// subb (%esp), ...
1241 ///
1242 /// Ideally, we'd like the target implementation of foldMemoryOperand() to
1243 /// reject subregs - but since this behavior used to be enforced in the
1244 /// target-independent code, moving this responsibility to the targets
1245 /// has the potential of causing nasty silent breakage in out-of-tree targets.
1246 virtual bool isSubregFoldable() const { return false; }
1247
1248 /// For a patchpoint, stackmap, or statepoint intrinsic, return the range of
1249 /// operands which can't be folded into stack references. Operands outside
1250 /// of the range are most likely foldable but it is not guaranteed.
1251 /// These instructions are unique in that stack references for some operands
1252 /// have the same execution cost (e.g. none) as the unfolded register forms.
1253 /// The ranged return is guaranteed to include all operands which can't be
1254 /// folded at zero cost.
1255 virtual std::pair<unsigned, unsigned>
1256 getPatchpointUnfoldableRange(const MachineInstr &MI) const;
1257
1258 /// Attempt to fold a load or store of the specified stack
1259 /// slot into the specified machine instruction for the specified operand(s).
1260 /// If this is possible, a new instruction is returned with the specified
1261 /// operand folded, otherwise NULL is returned.
1262 /// The new instruction is inserted before MI, and the client is responsible
1263 /// for removing the old instruction.
1264 /// If VRM is passed, the assigned physregs can be inspected by target to
1265 /// decide on using an opcode (note that those assignments can still change).
1266 MachineInstr *foldMemoryOperand(MachineInstr &MI, ArrayRef<unsigned> Ops,
1267 int FI,
1268 LiveIntervals *LIS = nullptr,
1269 VirtRegMap *VRM = nullptr) const;
1270
1271 /// Same as the previous version except it allows folding of any load and
1272 /// store from / to any address, not just from a specific stack slot.
1273 MachineInstr *foldMemoryOperand(MachineInstr &MI, ArrayRef<unsigned> Ops,
1274 MachineInstr &LoadMI,
1275 LiveIntervals *LIS = nullptr) const;
1276
1277 /// This function defines the logic to lower COPY instruction to
1278 /// target specific instruction(s).
1279 void lowerCopy(MachineInstr *MI, const TargetRegisterInfo *TRI) const;
1280
1281 /// Return true when there is potentially a faster code sequence
1282 /// for an instruction chain ending in \p Root. All potential patterns are
1283 /// returned in the \p Patterns vector. Patterns should be sorted in priority
1284 /// order since the pattern evaluator stops checking as soon as it finds a
1285 /// faster sequence.
1286 /// \param Root - Instruction that could be combined with one of its operands
1287 /// \param Patterns - Vector of possible combination patterns
1288 virtual bool getMachineCombinerPatterns(MachineInstr &Root,
1289 SmallVectorImpl<unsigned> &Patterns,
1290 bool DoRegPressureReduce) const;
1291
1292 /// Return true if target supports reassociation of instructions in machine
1293 /// combiner pass to reduce register pressure for a given BB.
1294 virtual bool
1296 const RegisterClassInfo *RegClassInfo) const {
1297 return false;
1298 }
1299
1300 /// Fix up the placeholder we may add in genAlternativeCodeSequence().
1301 virtual void
1303 SmallVectorImpl<MachineInstr *> &InsInstrs) const {}
1304
1305 /// Return true when a code sequence can improve throughput. It
1306 /// should be called only for instructions in loops.
1307 /// \param Pattern - combiner pattern
1308 virtual bool isThroughputPattern(unsigned Pattern) const;
1309
1310 /// Return the objective of a combiner pattern.
1311 /// \param Pattern - combiner pattern
1312 virtual CombinerObjective getCombinerObjective(unsigned Pattern) const;
1313
1314 /// Return true if the input \P Inst is part of a chain of dependent ops
1315 /// that are suitable for reassociation, otherwise return false.
1316 /// If the instruction's operands must be commuted to have a previous
1317 /// instruction of the same type define the first source operand, \P Commuted
1318 /// will be set to true.
1319 bool isReassociationCandidate(const MachineInstr &Inst, bool &Commuted) const;
1320
1321 /// Return true when \P Inst is both associative and commutative. If \P Invert
1322 /// is true, then the inverse of \P Inst operation must be tested.
1324 bool Invert = false) const {
1325 return false;
1326 }
1327
1328 /// Find chains of accumulations that can be rewritten as a tree for increased
1329 /// ILP.
1330 bool getAccumulatorReassociationPatterns(
1331 MachineInstr &Root, SmallVectorImpl<unsigned> &Patterns) const;
1332
1333 /// Find the chain of accumulator instructions in \P MBB and return them in
1334 /// \P Chain.
1335 void getAccumulatorChain(MachineInstr *CurrentInstr,
1336 SmallVectorImpl<Register> &Chain) const;
1337
1338 /// Return true when \P OpCode is an instruction which performs
1339 /// accumulation into one of its operand registers.
1340 virtual bool isAccumulationOpcode(unsigned Opcode) const { return false; }
1341
1342 /// Returns an opcode which defines the accumulator used by \P Opcode.
1343 virtual unsigned getAccumulationStartOpcode(unsigned Opcode) const {
1344 llvm_unreachable("Function not implemented for target!");
1345 return 0;
1346 }
1347
1348 /// Returns the opcode that should be use to reduce accumulation registers.
1349 virtual unsigned
1350 getReduceOpcodeForAccumulator(unsigned int AccumulatorOpCode) const {
1351 llvm_unreachable("Function not implemented for target!");
1352 return 0;
1353 }
1354
1355 /// Reduces branches of the accumulator tree into a single register.
1356 void reduceAccumulatorTree(SmallVectorImpl<Register> &RegistersToReduce,
1358 MachineFunction &MF, MachineInstr &Root,
1360 DenseMap<Register, unsigned> &InstrIdxForVirtReg,
1361 Register ResultReg) const;
1362
1363 /// Return the inverse operation opcode if it exists for \P Opcode (e.g. add
1364 /// for sub and vice versa).
1365 virtual std::optional<unsigned> getInverseOpcode(unsigned Opcode) const {
1366 return std::nullopt;
1367 }
1368
1369 /// Return true when \P Opcode1 or its inversion is equal to \P Opcode2.
1370 bool areOpcodesEqualOrInverse(unsigned Opcode1, unsigned Opcode2) const;
1371
1372 /// Return true when \P Inst has reassociable operands in the same \P MBB.
1373 virtual bool hasReassociableOperands(const MachineInstr &Inst,
1374 const MachineBasicBlock *MBB) const;
1375
1376 /// Return true when \P Inst has reassociable sibling.
1377 virtual bool hasReassociableSibling(const MachineInstr &Inst,
1378 bool &Commuted) const;
1379
1380 /// When getMachineCombinerPatterns() finds patterns, this function generates
1381 /// the instructions that could replace the original code sequence. The client
1382 /// has to decide whether the actual replacement is beneficial or not.
1383 /// \param Root - Instruction that could be combined with one of its operands
1384 /// \param Pattern - Combination pattern for Root
1385 /// \param InsInstrs - Vector of new instructions that implement Pattern
1386 /// \param DelInstrs - Old instructions, including Root, that could be
1387 /// replaced by InsInstr
1388 /// \param InstIdxForVirtReg - map of virtual register to instruction in
1389 /// InsInstr that defines it
1390 virtual void genAlternativeCodeSequence(
1391 MachineInstr &Root, unsigned Pattern,
1394 DenseMap<Register, unsigned> &InstIdxForVirtReg) const;
1395
1396 /// When calculate the latency of the root instruction, accumulate the
1397 /// latency of the sequence to the root latency.
1398 /// \param Root - Instruction that could be combined with one of its operands
1400 return true;
1401 }
1402
1403 /// The returned array encodes the operand index for each parameter because
1404 /// the operands may be commuted; the operand indices for associative
1405 /// operations might also be target-specific. Each element specifies the index
1406 /// of {Prev, A, B, X, Y}.
1407 virtual void
1408 getReassociateOperandIndices(const MachineInstr &Root, unsigned Pattern,
1409 std::array<unsigned, 5> &OperandIndices) const;
1410
1411 /// Attempt to reassociate \P Root and \P Prev according to \P Pattern to
1412 /// reduce critical path length.
1413 void reassociateOps(MachineInstr &Root, MachineInstr &Prev, unsigned Pattern,
1417 DenseMap<Register, unsigned> &InstrIdxForVirtReg) const;
1418
1419 /// Reassociation of some instructions requires inverse operations (e.g.
1420 /// (X + A) - Y => (X - Y) + A). This method returns a pair of new opcodes
1421 /// (new root opcode, new prev opcode) that must be used to reassociate \P
1422 /// Root and \P Prev accoring to \P Pattern.
1423 std::pair<unsigned, unsigned>
1424 getReassociationOpcodes(unsigned Pattern, const MachineInstr &Root,
1425 const MachineInstr &Prev) const;
1426
1427 /// The limit on resource length extension we accept in MachineCombiner Pass.
1428 virtual int getExtendResourceLenLimit() const { return 0; }
1429
1430 /// This is an architecture-specific helper function of reassociateOps.
1431 /// Set special operand attributes for new instructions after reassociation.
1432 virtual void setSpecialOperandAttr(MachineInstr &OldMI1, MachineInstr &OldMI2,
1433 MachineInstr &NewMI1,
1434 MachineInstr &NewMI2) const {}
1435
1436 /// Return true when a target supports MachineCombiner.
1437 virtual bool useMachineCombiner() const { return false; }
1438
1439 /// Return a strategy that MachineCombiner must use when creating traces.
1440 virtual MachineTraceStrategy getMachineCombinerTraceStrategy() const;
1441
1442 /// Return true if the given SDNode can be copied during scheduling
1443 /// even if it has glue.
1444 virtual bool canCopyGluedNodeDuringSchedule(SDNode *N) const { return false; }
1445
1446protected:
1447 /// Target-dependent implementation for foldMemoryOperand.
1448 /// Target-independent code in foldMemoryOperand will
1449 /// take care of adding a MachineMemOperand to the newly created instruction.
1450 /// The instruction and any auxiliary instructions necessary will be inserted
1451 /// at InsertPt.
1452 virtual MachineInstr *
1455 MachineBasicBlock::iterator InsertPt, int FrameIndex,
1456 LiveIntervals *LIS = nullptr,
1457 VirtRegMap *VRM = nullptr) const {
1458 return nullptr;
1459 }
1460
1461 /// Target-dependent implementation for foldMemoryOperand.
1462 /// Target-independent code in foldMemoryOperand will
1463 /// take care of adding a MachineMemOperand to the newly created instruction.
1464 /// The instruction and any auxiliary instructions necessary will be inserted
1465 /// at InsertPt.
1468 MachineBasicBlock::iterator InsertPt, MachineInstr &LoadMI,
1469 LiveIntervals *LIS = nullptr) const {
1470 return nullptr;
1471 }
1472
1473 /// Target-dependent implementation of getRegSequenceInputs.
1474 ///
1475 /// \returns true if it is possible to build the equivalent
1476 /// REG_SEQUENCE inputs with the pair \p MI, \p DefIdx. False otherwise.
1477 ///
1478 /// \pre MI.isRegSequenceLike().
1479 ///
1480 /// \see TargetInstrInfo::getRegSequenceInputs.
1482 const MachineInstr &MI, unsigned DefIdx,
1483 SmallVectorImpl<RegSubRegPairAndIdx> &InputRegs) const {
1484 return false;
1485 }
1486
1487 /// Target-dependent implementation of getExtractSubregInputs.
1488 ///
1489 /// \returns true if it is possible to build the equivalent
1490 /// EXTRACT_SUBREG inputs with the pair \p MI, \p DefIdx. False otherwise.
1491 ///
1492 /// \pre MI.isExtractSubregLike().
1493 ///
1494 /// \see TargetInstrInfo::getExtractSubregInputs.
1496 unsigned DefIdx,
1497 RegSubRegPairAndIdx &InputReg) const {
1498 return false;
1499 }
1500
1501 /// Target-dependent implementation of getInsertSubregInputs.
1502 ///
1503 /// \returns true if it is possible to build the equivalent
1504 /// INSERT_SUBREG inputs with the pair \p MI, \p DefIdx. False otherwise.
1505 ///
1506 /// \pre MI.isInsertSubregLike().
1507 ///
1508 /// \see TargetInstrInfo::getInsertSubregInputs.
1509 virtual bool
1511 RegSubRegPair &BaseReg,
1512 RegSubRegPairAndIdx &InsertedReg) const {
1513 return false;
1514 }
1515
1516public:
1517 /// unfoldMemoryOperand - Separate a single instruction which folded a load or
1518 /// a store or a load and a store into two or more instruction. If this is
1519 /// possible, returns true as well as the new instructions by reference.
1520 virtual bool
1522 bool UnfoldLoad, bool UnfoldStore,
1523 SmallVectorImpl<MachineInstr *> &NewMIs) const {
1524 return false;
1525 }
1526
1528 SmallVectorImpl<SDNode *> &NewNodes) const {
1529 return false;
1530 }
1531
1532 /// Returns the opcode of the would be new
1533 /// instruction after load / store are unfolded from an instruction of the
1534 /// specified opcode. It returns zero if the specified unfolding is not
1535 /// possible. If LoadRegIndex is non-null, it is filled in with the operand
1536 /// index of the operand which will hold the register holding the loaded
1537 /// value.
1538 virtual unsigned
1539 getOpcodeAfterMemoryUnfold(unsigned Opc, bool UnfoldLoad, bool UnfoldStore,
1540 unsigned *LoadRegIndex = nullptr) const {
1541 return 0;
1542 }
1543
1544 /// This is used by the pre-regalloc scheduler to determine if two loads are
1545 /// loading from the same base address. It should only return true if the base
1546 /// pointers are the same and the only differences between the two addresses
1547 /// are the offset. It also returns the offsets by reference.
1548 virtual bool areLoadsFromSameBasePtr(SDNode *Load1, SDNode *Load2,
1549 int64_t &Offset1,
1550 int64_t &Offset2) const {
1551 return false; // Default: target cannot relate the two base pointers.
1552 }
1553
1554 /// This is used by the pre-regalloc scheduler to determine (in conjunction
1555 /// with areLoadsFromSameBasePtr) if two loads should be scheduled together.
1556 /// On some targets if two loads are loading from
1557 /// addresses in the same cache line, it's better if they are scheduled
1558 /// together. This function takes two integers that represent the load offsets
1559 /// from the common base address. It returns true if it decides it's desirable
1560 /// to schedule the two loads together. "NumLoads" is the number of loads that
1561 /// have already been scheduled after Load1.
1562 virtual bool shouldScheduleLoadsNear(SDNode *Load1, SDNode *Load2,
1563 int64_t Offset1, int64_t Offset2,
1564 unsigned NumLoads) const {
1565 return false; // Default: no load-clustering preference.
1566 }
1567
1568 /// Get the base operand and byte offset of an instruction that reads/writes
1569 /// memory. This is a convenience function for callers that are only prepared
1570 /// to handle a single base operand.
1571 /// FIXME: Move Offset and OffsetIsScalable to some ElementCount-style
1572 /// abstraction that supports negative offsets.
/// \see getMemOperandsWithOffsetWidth for the multi-base-operand form.
1573 bool getMemOperandWithOffset(const MachineInstr &MI,
1574 const MachineOperand *&BaseOp, int64_t &Offset,
1575 bool &OffsetIsScalable,
1576 const TargetRegisterInfo *TRI) const;
1577
1578 /// Get zero or more base operands and the byte offset of an instruction that
1579 /// reads/writes memory. Note that there may be zero base operands if the
1580 /// instruction accesses a constant address.
1581 /// It returns false if MI does not read/write memory.
1582 /// It returns false if base operands and offset could not be determined.
1583 /// It is not guaranteed to always recognize base operands and offsets in all
1584 /// cases.
1585 /// FIXME: Move Offset and OffsetIsScalable to some ElementCount-style
1586 /// abstraction that supports negative offsets.
1589 int64_t &Offset, bool &OffsetIsScalable, LocationSize &Width,
1590 const TargetRegisterInfo *TRI) const {
1591 return false;
1592 }
1593
1594 /// Return true if the instruction contains a base register and offset. If
1595 /// true, the function also sets the operand position in the instruction
1596 /// for the base register and offset.
1598 unsigned &BasePos,
1599 unsigned &OffsetPos) const {
1600 return false;
1601 }
1602
1603 /// Target dependent implementation to get the values constituting the address
1604 /// MachineInstr that is accessing memory. These values are returned as a
1605 /// struct ExtAddrMode which contains all relevant information to make up the
1606 /// address.
1607 virtual std::optional<ExtAddrMode>
1609 const TargetRegisterInfo *TRI) const {
1610 return std::nullopt;
1611 }
1612
1613 /// Check if it's possible and beneficial to fold the addressing computation
1614 /// `AddrI` into the addressing mode of the load/store instruction `MemI`. The
1615 /// memory instruction is a user of the virtual register `Reg`, which in turn
1616 /// is the ultimate destination of zero or more COPY instructions from the
1617 /// output register of `AddrI`.
1618 /// Return the addressing mode after folding in `AM`.
1620 const MachineInstr &AddrI,
1621 ExtAddrMode &AM) const {
1622 return false;
1623 }
1624
1625 /// Emit a load/store instruction with the same value register as `MemI`, but
1626 /// using the address from `AM`. The addressing mode must have been obtained
1627 /// from `canFoldIntoAddr` for the same memory instruction.
1629 const ExtAddrMode &AM) const {
1630 llvm_unreachable("target did not implement emitLdStWithAddr()");
1631 }
1632
1633 /// Returns true if MI's Def is NullValueReg, and the MI
1634 /// does not change the Zero value. i.e. cases such as rax = shr rax, X where
1635 /// NullValueReg = rax. Note that if the NullValueReg is non-zero, this
1636 /// function can return true even if it becomes zero. Specifically cases such as
1637 /// NullValueReg = shl NullValueReg, 63.
1639 const Register NullValueReg,
1640 const TargetRegisterInfo *TRI) const {
1641 return false;
1642 }
1643
1644 /// If the instruction is an increment of a constant value, return the amount.
1645 virtual bool getIncrementValue(const MachineInstr &MI, int &Value) const {
1646 return false; // Default: not recognized as a constant increment.
1647 }
1648
1649 /// Returns true if the two given memory operations should be scheduled
1650 /// adjacent. Note that you have to add:
1651 /// DAG->addMutation(createLoadClusterDAGMutation(DAG->TII, DAG->TRI));
1652 /// or
1653 /// DAG->addMutation(createStoreClusterDAGMutation(DAG->TII, DAG->TRI));
1654 /// to TargetMachine::createMachineScheduler() to have an effect.
1655 ///
1656 /// \p BaseOps1 and \p BaseOps2 are memory operands of two memory operations.
1657 /// \p Offset1 and \p Offset2 are the byte offsets for the memory
1658 /// operations.
1659 /// \p OffsetIsScalable1 and \p OffsetIsScalable2 indicate if the offset is
1660 /// scaled by a runtime quantity.
1661 /// \p ClusterSize is the number of operations in the resulting load/store
1662 /// cluster if this hook returns true.
1663 /// \p NumBytes is the number of bytes that will be loaded from all the
1664 /// clustered loads if this hook returns true.
1666 int64_t Offset1, bool OffsetIsScalable1,
1668 int64_t Offset2, bool OffsetIsScalable2,
1669 unsigned ClusterSize,
1670 unsigned NumBytes) const {
1671 llvm_unreachable("target did not implement shouldClusterMemOps()");
1672 }
1673
1674 /// Reverses the branch condition of the specified condition list,
1675 /// returning false on success and true if it cannot be reversed.
1676 virtual bool
1680
1681 /// Insert a noop into the instruction stream at the specified point.
1682 virtual void insertNoop(MachineBasicBlock &MBB,
1684
1685 /// Insert noops into the instruction stream at the specified point.
1686 virtual void insertNoops(MachineBasicBlock &MBB,
1688 unsigned Quantity) const;
1689
1690 /// Return the noop instruction to use for a noop.
1691 virtual MCInst getNop() const;
1692
1693 /// Return true for post-incremented instructions.
1694 virtual bool isPostIncrement(const MachineInstr &MI) const { return false; }
1695
1696 /// Returns true if the instruction is already predicated.
1697 virtual bool isPredicated(const MachineInstr &MI) const { return false; }
1698
1699 /// Assumes the instruction is already predicated and returns true if the
1700 /// instruction can be predicated again.
/// The conservative default refuses double predication; the assert documents
/// the precondition that \p MI is already predicated.
1701 virtual bool canPredicatePredicatedInstr(const MachineInstr &MI) const {
1702 assert(isPredicated(MI) && "Instruction is not predicated");
1703 return false;
1704 }
1705
1706 /// Returns a MIRPrinter comment for this machine operand.
1707 virtual std::string
1708 createMIROperandComment(const MachineInstr &MI, const MachineOperand &Op,
1709 unsigned OpIdx, const TargetRegisterInfo *TRI) const;
1710
1711 /// Returns true if the instruction is a
1712 /// terminator instruction that has not been predicated.
1713 bool isUnpredicatedTerminator(const MachineInstr &MI) const;
1714
1715 /// Returns true if MI is an unconditional tail call.
1716 virtual bool isUnconditionalTailCall(const MachineInstr &MI) const {
1717 return false; // Default: not recognized as an unconditional tail call.
1718 }
1719
1720 /// Returns true if the tail call can be made conditional on BranchCond.
1722 const MachineInstr &TailCall) const {
1723 return false;
1724 }
1725
1726 /// Replace the conditional branch in MBB with a conditional tail call.
1729 const MachineInstr &TailCall) const {
1730 llvm_unreachable("Target didn't implement replaceBranchWithTailCall!");
1731 }
1732
1733 /// Convert the instruction into a predicated instruction.
1734 /// It returns true if the operation was successful.
/// \p Pred holds the predicate operands to apply to \p MI.
1735 virtual bool PredicateInstruction(MachineInstr &MI,
1736 ArrayRef<MachineOperand> Pred) const;
1737
1738 /// Returns true if the first specified predicate
1739 /// subsumes the second, e.g. GE subsumes GT.
1741 ArrayRef<MachineOperand> Pred2) const {
1742 return false;
1743 }
1744
1745 /// If the specified instruction defines any predicate
1746 /// or condition code register(s) used for predication, returns true as well
1747 /// as the definition predicate(s) by reference.
1748 /// SkipDead should be set to false at any point that dead
1749 /// predicate instructions should be considered as being defined.
1750 /// A dead predicate instruction is one that is guaranteed to be removed
1751 /// after a call to PredicateInstruction.
1753 std::vector<MachineOperand> &Pred,
1754 bool SkipDead) const {
1755 return false;
1756 }
1757
1758 /// Return true if the specified instruction can be predicated.
1759 /// By default, this returns true for every instruction with a
1760 /// PredicateOperand.
1761 virtual bool isPredicable(const MachineInstr &MI) const {
1762 return MI.getDesc().isPredicable();
1763 }
1764
1765 /// Return true if it's safe to move a machine
1766 /// instruction that defines the specified register class.
1767 virtual bool isSafeToMoveRegClassDefs(const TargetRegisterClass *RC) const {
1768 return true; // Default: defs of any register class are safe to move.
1769 }
1770
1771 /// Return true if it's safe to move a machine instruction.
1772 /// This allows the backend to prevent certain special instruction
1773 /// sequences from being broken by instruction motion in optimization
1774 /// passes.
1775 /// By default, this returns true for every instruction.
1776 virtual bool isSafeToMove(const MachineInstr &MI,
1777 const MachineBasicBlock *MBB,
1778 const MachineFunction &MF) const {
1779 return true;
1780 }
1781
1782 /// Test if the given instruction should be considered a scheduling boundary.
1783 /// This primarily includes labels and terminators.
1784 virtual bool isSchedulingBoundary(const MachineInstr &MI,
1785 const MachineBasicBlock *MBB,
1786 const MachineFunction &MF) const;
1787
1788 /// Measure the specified inline asm to determine an approximation of its
1789 /// length.
1790 virtual unsigned getInlineAsmLength(
1791 const char *Str, const MCAsmInfo &MAI,
1792 const TargetSubtargetInfo *STI = nullptr) const;
1793
1794 /// Allocate and return a hazard recognizer to use for this target when
1795 /// scheduling the machine instructions before register allocation.
1796 virtual ScheduleHazardRecognizer *
1797 CreateTargetHazardRecognizer(const TargetSubtargetInfo *STI,
1798 const ScheduleDAG *DAG) const;
1799
1800 /// Allocate and return a hazard recognizer to use for this target when
1801 /// scheduling the machine instructions before register allocation.
/// (Variant invoked with the MachineScheduler's ScheduleDAGMI.)
1802 virtual ScheduleHazardRecognizer *
1803 CreateTargetMIHazardRecognizer(const InstrItineraryData *,
1804 const ScheduleDAGMI *DAG) const;
1805
1806 /// Allocate and return a hazard recognizer to use for this target when
1807 /// scheduling the machine instructions after register allocation.
1808 virtual ScheduleHazardRecognizer *
1809 CreateTargetPostRAHazardRecognizer(const InstrItineraryData *,
1810 const ScheduleDAG *DAG) const;
1811
1812 /// Allocate and return a hazard recognizer to use for by non-scheduling
1813 /// passes.
1814 virtual ScheduleHazardRecognizer *
1816 return nullptr;
1817 }
1818
1819 /// Provide a global flag for disabling the PreRA hazard recognizer that
1820 /// targets may choose to honor.
1821 bool usePreRAHazardRecognizer() const;
1822
1823 /// For a comparison instruction, return the source registers
1824 /// in SrcReg and SrcReg2 if having two register operands, and the value it
1825 /// compares against in CmpValue. Return true if the comparison instruction
1826 /// can be analyzed.
1827 virtual bool analyzeCompare(const MachineInstr &MI, Register &SrcReg,
1828 Register &SrcReg2, int64_t &Mask,
1829 int64_t &Value) const {
1830 return false; // Default: comparison is not analyzable.
1831 }
1832
1833 /// See if the comparison instruction can be converted
1834 /// into something more efficient. E.g., on ARM most instructions can set the
1835 /// flags register, obviating the need for a separate CMP.
1836 virtual bool optimizeCompareInstr(MachineInstr &CmpInstr, Register SrcReg,
1837 Register SrcReg2, int64_t Mask,
1838 int64_t Value,
1839 const MachineRegisterInfo *MRI) const {
1840 return false; // Default: no rewrite performed.
1841 }
/// Try to simplify the given conditional branch; returns true on change.
/// NOTE(review): semantics inferred from the name only — confirm against a
/// target implementation before relying on this description.
1842 virtual bool optimizeCondBranch(MachineInstr &MI) const { return false; }
1843
1844 /// Try to remove the load by folding it to a register operand at the use.
1845 /// We fold the load instructions if and only if the
1846 /// def and use are in the same BB. We only look at one load and see
1847 /// whether it can be folded into MI. FoldAsLoadDefReg is the virtual register
1848 /// defined by the load we are trying to fold. DefMI returns the machine
1849 /// instruction that defines FoldAsLoadDefReg, and the function returns
1850 /// the machine instruction generated due to folding.
1851 virtual MachineInstr *optimizeLoadInstr(MachineInstr &MI,
1852 const MachineRegisterInfo *MRI,
1853 Register &FoldAsLoadDefReg,
1854 MachineInstr *&DefMI) const;
1855
1856 /// 'Reg' is known to be defined by a move immediate instruction,
1857 /// try to fold the immediate into the use instruction.
1858 /// If MRI->hasOneNonDBGUse(Reg) is true, and this function returns true,
1859 /// then the caller may assume that DefMI has been erased from its parent
1860 /// block. The caller may assume that it will not be erased by this
1861 /// function otherwise.
1864 return false;
1865 }
1866
1867 /// Return the number of u-operations the given machine
1868 /// instruction will be decoded to on the target cpu. The itinerary's
1869 /// IssueWidth is the number of microops that can be dispatched each
1870 /// cycle. An instruction with zero microops takes no dispatch resources.
/// (Declaration only; the base implementation is defined out of line.)
1871 virtual unsigned getNumMicroOps(const InstrItineraryData *ItinData,
1872 const MachineInstr &MI) const;
1873
1874 /// Return true for pseudo instructions that don't consume any
1875 /// machine resources in their current form. These are common cases that the
1876 /// scheduler should consider free, rather than conservatively handling them
1877 /// as instructions with no itinerary.
1878 bool isZeroCost(unsigned Opcode) const {
1879 return Opcode <= TargetOpcode::COPY;
1880 }
1881
/// SelectionDAG variant of getOperandLatency: operand latency between an
/// SDNode def and an SDNode use. NOTE(review): std::nullopt presumably means
/// the latency is unknown, matching the MachineInstr overload below — confirm.
1882 virtual std::optional<unsigned>
1883 getOperandLatency(const InstrItineraryData *ItinData, SDNode *DefNode,
1884 unsigned DefIdx, SDNode *UseNode, unsigned UseIdx) const;
1885
1886 /// Compute and return the use operand latency of a given pair of def and use.
1887 /// In most cases, the static scheduling itinerary was enough to determine the
1888 /// operand latency. But it may not be possible for instructions with variable
1889 /// number of defs / uses.
1890 ///
1891 /// This is a raw interface to the itinerary that may be directly overridden
1892 /// by a target. Use computeOperandLatency to get the best estimate of
1893 /// latency.
1894 virtual std::optional<unsigned>
1895 getOperandLatency(const InstrItineraryData *ItinData,
1896 const MachineInstr &DefMI, unsigned DefIdx,
1897 const MachineInstr &UseMI, unsigned UseIdx) const;
1898
1899 /// Compute the instruction latency of a given instruction.
1900 /// If the instruction has higher cost when predicated, it's returned via
1901 /// PredCost.
1902 virtual unsigned getInstrLatency(const InstrItineraryData *ItinData,
1903 const MachineInstr &MI,
1904 unsigned *PredCost = nullptr) const;
1905
/// Cost of predicating \p MI (declaration; defined out of line).
1906 virtual unsigned getPredicationCost(const MachineInstr &MI) const;
1907
/// SelectionDAG variant of getInstrLatency.
1908 virtual unsigned getInstrLatency(const InstrItineraryData *ItinData,
1909 SDNode *Node) const;
1910
1911 /// Return the default expected latency for a def based on its opcode.
1912 unsigned defaultDefLatency(const MCSchedModel &SchedModel,
1913 const MachineInstr &DefMI) const;
1914
1915 /// Return true if this opcode has high latency to its result.
1916 virtual bool isHighLatencyDef(int opc) const { return false; } // Default: none.
1917
1918 /// Compute operand latency between a def of 'Reg'
1919 /// and a use in the current loop. Return true if the target considered
1920 /// it 'high'. This is used by optimization passes such as machine LICM to
1921 /// determine whether it makes sense to hoist an instruction out even in a
1922 /// high register pressure situation.
1923 virtual bool hasHighOperandLatency(const TargetSchedModel &SchedModel,
1924 const MachineRegisterInfo *MRI,
1925 const MachineInstr &DefMI, unsigned DefIdx,
1926 const MachineInstr &UseMI,
1927 unsigned UseIdx) const {
1928 return false; // Default: never report a high operand latency.
1929 }
1930
1931 /// Compute operand latency of a def of 'Reg'. Return true
1932 /// if the target considered it 'low'.
1933 virtual bool hasLowDefLatency(const TargetSchedModel &SchedModel,
1934 const MachineInstr &DefMI,
1935 unsigned DefIdx) const;
1936
1937 /// Perform target-specific instruction verification.
/// NOTE(review): overriders appear to fill \p ErrInfo and return false on a
/// verification failure — confirm against MachineVerifier usage.
1938 virtual bool verifyInstruction(const MachineInstr &MI,
1939 StringRef &ErrInfo) const {
1940 return true; // Default: no target-specific checks; MI is accepted.
1941 }
1942
1943 /// Return the current execution domain and bit mask of
1944 /// possible domains for instruction.
1945 ///
1946 /// Some micro-architectures have multiple execution domains, and multiple
1947 /// opcodes that perform the same operation in different domains. For
1948 /// example, the x86 architecture provides the por, orps, and orpd
1949 /// instructions that all do the same thing. There is a latency penalty if a
1950 /// register is written in one domain and read in another.
1951 ///
1952 /// This function returns a pair (domain, mask) containing the execution
1953 /// domain of MI, and a bit mask of possible domains. The setExecutionDomain
1954 /// function can be used to change the opcode to one of the domains in the
1955 /// bit mask. Instructions whose execution domain can't be changed should
1956 /// return a 0 mask.
1957 ///
1958 /// The execution domain numbers don't have any special meaning except domain
1959 /// 0 is used for instructions that are not associated with any interesting
1960 /// execution domain.
1961 ///
1962 virtual std::pair<uint16_t, uint16_t>
1964 return std::make_pair(0, 0);
1965 }
1966
1967 /// Change the opcode of MI to execute in Domain.
1968 ///
1969 /// The bit (1 << Domain) must be set in the mask returned from
1970 /// getExecutionDomain(MI).
1971 virtual void setExecutionDomain(MachineInstr &MI, unsigned Domain) const {} // Default: no-op.
1972
1973 /// Returns the preferred minimum clearance
1974 /// before an instruction with an unwanted partial register update.
1975 ///
1976 /// Some instructions only write part of a register, and implicitly need to
1977 /// read the other parts of the register. This may cause unwanted stalls
1978 /// preventing otherwise unrelated instructions from executing in parallel in
1979 /// an out-of-order CPU.
1980 ///
1981 /// For example, the x86 instruction cvtsi2ss writes its result to bits
1982 /// [31:0] of the destination xmm register. Bits [127:32] are unaffected, so
1983 /// the instruction needs to wait for the old value of the register to become
1984 /// available:
1985 ///
1986 /// addps %xmm1, %xmm0
1987 /// movaps %xmm0, (%rax)
1988 /// cvtsi2ss %rbx, %xmm0
1989 ///
1990 /// In the code above, the cvtsi2ss instruction needs to wait for the addps
1991 /// instruction before it can issue, even though the high bits of %xmm0
1992 /// probably aren't needed.
1993 ///
1994 /// This hook returns the preferred clearance before MI, measured in
1995 /// instructions. Other defs of MI's operand OpNum are avoided in the last N
1996 /// instructions before MI. It should only return a positive value for
1997 /// unwanted dependencies. If the old bits of the defined register have
1998 /// useful values, or if MI is determined to otherwise read the dependency,
1999 /// the hook should return 0.
2000 ///
2001 /// The unwanted dependency may be handled by:
2002 ///
2003 /// 1. Allocating the same register for an MI def and use. That makes the
2004 /// unwanted dependency identical to a required dependency.
2005 ///
2006 /// 2. Allocating a register for the def that has no defs in the previous N
2007 /// instructions.
2008 ///
2009 /// 3. Calling breakPartialRegDependency() with the same arguments. This
2010 /// allows the target to insert a dependency breaking instruction.
2011 ///
2012 virtual unsigned
2014 const TargetRegisterInfo *TRI) const {
2015 // The default implementation returns 0 for no partial register dependency.
2016 return 0;
2017 }
2018
2019 /// Return the minimum clearance before an instruction that reads an
2020 /// unused register.
2021 ///
2022 /// For example, AVX instructions may copy part of a register operand into
2023 /// the unused high bits of the destination register.
2024 ///
2025 /// vcvtsi2sdq %rax, undef %xmm0, %xmm14
2026 ///
2027 /// In the code above, vcvtsi2sdq copies %xmm0[127:64] into %xmm14 creating a
2028 /// false dependence on any previous write to %xmm0.
2029 ///
2030 /// This hook works similarly to getPartialRegUpdateClearance, except that it
2031 /// does not take an operand index. Instead \p OpNum is the index of the
2032 /// unused register (it is passed by value and is not an output parameter).
2033 virtual unsigned getUndefRegClearance(const MachineInstr &MI, unsigned OpNum,
2034 const TargetRegisterInfo *TRI) const {
2035 // The default implementation returns 0 for no undef register dependency.
2036 return 0;
2037 }
2038
2039 /// Insert a dependency-breaking instruction
2040 /// before MI to eliminate an unwanted dependency on OpNum.
2041 ///
2042 /// If it wasn't possible to avoid a def in the last N instructions before MI
2043 /// (see getPartialRegUpdateClearance), this hook will be called to break the
2044 /// unwanted dependency.
2045 ///
2046 /// On x86, an xorps instruction can be used as a dependency breaker:
2047 ///
2048 /// addps %xmm1, %xmm0
2049 /// movaps %xmm0, (%rax)
2050 /// xorps %xmm0, %xmm0
2051 /// cvtsi2ss %rbx, %xmm0
2052 ///
2053 /// An <imp-kill> operand should be added to MI if an instruction was
2054 /// inserted. This ties the instructions together in the post-ra scheduler.
2055 ///
/// The default implementation is a no-op: no dependency breaker is emitted.
2056 virtual void breakPartialRegDependency(MachineInstr &MI, unsigned OpNum,
2057 const TargetRegisterInfo *TRI) const {}
2058
2059 /// Create machine specific model for scheduling.
2060 virtual DFAPacketizer *
2062 return nullptr;
2063 }
2064
2065 /// Sometimes, it is possible for the target
2066 /// to tell, even without aliasing information, that two MIs access different
2067 /// memory addresses. This function returns true if two MIs access different
2068 /// memory addresses and false otherwise.
2069 ///
2070 /// Assumes any physical registers used to compute addresses have the same
2071 /// value for both instructions. (This is the most useful assumption for
2072 /// post-RA scheduling.)
2073 ///
2074 /// See also MachineInstr::mayAlias, which is implemented on top of this
2075 /// function.
2076 virtual bool
2078 const MachineInstr &MIb) const {
2079 assert(MIa.mayLoadOrStore() &&
2080 "MIa must load from or modify a memory location");
2081 assert(MIb.mayLoadOrStore() &&
2082 "MIb must load from or modify a memory location");
2083 return false;
2084 }
2085
2086 /// Return the value to use for the MachineCSE's LookAheadLimit,
2087 /// which is a heuristic used for CSE'ing phys reg defs.
2088 virtual unsigned getMachineCSELookAheadLimit() const {
2089 // The default lookahead is small to prevent unprofitable quadratic
2090 // behavior.
2091 return 5;
2092 }
2093
2094 /// Return the maximal number of alias checks on memory operands. For
2095 /// instructions with more than one memory operands, the alias check on a
2096 /// single MachineInstr pair has quadratic overhead and results in
2097 /// unacceptable performance in the worst case. The limit here is to clamp
2098 /// that maximal checks performed. Usually, that's the product of memory
2099 /// operand numbers from that pair of MachineInstr to be checked. For
2100 /// instance, with two MachineInstrs with 4 and 5 memory operands
2101 /// correspondingly, a total of 20 checks are required. With this limit set to
2102 /// 16, their alias check is skipped. We choose to limit the product instead
2103 /// of the individual instruction as targets may have special MachineInstrs
2104 /// with a considerably high number of memory operands, such as `ldm` in ARM.
2105 /// Setting this limit per MachineInstr would result in either too high
2106 /// overhead or too rigid restriction.
2107 virtual unsigned getMemOperandAACheckLimit() const { return 16; }
2108
2109 /// Return an array that contains the ids of the target indices (used for the
2110 /// TargetIndex machine operand) and their names.
2111 ///
2112 /// MIR Serialization is able to serialize only the target indices that are
2113 /// defined by this method.
2116 return {};
2117 }
2118
2119 /// Decompose the machine operand's target flags into two values - the direct
2120 /// target flag value and any of bit flags that are applied.
2121 virtual std::pair<unsigned, unsigned>
2123 return std::make_pair(0u, 0u);
2124 }
2125
2126 /// Return an array that contains the direct target flag values and their
2127 /// names.
2128 ///
2129 /// MIR Serialization is able to serialize only the target flags that are
2130 /// defined by this method.
2133 return {};
2134 }
2135
2136 /// Return an array that contains the bitmask target flag values and their
2137 /// names.
2138 ///
2139 /// MIR Serialization is able to serialize only the target flags that are
2140 /// defined by this method.
2143 return {};
2144 }
2145
2146 /// Return an array that contains the MMO target flag values and their
2147 /// names.
2148 ///
2149 /// MIR Serialization is able to serialize only the MMO target flags that are
2150 /// defined by this method.
2153 return {};
2154 }
2155
2156 /// Determines whether \p Inst is a tail call instruction. Override this
2157 /// method on targets that do not properly set MCID::Return and MCID::Call on
2158 /// tail call instructions.
2159 virtual bool isTailCall(const MachineInstr &Inst) const {
2160 return Inst.isReturn() && Inst.isCall();
2161 }
2162
2163 /// True if the instruction is bound to the top of its basic block and no
2164 /// other instructions shall be inserted before it. This can be implemented
2165 /// to prevent register allocator to insert spills for \p Reg before such
2166 /// instructions.
2168 Register Reg = Register()) const {
2169 return false;
2170 }
2171
2172 /// Allows targets to use an appropriate copy instruction while splitting the
2173 /// live range of a register in register allocation.
2175 const MachineFunction &MF) const {
2176 return TargetOpcode::COPY;
2177 }
2178
2179 /// During PHI elimination, lets the target make necessary checks and
2180 /// insert the copy to the PHI destination register in a target specific
2181 /// manner.
2184 const DebugLoc &DL, Register Src, Register Dst) const {
2185 return BuildMI(MBB, InsPt, DL, get(TargetOpcode::COPY), Dst)
2186 .addReg(Src);
2187 }
2188
2189 /// During PHI elimination, lets the target make necessary checks and
2190 /// insert the copy to the PHI destination register in a target specific
2191 /// manner.
2194 const DebugLoc &DL, Register Src,
2195 unsigned SrcSubReg,
2196 Register Dst) const {
2197 return BuildMI(MBB, InsPt, DL, get(TargetOpcode::COPY), Dst)
2198 .addReg(Src, 0, SrcSubReg);
2199 }
2200
2201 /// Returns a \p outliner::OutlinedFunction struct containing target-specific
2202 /// information for a set of outlining candidates. Returns std::nullopt if the
2203 /// candidates are not suitable for outlining. \p MinRepeats is the minimum
2204 /// number of times the instruction sequence must be repeated.
2205 virtual std::optional<std::unique_ptr<outliner::OutlinedFunction>>
2207 const MachineModuleInfo &MMI,
2208 std::vector<outliner::Candidate> &RepeatedSequenceLocs,
2209 unsigned MinRepeats) const {
2211 "Target didn't implement TargetInstrInfo::getOutliningCandidateInfo!");
2212 }
2213
2214 /// Optional target hook to create the LLVM IR attributes for the outlined
2215 /// function. If overridden, the overriding function must call the default
2216 /// implementation.
/// (Declaration only; the base implementation is defined out of line.)
2217 virtual void mergeOutliningCandidateAttributes(
2218 Function &F, std::vector<outliner::Candidate> &Candidates) const;
2219
2220protected:
2221 /// Target-dependent implementation for getOutliningTypeImpl.
2222 virtual outliner::InstrType
2224 MachineBasicBlock::iterator &MIT, unsigned Flags) const {
2226 "Target didn't implement TargetInstrInfo::getOutliningTypeImpl!");
2227 }
2228
2229public:
2230 /// Returns how or if \p MIT should be outlined. \p Flags is the
2231 /// target-specific information returned by isMBBSafeToOutlineFrom.
2232 outliner::InstrType getOutliningType(const MachineModuleInfo &MMI,
2234 unsigned Flags) const;
2235
2236 /// Optional target hook that returns true if \p MBB is safe to outline from,
2237 /// and returns any target-specific information in \p Flags.
/// (Declaration only; the base implementation is defined out of line.)
2238 virtual bool isMBBSafeToOutlineFrom(MachineBasicBlock &MBB,
2239 unsigned &Flags) const;
2240
2241 /// Optional target hook which partitions \p MBB into outlinable ranges for
2242 /// instruction mapping purposes. Each range is defined by two iterators:
2243 /// [start, end).
2244 ///
2245 /// Ranges are expected to be ordered top-down. That is, ranges closer to the
2246 /// top of the block should come before ranges closer to the end of the block.
2247 ///
2248 /// Ranges cannot overlap.
2249 ///
2250 /// If an entire block is mappable, then its range is [MBB.begin(), MBB.end())
2251 ///
2252 /// All instructions not present in an outlinable range are considered
2253 /// illegal.
2254 virtual SmallVector<
2255 std::pair<MachineBasicBlock::iterator, MachineBasicBlock::iterator>>
2256 getOutlinableRanges(MachineBasicBlock &MBB, unsigned &Flags) const {
2257 return {std::make_pair(MBB.begin(), MBB.end())};
2258 }
2259
2260 /// Insert a custom frame for outlined functions.
2262 const outliner::OutlinedFunction &OF) const {
2264 "Target didn't implement TargetInstrInfo::buildOutlinedFrame!");
2265 }
2266
2267 /// Insert a call to an outlined function into the program.
2268 /// Returns an iterator to the spot where we inserted the call. This must be
2269 /// implemented by the target.
2273 outliner::Candidate &C) const {
2275 "Target didn't implement TargetInstrInfo::insertOutlinedCall!");
2276 }
2277
2278 /// Insert an architecture-specific instruction to clear a register. If you
2279 /// need to avoid sideeffects (e.g. avoid XOR on x86, which sets EFLAGS), set
2280 /// \p AllowSideEffects to \p false.
2283 DebugLoc &DL,
2284 bool AllowSideEffects = true) const {
2285#if 0
2286 // FIXME: This should exist once all platforms that use stack protectors
2287 // implements it.
2289 "Target didn't implement TargetInstrInfo::buildClearRegister!");
2290#endif
2291 }
2292
2293 /// Return true if the function can safely be outlined from.
2294 /// A function \p MF is considered safe for outlining if an outlined function
2295 /// produced from instructions in F will produce a program which produces the
2296 /// same output for any set of given inputs.
2298 bool OutlineFromLinkOnceODRs) const {
2299 llvm_unreachable("Target didn't implement "
2300 "TargetInstrInfo::isFunctionSafeToOutlineFrom!");
2301 }
2302
2303 /// Return true if the function should be outlined from by default.
2305 return false;
2306 }
2307
2308 /// Return true if the function is a viable candidate for machine function
2309 /// splitting. The criteria for if a function can be split may vary by target.
2310 virtual bool isFunctionSafeToSplit(const MachineFunction &MF) const;
2311
2312 /// Return true if the MachineBasicBlock can safely be split to the cold
2313 /// section. On AArch64, certain instructions may cause a block to be unsafe
2314 /// to split to the cold section.
2315 virtual bool isMBBSafeToSplitToCold(const MachineBasicBlock &MBB) const {
2316 return true; // Default: every block may be moved to the cold section.
2317 }
2318
2319 /// Produce the expression describing the \p MI loading a value into
2320 /// the physical register \p Reg. This hook should only be used with
2321 /// \p MIs belonging to VReg-less functions.
2322 virtual std::optional<ParamLoadedValue>
2323 describeLoadedValue(const MachineInstr &MI, Register Reg) const;
2324
2325 /// Given the generic extension instruction \p ExtMI, returns true if this
2326 /// extension is a likely candidate for being folded into an another
2327 /// instruction.
2329 MachineRegisterInfo &MRI) const {
2330 return false;
2331 }
2332
2333 /// Return MIR formatter to format/parse MIR operands. Target can override
2334 /// this virtual function and return target specific MIR formatter.
2335 virtual const MIRFormatter *getMIRFormatter() const {
// Lazily construct the default formatter on first use. This writes the
// `mutable Formatter` member from a const method without synchronization —
// NOTE(review): assumes no concurrent first calls; confirm if MIR
// printing/parsing can run multithreaded.
2336 if (!Formatter)
2337 Formatter = std::make_unique<MIRFormatter>();
2338 return Formatter.get();
2339 }
2340
2341 /// Returns the target-specific default value for tail duplication.
2342 /// This value will be used if the tail-dup-placement-threshold argument is
2343 /// not provided.
2344 virtual unsigned getTailDuplicateSize(CodeGenOptLevel OptLevel) const {
2345 return OptLevel >= CodeGenOptLevel::Aggressive ? 4 : 2;
2346 }
2347
2348 /// Returns the target-specific default value for tail merging.
2349 /// This value will be used if the tail-merge-size argument is not provided.
2350 virtual unsigned getTailMergeSize(const MachineFunction &MF) const {
2351 return 3;
2352 }
2353
2354 /// Returns the callee operand from the given \p MI.
2355 virtual const MachineOperand &getCalleeOperand(const MachineInstr &MI) const {
2356 assert(MI.isCall());
2357
2358 switch (MI.getOpcode()) {
2359 case TargetOpcode::STATEPOINT:
2360 case TargetOpcode::STACKMAP:
2361 case TargetOpcode::PATCHPOINT:
2362 return MI.getOperand(3);
2363 default:
2364 return MI.getOperand(0);
2365 }
2366
2367 llvm_unreachable("impossible call instruction");
2368 }
2369
2370 /// Return the uniformity behavior of the given instruction.
2371 virtual InstructionUniformity
2375
2376 /// Returns true if the given \p MI defines a TargetIndex operand that can be
2377 /// tracked by its offset, can have values, and can have debug info
2378 /// associated with it. If so, sets \p Index and \p Offset of the target index
2379 /// operand.
2380 virtual bool isExplicitTargetIndexDef(const MachineInstr &MI, int &Index,
2381 int64_t &Offset) const {
2382 return false; // Default: no trackable TargetIndex defs.
2383 }
2384
2385 /// Get the call frame size just before \p MI (defined out of line).
2386 unsigned getCallFrameSizeAt(MachineInstr &MI) const;
2387
2388 /// Fills in the necessary MachineOperands to refer to a frame index.
2389 /// The best way to understand this is to print `asm(""::"m"(x));` after
2390 /// finalize-isel. Example:
2391 /// INLINEASM ... 262190 /* mem:m */, %stack.0.x.addr, 1, $noreg, 0, $noreg
2392 /// we would add placeholders for: ^ ^ ^ ^
2394 int FI) const {
2395 llvm_unreachable("unknown number of operands necessary");
2396 }
2397
2398 private:
/// Lazily-created default MIR formatter; see getMIRFormatter(). Mutable so
/// the const accessor can populate it on first use.
2399 mutable std::unique_ptr<MIRFormatter> Formatter;
/// Target opcode caches — presumably initialized by the constructor, which
/// is not visible in this excerpt (TODO confirm).
2400 unsigned CallFrameSetupOpcode, CallFrameDestroyOpcode;
2401 unsigned CatchRetOpcode;
2402 unsigned ReturnOpcode;
2403};
2404
2405/// Provide DenseMapInfo for TargetInstrInfo::RegSubRegPair.
2409
2411 return TargetInstrInfo::RegSubRegPair(RegInfo::getEmptyKey(),
2412 SubRegInfo::getEmptyKey());
2413 }
2414
2416 return TargetInstrInfo::RegSubRegPair(RegInfo::getTombstoneKey(),
2417 SubRegInfo::getTombstoneKey());
2418 }
2419
2420 /// Reuse getHashValue implementation from
2421 /// std::pair<unsigned, unsigned>.
2422 static unsigned getHashValue(const TargetInstrInfo::RegSubRegPair &Val) {
2424 std::make_pair(Val.Reg, Val.SubReg));
2425 }
2426
2429 return LHS == RHS;
2430 }
2431};
2432
2433} // end namespace llvm
2434
2435#endif // LLVM_CODEGEN_TARGETINSTRINFO_H
unsigned SubReg
unsigned const MachineRegisterInfo * MRI
MachineInstrBuilder & UseMI
MachineInstrBuilder MachineInstrBuilder & DefMI
static const TargetRegisterClass * getRegClass(const MachineInstr &MI, Register Reg)
assert(UImm &&(UImm !=~static_cast< T >(0)) &&"Invalid immediate!")
SmallVector< int16_t, MAX_SRC_OPERANDS_NUM > OperandIndices
MachineBasicBlock & MBB
MachineBasicBlock MachineBasicBlock::iterator DebugLoc DL
MachineBasicBlock MachineBasicBlock::iterator MBBI
#define LLVM_ABI
Definition Compiler.h:213
DXIL Forward Handle Accesses
This file defines DenseMapInfo traits for DenseMap.
This file defines the DenseMap class.
IRTranslator LLVM IR MI
const AbstractManglingParser< Derived, Alloc >::OperatorInfo AbstractManglingParser< Derived, Alloc >::Ops[]
#define F(x, y, z)
Definition MD5.cpp:54
#define I(x, y, z)
Definition MD5.cpp:57
Contains all data structures shared between the outliner implemented in MachineOutliner....
TargetInstrInfo::RegSubRegPair RegSubRegPair
Register Reg
Register const TargetRegisterInfo * TRI
Promote Memory to Register
Definition Mem2Reg.cpp:110
MachineInstr unsigned OpIdx
#define P(N)
TargetInstrInfo::RegSubRegPairAndIdx RegSubRegPairAndIdx
const SmallVectorImpl< MachineOperand > MachineBasicBlock * TBB
const SmallVectorImpl< MachineOperand > & Cond
static unsigned getInstSizeInBytes(const MachineInstr &MI, const SystemZInstrInfo *TII)
Value * RHS
Value * LHS
ArrayRef - Represent a constant reference to an array (0 or more elements consecutively in memory),...
Definition ArrayRef.h:40
This class is the base class for the comparison instructions.
Definition InstrTypes.h:664
A debug info location.
Definition DebugLoc.h:123
Itinerary data supplied by a subtarget to be used by a target.
This class is intended to be used as a base class for asm properties and features specific to the tar...
Definition MCAsmInfo.h:64
Instances of this class represent a single low-level machine instruction.
Definition MCInst.h:188
Describe properties that are true of each instruction in the target description file.
Interface to description of machine instruction set.
Definition MCInstrInfo.h:27
const MCInstrDesc & get(unsigned Opcode) const
Return the machine instruction descriptor that corresponds to the specified instruction opcode.
Definition MCInstrInfo.h:90
This holds information about one operand of a machine instruction, indicating the register class for ...
Definition MCInstrDesc.h:86
MIRFormater - Interface to format MIR operand based on target.
MachineInstrBundleIterator< MachineInstr > iterator
Representation of each machine instruction.
bool isReturn(QueryType Type=AnyInBundle) const
bool mayLoadOrStore(QueryType Type=AnyInBundle) const
Return true if this instruction could possibly read or modify memory.
bool isCall(QueryType Type=AnyInBundle) const
A description of a memory reference used in the backend.
This class contains meta information specific to a module.
MachineOperand class - Representation of each machine instruction operand.
unsigned getSubReg() const
static MachineOperand CreateImm(int64_t Val)
MachineRegisterInfo - Keep track of information for virtual and physical registers,...
A Module instance is used to store all the information related to an LLVM module.
Definition Module.h:67
Wrapper class representing virtual and physical registers.
Definition Register.h:20
Represents one node in the SelectionDAG.
This class represents the scheduled code.
ScheduleDAGMI is an implementation of ScheduleDAGInstrs that simply schedules machine instructions ac...
HazardRecognizer - This determines whether or not an instruction can be issued this cycle,...
This is used to represent a portion of an LLVM function in a low-level Data Dependence DAG representa...
A templated base class for SmallPtrSet which provides the typesafe interface that is common across al...
This class consists of common code factored out of the SmallVector class to reduce code duplication b...
This is a 'vector' (really, a variable-sized array), optimized for the case when the array is small.
StringRef - Represent a constant reference to a string, i.e.
Definition StringRef.h:55
This class builds the dependence graph for the instructions in a loop, and attempts to schedule the i...
Object returned by analyzeLoopForPipelining.
virtual bool isMVEExpanderSupported()
Return true if the target can expand pipelined schedule with modulo variable expansion.
virtual void createRemainingIterationsGreaterCondition(int TC, MachineBasicBlock &MBB, SmallVectorImpl< MachineOperand > &Cond, DenseMap< MachineInstr *, MachineInstr * > &LastStage0Insts)
Create a condition to determine if the remaining trip count for a phase is greater than TC.
virtual void adjustTripCount(int TripCountAdjust)=0
Modify the loop such that the trip count is OriginalTC + TripCountAdjust.
virtual void disposed(LiveIntervals *LIS=nullptr)
Called when the loop is being removed.
virtual bool shouldIgnoreForPipelining(const MachineInstr *MI) const =0
Return true if the given instruction should not be pipelined and should be ignored.
virtual void setPreheader(MachineBasicBlock *NewPreheader)=0
Called when the loop's preheader has been modified to NewPreheader.
virtual bool shouldUseSchedule(SwingSchedulerDAG &SSD, SMSchedule &SMS)
Return true if the proposed schedule should be used.
virtual std::optional< bool > createTripCountGreaterCondition(int TC, MachineBasicBlock &MBB, SmallVectorImpl< MachineOperand > &Cond)=0
Create a condition to determine if the trip count of the loop is greater than TC, where TC is always ...
TargetInstrInfo - Interface to description of machine instruction set.
virtual SmallVector< std::pair< MachineBasicBlock::iterator, MachineBasicBlock::iterator > > getOutlinableRanges(MachineBasicBlock &MBB, unsigned &Flags) const
Optional target hook which partitions MBB into outlinable ranges for instruction mapping purposes.
virtual bool isProfitableToIfCvt(MachineBasicBlock &MBB, unsigned NumCycles, unsigned ExtraPredCycles, BranchProbability Probability) const
Return true if it's profitable to predicate instructions with accumulated instruction latency of "Num...
virtual bool isBasicBlockPrologue(const MachineInstr &MI, Register Reg=Register()) const
True if the instruction is bound to the top of its basic block and no other instructions shall be ins...
virtual bool reverseBranchCondition(SmallVectorImpl< MachineOperand > &Cond) const
Reverses the branch condition of the specified condition list, returning false on success and true if...
virtual unsigned removeBranch(MachineBasicBlock &MBB, int *BytesRemoved=nullptr) const
Remove the branching code at the end of the specific MBB.
virtual std::unique_ptr< PipelinerLoopInfo > analyzeLoopForPipelining(MachineBasicBlock *LoopBB) const
Analyze loop L, which must be a single-basic-block loop, and if the conditions can be understood enou...
virtual bool ClobbersPredicate(MachineInstr &MI, std::vector< MachineOperand > &Pred, bool SkipDead) const
If the specified instruction defines any predicate or condition code register(s) used for predication...
virtual MachineInstr * foldMemoryOperandImpl(MachineFunction &MF, MachineInstr &MI, ArrayRef< unsigned > Ops, MachineBasicBlock::iterator InsertPt, int FrameIndex, LiveIntervals *LIS=nullptr, VirtRegMap *VRM=nullptr) const
Target-dependent implementation for foldMemoryOperand.
virtual bool canPredicatePredicatedInstr(const MachineInstr &MI) const
Assumes the instruction is already predicated and returns true if the instruction can be predicated a...
virtual void setSpecialOperandAttr(MachineInstr &OldMI1, MachineInstr &OldMI2, MachineInstr &NewMI1, MachineInstr &NewMI2) const
This is an architecture-specific helper function of reassociateOps.
bool isZeroCost(unsigned Opcode) const
Return true for pseudo instructions that don't consume any machine resources in their current form.
virtual void buildClearRegister(Register Reg, MachineBasicBlock &MBB, MachineBasicBlock::iterator Iter, DebugLoc &DL, bool AllowSideEffects=true) const
Insert an architecture-specific instruction to clear a register.
virtual void getFrameIndexOperands(SmallVectorImpl< MachineOperand > &Ops, int FI) const
Fills in the necessary MachineOperands to refer to a frame index.
virtual bool analyzeBranch(MachineBasicBlock &MBB, MachineBasicBlock *&TBB, MachineBasicBlock *&FBB, SmallVectorImpl< MachineOperand > &Cond, bool AllowModify=false) const
Analyze the branching code at the end of MBB, returning true if it cannot be understood (e....
virtual bool isExtendLikelyToBeFolded(MachineInstr &ExtMI, MachineRegisterInfo &MRI) const
Given the generic extension instruction ExtMI, returns true if this extension is a likely candidate f...
virtual bool isSafeToSink(MachineInstr &MI, MachineBasicBlock *SuccToSinkTo, MachineCycleInfo *CI) const
const TargetRegisterInfo & TRI
virtual std::optional< DestSourcePair > isCopyLikeInstrImpl(const MachineInstr &MI) const
virtual unsigned getPartialRegUpdateClearance(const MachineInstr &MI, unsigned OpNum, const TargetRegisterInfo *TRI) const
Returns the preferred minimum clearance before an instruction with an unwanted partial register updat...
virtual bool canMakeTailCallConditional(SmallVectorImpl< MachineOperand > &Cond, const MachineInstr &TailCall) const
Returns true if the tail call can be made conditional on BranchCond.
virtual DFAPacketizer * CreateTargetScheduleState(const TargetSubtargetInfo &) const
Create machine specific model for scheduling.
virtual unsigned reduceLoopCount(MachineBasicBlock &MBB, MachineBasicBlock &PreHeader, MachineInstr *IndVar, MachineInstr &Cmp, SmallVectorImpl< MachineOperand > &Cond, SmallVectorImpl< MachineInstr * > &PrevInsts, unsigned Iter, unsigned MaxIter) const
Generate code to reduce the loop iteration by one and check if the loop is finished.
virtual bool isPostIncrement(const MachineInstr &MI) const
Return true for post-incremented instructions.
bool isTriviallyReMaterializable(const MachineInstr &MI) const
Return true if the instruction is trivially rematerializable, meaning it has no side effects and requ...
virtual bool isCoalescableExtInstr(const MachineInstr &MI, Register &SrcReg, Register &DstReg, unsigned &SubIdx) const
Return true if the instruction is a "coalescable" extension instruction.
virtual void insertIndirectBranch(MachineBasicBlock &MBB, MachineBasicBlock &NewDestBB, MachineBasicBlock &RestoreBB, const DebugLoc &DL, int64_t BrOffset=0, RegScavenger *RS=nullptr) const
Insert an unconditional indirect branch at the end of MBB to NewDestBB.
virtual ArrayRef< std::pair< MachineMemOperand::Flags, const char * > > getSerializableMachineMemOperandTargetFlags() const
Return an array that contains the MMO target flag values and their names.
virtual bool getBaseAndOffsetPosition(const MachineInstr &MI, unsigned &BasePos, unsigned &OffsetPos) const
Return true if the instruction contains a base register and offset.
int16_t getOpRegClassID(const MCOperandInfo &OpInfo) const
virtual unsigned getOpcodeAfterMemoryUnfold(unsigned Opc, bool UnfoldLoad, bool UnfoldStore, unsigned *LoadRegIndex=nullptr) const
Returns the opcode of the would be new instruction after load / store are unfolded from an instructio...
virtual outliner::InstrType getOutliningTypeImpl(const MachineModuleInfo &MMI, MachineBasicBlock::iterator &MIT, unsigned Flags) const
Target-dependent implementation for getOutliningTypeImpl.
virtual bool analyzeBranchPredicate(MachineBasicBlock &MBB, MachineBranchPredicate &MBP, bool AllowModify=false) const
Analyze the branching code at the end of MBB and parse it into the MachineBranchPredicate structure i...
virtual bool getInsertSubregLikeInputs(const MachineInstr &MI, unsigned DefIdx, RegSubRegPair &BaseReg, RegSubRegPairAndIdx &InsertedReg) const
Target-dependent implementation of getInsertSubregInputs.
virtual bool shouldOutlineFromFunctionByDefault(MachineFunction &MF) const
Return true if the function should be outlined from by default.
virtual MachineInstr * optimizeSelect(MachineInstr &MI, SmallPtrSetImpl< MachineInstr * > &NewMIs, bool PreferFalse=false) const
Given a select instruction that was understood by analyzeSelect and returned Optimizable = true,...
virtual bool canFoldIntoAddrMode(const MachineInstr &MemI, Register Reg, const MachineInstr &AddrI, ExtAddrMode &AM) const
Check if it's possible and beneficial to fold the addressing computation AddrI into the addressing mo...
virtual const MIRFormatter * getMIRFormatter() const
Return MIR formatter to format/parse MIR operands.
bool isReMaterializable(const MachineInstr &MI) const
Return true if the instruction would be materializable at a point in the containing function where al...
virtual bool shouldReduceRegisterPressure(const MachineBasicBlock *MBB, const RegisterClassInfo *RegClassInfo) const
Return true if target supports reassociation of instructions in machine combiner pass to reduce regis...
virtual ArrayRef< std::pair< int, const char * > > getSerializableTargetIndices() const
Return an array that contains the ids of the target indices (used for the TargetIndex machine operand...
bool isFullCopyInstr(const MachineInstr &MI) const
virtual unsigned getUndefRegClearance(const MachineInstr &MI, unsigned OpNum, const TargetRegisterInfo *TRI) const
Return the minimum clearance before an instruction that reads an unused register.
virtual bool findCommutedOpIndices(const MachineInstr &MI, unsigned &SrcOpIdx1, unsigned &SrcOpIdx2) const
Returns true iff the routine could find two commutable operands in the given machine instruction.
virtual bool preservesZeroValueInReg(const MachineInstr *MI, const Register NullValueReg, const TargetRegisterInfo *TRI) const
Returns true if MI's Def is NullValueReg, and the MI does not change the Zero value.
virtual bool verifyInstruction(const MachineInstr &MI, StringRef &ErrInfo) const
Perform target-specific instruction verification.
virtual void finalizeInsInstrs(MachineInstr &Root, unsigned &Pattern, SmallVectorImpl< MachineInstr * > &InsInstrs) const
Fix up the placeholder we may add in genAlternativeCodeSequence().
virtual bool isUnconditionalTailCall(const MachineInstr &MI) const
Returns true if MI is an unconditional tail call.
virtual bool hasHighOperandLatency(const TargetSchedModel &SchedModel, const MachineRegisterInfo *MRI, const MachineInstr &DefMI, unsigned DefIdx, const MachineInstr &UseMI, unsigned UseIdx) const
Compute operand latency between a def of 'Reg' and a use in the current loop.
bool isUnspillableTerminator(const MachineInstr *MI) const
Return true if the given instruction is terminator that is unspillable, according to isUnspillableTer...
virtual bool isProfitableToUnpredicate(MachineBasicBlock &TMBB, MachineBasicBlock &FMBB) const
Return true if it's profitable to unpredicate one side of a 'diamond', i.e.
virtual bool useMachineCombiner() const
Return true when a target supports MachineCombiner.
virtual bool SubsumesPredicate(ArrayRef< MachineOperand > Pred1, ArrayRef< MachineOperand > Pred2) const
Returns true if the first specified predicate subsumes the second, e.g.
bool isFrameInstr(const MachineInstr &I) const
Returns true if the argument is a frame pseudo instruction.
virtual void breakPartialRegDependency(MachineInstr &MI, unsigned OpNum, const TargetRegisterInfo *TRI) const
Insert a dependency-breaking instruction before MI to eliminate an unwanted dependency on OpNum.
virtual bool getRegSequenceLikeInputs(const MachineInstr &MI, unsigned DefIdx, SmallVectorImpl< RegSubRegPairAndIdx > &InputRegs) const
Target-dependent implementation of getRegSequenceInputs.
virtual bool isProfitableToIfCvt(MachineBasicBlock &TMBB, unsigned NumTCycles, unsigned ExtraTCycles, MachineBasicBlock &FMBB, unsigned NumFCycles, unsigned ExtraFCycles, BranchProbability Probability) const
Second variant of isProfitableToIfCvt.
virtual int getExtendResourceLenLimit() const
The limit on resource length extension we accept in MachineCombiner Pass.
virtual ScheduleHazardRecognizer * CreateTargetPostRAHazardRecognizer(const MachineFunction &MF) const
Allocate and return a hazard recognizer to use for by non-scheduling passes.
virtual void insertSelect(MachineBasicBlock &MBB, MachineBasicBlock::iterator I, const DebugLoc &DL, Register DstReg, ArrayRef< MachineOperand > Cond, Register TrueReg, Register FalseReg) const
Insert a select instruction into MBB before I that will copy TrueReg to DstReg when Cond is true,...
virtual bool shouldBreakCriticalEdgeToSink(MachineInstr &MI) const
For a "cheap" instruction which doesn't enable additional sinking, should MachineSink break a critica...
virtual bool areMemAccessesTriviallyDisjoint(const MachineInstr &MIa, const MachineInstr &MIb) const
Sometimes, it is possible for the target to tell, even without aliasing information,...
virtual bool isBranchOffsetInRange(unsigned BranchOpc, int64_t BrOffset) const
unsigned getReturnOpcode() const
virtual void storeRegToStackSlot(MachineBasicBlock &MBB, MachineBasicBlock::iterator MI, Register SrcReg, bool isKill, int FrameIndex, const TargetRegisterClass *RC, Register VReg, MachineInstr::MIFlag Flags=MachineInstr::NoFlags) const
Store the specified register of the given register class to the specified stack frame index.
virtual bool isIgnorableUse(const MachineOperand &MO) const
Given MO is a PhysReg use return if it can be ignored for the purpose of instruction rematerializatio...
virtual unsigned getReduceOpcodeForAccumulator(unsigned int AccumulatorOpCode) const
Returns the opcode that should be use to reduce accumulation registers.
virtual Register isLoadFromStackSlot(const MachineInstr &MI, int &FrameIndex) const
If the specified machine instruction is a direct load from a stack slot, return the virtual or physic...
virtual bool shouldPostRASink(const MachineInstr &MI) const
virtual bool shouldClusterMemOps(ArrayRef< const MachineOperand * > BaseOps1, int64_t Offset1, bool OffsetIsScalable1, ArrayRef< const MachineOperand * > BaseOps2, int64_t Offset2, bool OffsetIsScalable2, unsigned ClusterSize, unsigned NumBytes) const
Returns true if the two given memory operations should be scheduled adjacent.
virtual unsigned getLiveRangeSplitOpcode(Register Reg, const MachineFunction &MF) const
Allows targets to use appropriate copy instruction while splitting live range of a register in regis...
virtual bool optimizeCompareInstr(MachineInstr &CmpInstr, Register SrcReg, Register SrcReg2, int64_t Mask, int64_t Value, const MachineRegisterInfo *MRI) const
See if the comparison instruction can be converted into something more efficient.
virtual unsigned getMemOperandAACheckLimit() const
Return the maximal number of alias checks on memory operands.
virtual bool isFunctionSafeToOutlineFrom(MachineFunction &MF, bool OutlineFromLinkOnceODRs) const
Return true if the function can safely be outlined from.
virtual bool isMBBSafeToSplitToCold(const MachineBasicBlock &MBB) const
Return true if the MachineBasicBlock can safely be split to the cold section.
virtual void buildOutlinedFrame(MachineBasicBlock &MBB, MachineFunction &MF, const outliner::OutlinedFunction &OF) const
Insert a custom frame for outlined functions.
TargetInstrInfo(const TargetRegisterInfo &TRI, unsigned CFSetupOpcode=~0u, unsigned CFDestroyOpcode=~0u, unsigned CatchRetOpcode=~0u, unsigned ReturnOpcode=~0u, const int16_t *const RegClassByHwModeTable=nullptr)
virtual bool shouldScheduleLoadsNear(SDNode *Load1, SDNode *Load2, int64_t Offset1, int64_t Offset2, unsigned NumLoads) const
This is a used by the pre-regalloc scheduler to determine (in conjunction with areLoadsFromSameBasePt...
virtual unsigned insertBranch(MachineBasicBlock &MBB, MachineBasicBlock *TBB, MachineBasicBlock *FBB, ArrayRef< MachineOperand > Cond, const DebugLoc &DL, int *BytesAdded=nullptr) const
Insert branch code into the end of the specified MachineBasicBlock.
virtual void copyPhysReg(MachineBasicBlock &MBB, MachineBasicBlock::iterator MI, const DebugLoc &DL, Register DestReg, Register SrcReg, bool KillSrc, bool RenamableDest=false, bool RenamableSrc=false) const
Emit instructions to copy a pair of physical registers.
virtual unsigned getAccumulationStartOpcode(unsigned Opcode) const
Returns an opcode which defines the accumulator used by \P Opcode.
virtual bool canCopyGluedNodeDuringSchedule(SDNode *N) const
Return true if the given SDNode can be copied during scheduling even if it has glue.
virtual bool simplifyInstruction(MachineInstr &MI) const
If possible, converts the instruction to a simplified/canonical form.
virtual std::optional< ExtAddrMode > getAddrModeFromMemoryOp(const MachineInstr &MemI, const TargetRegisterInfo *TRI) const
Target dependent implementation to get the values constituting the address MachineInstr that is acces...
virtual std::optional< DestSourcePair > isCopyInstrImpl(const MachineInstr &MI) const
Target-dependent implementation for IsCopyInstr.
virtual MachineInstr * createPHIDestinationCopy(MachineBasicBlock &MBB, MachineBasicBlock::iterator InsPt, const DebugLoc &DL, Register Src, Register Dst) const
During PHI elimination lets the target make necessary checks and insert the copy to the PHI destinati...
virtual bool getConstValDefinedInReg(const MachineInstr &MI, const Register Reg, int64_t &ImmVal) const
Returns true if MI is an instruction that defines Reg to have a constant value and the value is recor...
static bool isGenericOpcode(unsigned Opc)
TargetInstrInfo & operator=(const TargetInstrInfo &)=delete
const TargetRegisterInfo & getRegisterInfo() const
std::optional< DestSourcePair > isCopyLikeInstr(const MachineInstr &MI) const
virtual ArrayRef< std::pair< unsigned, const char * > > getSerializableBitmaskMachineOperandTargetFlags() const
Return an array that contains the bitmask target flag values and their names.
unsigned getCallFrameSetupOpcode() const
These methods return the opcode of the frame setup/destroy instructions if they exist (-1 otherwise).
virtual bool isSubregFoldable() const
Check whether the target can fold a load that feeds a subreg operand (or a subreg operand that feeds ...
virtual bool isReMaterializableImpl(const MachineInstr &MI) const
For instructions with opcodes for which the M_REMATERIALIZABLE flag is set, this hook lets the target...
virtual Register isStoreToStackSlotPostFE(const MachineInstr &MI, int &FrameIndex) const
Check for post-frame ptr elimination stack locations as well.
virtual Register isLoadFromStackSlotPostFE(const MachineInstr &MI, int &FrameIndex) const
Check for post-frame ptr elimination stack locations as well.
virtual std::pair< uint16_t, uint16_t > getExecutionDomain(const MachineInstr &MI) const
Return the current execution domain and bit mask of possible domains for instruction.
virtual bool optimizeCondBranch(MachineInstr &MI) const
virtual bool analyzeLoop(MachineLoop &L, MachineInstr *&IndVarInst, MachineInstr *&CmpInst) const
Analyze the loop code, return true if it cannot be understood.
unsigned getCatchReturnOpcode() const
virtual unsigned getTailMergeSize(const MachineFunction &MF) const
Returns the target-specific default value for tail merging.
virtual InstructionUniformity getInstructionUniformity(const MachineInstr &MI) const
Return the uniformity behavior of the given instruction.
virtual bool isAsCheapAsAMove(const MachineInstr &MI) const
Return true if the instruction is as cheap as a move instruction.
virtual bool isTailCall(const MachineInstr &Inst) const
Determines whether Inst is a tail call instruction.
const int16_t *const RegClassByHwMode
Subtarget specific sub-array of MCInstrInfo's RegClassByHwModeTables (i.e.
virtual const MachineOperand & getCalleeOperand(const MachineInstr &MI) const
Returns the callee operand from the given MI.
virtual Register isStoreToStackSlot(const MachineInstr &MI, int &FrameIndex) const
If the specified machine instruction is a direct store to a stack slot, return the virtual or physica...
int64_t getFrameTotalSize(const MachineInstr &I) const
Returns the total frame size, which is made up of the space set up inside the pair of frame start-sto...
MachineInstr * commuteInstruction(MachineInstr &MI, bool NewMI=false, unsigned OpIdx1=CommuteAnyOperandIndex, unsigned OpIdx2=CommuteAnyOperandIndex) const
This method commutes the operands of the given machine instruction MI.
virtual bool foldImmediate(MachineInstr &UseMI, MachineInstr &DefMI, Register Reg, MachineRegisterInfo *MRI) const
'Reg' is known to be defined by a move immediate instruction, try to fold the immediate into the use ...
virtual bool isStackSlotCopy(const MachineInstr &MI, int &DestFrameIndex, int &SrcFrameIndex) const
Return true if the specified machine instruction is a copy of one stack slot to another and has no ot...
virtual int getJumpTableIndex(const MachineInstr &MI) const
Return an index for MachineJumpTableInfo if insn is an indirect jump using a jump table,...
virtual bool isAssociativeAndCommutative(const MachineInstr &Inst, bool Invert=false) const
Return true when \P Inst is both associative and commutative.
virtual bool isExplicitTargetIndexDef(const MachineInstr &MI, int &Index, int64_t &Offset) const
Returns true if the given MI defines a TargetIndex operand that can be tracked by their offset,...
virtual bool unfoldMemoryOperand(MachineFunction &MF, MachineInstr &MI, Register Reg, bool UnfoldLoad, bool UnfoldStore, SmallVectorImpl< MachineInstr * > &NewMIs) const
unfoldMemoryOperand - Separate a single instruction which folded a load or a store or a load and a st...
virtual bool isPCRelRegisterOperandLegal(const MachineOperand &MO) const
Allow targets to tell MachineVerifier whether a specific register MachineOperand can be used as part ...
virtual std::optional< std::unique_ptr< outliner::OutlinedFunction > > getOutliningCandidateInfo(const MachineModuleInfo &MMI, std::vector< outliner::Candidate > &RepeatedSequenceLocs, unsigned MinRepeats) const
Returns a outliner::OutlinedFunction struct containing target-specific information for a set of outli...
virtual MachineInstr * createPHISourceCopy(MachineBasicBlock &MBB, MachineBasicBlock::iterator InsPt, const DebugLoc &DL, Register Src, unsigned SrcSubReg, Register Dst) const
During PHI elimination lets the target make necessary checks and insert the copy to the PHI destinati...
virtual MachineBasicBlock::iterator insertOutlinedCall(Module &M, MachineBasicBlock &MBB, MachineBasicBlock::iterator &It, MachineFunction &MF, outliner::Candidate &C) const
Insert a call to an outlined function into the program.
virtual std::optional< unsigned > getInverseOpcode(unsigned Opcode) const
Return the inverse operation opcode if it exists for \P Opcode (e.g.
unsigned getCallFrameDestroyOpcode() const
int64_t getFrameSize(const MachineInstr &I) const
Returns size of the frame associated with the given frame instruction.
virtual MachineBasicBlock * getBranchDestBlock(const MachineInstr &MI) const
virtual bool isPredicated(const MachineInstr &MI) const
Returns true if the instruction is already predicated.
virtual void replaceBranchWithTailCall(MachineBasicBlock &MBB, SmallVectorImpl< MachineOperand > &Cond, const MachineInstr &TailCall) const
Replace the conditional branch in MBB with a conditional tail call.
TargetInstrInfo(const TargetInstrInfo &)=delete
virtual unsigned predictBranchSizeForIfCvt(MachineInstr &MI) const
Return an estimate for the code size reduction (in bytes) which will be caused by removing the given ...
virtual ~TargetInstrInfo()
virtual bool isAccumulationOpcode(unsigned Opcode) const
Return true when \P OpCode is an instruction which performs accumulation into one of its operand regi...
bool isFrameSetup(const MachineInstr &I) const
Returns true if the argument is a frame setup pseudo instruction.
virtual unsigned extraSizeToPredicateInstructions(const MachineFunction &MF, unsigned NumInsts) const
Return the increase in code size needed to predicate a contiguous run of NumInsts instructions.
virtual bool accumulateInstrSeqToRootLatency(MachineInstr &Root) const
When calculate the latency of the root instruction, accumulate the latency of the sequence to the roo...
std::optional< DestSourcePair > isCopyInstr(const MachineInstr &MI) const
If the specific machine instruction is a instruction that moves/copies value from one register to ano...
virtual bool analyzeSelect(const MachineInstr &MI, SmallVectorImpl< MachineOperand > &Cond, unsigned &TrueOp, unsigned &FalseOp, bool &Optimizable) const
Analyze the given select instruction, returning true if it cannot be understood.
virtual Register isStoreToStackSlot(const MachineInstr &MI, int &FrameIndex, TypeSize &MemBytes) const
Optional extension of isStoreToStackSlot that returns the number of bytes stored to the stack.
virtual Register isLoadFromStackSlot(const MachineInstr &MI, int &FrameIndex, TypeSize &MemBytes) const
Optional extension of isLoadFromStackSlot that returns the number of bytes loaded from the stack.
virtual bool getMemOperandsWithOffsetWidth(const MachineInstr &MI, SmallVectorImpl< const MachineOperand * > &BaseOps, int64_t &Offset, bool &OffsetIsScalable, LocationSize &Width, const TargetRegisterInfo *TRI) const
Get zero or more base operands and the byte offset of an instruction that reads/writes memory.
virtual unsigned getInstSizeInBytes(const MachineInstr &MI) const
Returns the size in bytes of the specified MachineInstr, or ~0U when this function is not implemented...
virtual bool isProfitableToDupForIfCvt(MachineBasicBlock &MBB, unsigned NumCycles, BranchProbability Probability) const
Return true if it's profitable for if-converter to duplicate instructions of specified accumulated in...
virtual bool shouldSink(const MachineInstr &MI) const
Return true if the instruction should be sunk by MachineSink.
virtual MachineInstr * convertToThreeAddress(MachineInstr &MI, LiveVariables *LV, LiveIntervals *LIS) const
This method must be implemented by targets that set the M_CONVERTIBLE_TO_3_ADDR flag.
virtual void loadRegFromStackSlot(MachineBasicBlock &MBB, MachineBasicBlock::iterator MI, Register DestReg, int FrameIndex, const TargetRegisterClass *RC, Register VReg, unsigned SubReg=0, MachineInstr::MIFlag Flags=MachineInstr::NoFlags) const
Load the specified register of the given register class from the specified stack frame index.
virtual void setExecutionDomain(MachineInstr &MI, unsigned Domain) const
Change the opcode of MI to execute in Domain.
virtual bool isPredicable(const MachineInstr &MI) const
Return true if the specified instruction can be predicated.
virtual std::pair< unsigned, unsigned > decomposeMachineOperandsTargetFlags(unsigned) const
Decompose the machine operand's target flags into two values - the direct target flag value and any o...
virtual bool isSafeToMoveRegClassDefs(const TargetRegisterClass *RC) const
Return true if it's safe to move a machine instruction that defines the specified register class.
virtual bool canInsertSelect(const MachineBasicBlock &MBB, ArrayRef< MachineOperand > Cond, Register DstReg, Register TrueReg, Register FalseReg, int &CondCycles, int &TrueCycles, int &FalseCycles) const
Return true if it is possible to insert a select instruction that chooses between TrueReg and FalseRe...
virtual bool isUnspillableTerminatorImpl(const MachineInstr *MI) const
Return true if the given terminator MI is not expected to spill.
virtual std::optional< RegImmPair > isAddImmediate(const MachineInstr &MI, Register Reg) const
If the specific machine instruction is an instruction that adds an immediate value and a register,...
static bool isGenericAtomicRMWOpcode(unsigned Opc)
virtual bool hasCommutePreference(MachineInstr &MI, bool &Commute) const
Returns true if the target has a preference on the operands order of the given machine instruction.
static const unsigned CommuteAnyOperandIndex
virtual bool isSafeToMove(const MachineInstr &MI, const MachineBasicBlock *MBB, const MachineFunction &MF) const
Return true if it's safe to move a machine instruction.
virtual bool isHighLatencyDef(int opc) const
Return true if this opcode has high latency to its result.
virtual MachineInstr * emitLdStWithAddr(MachineInstr &MemI, const ExtAddrMode &AM) const
Emit a load/store instruction with the same value register as MemI, but using the address from AM.
virtual bool expandPostRAPseudo(MachineInstr &MI) const
This function is called for all pseudo instructions that remain after register allocation.
virtual ArrayRef< std::pair< unsigned, const char * > > getSerializableDirectMachineOperandTargetFlags() const
Return an array that contains the direct target flag values and their names.
virtual bool shouldHoist(const MachineInstr &MI, const MachineLoop *FromLoop) const
Return false if the instruction should not be hoisted by MachineLICM.
virtual bool getExtractSubregLikeInputs(const MachineInstr &MI, unsigned DefIdx, RegSubRegPairAndIdx &InputReg) const
Target-dependent implementation of getExtractSubregInputs.
virtual unsigned getTailDuplicateSize(CodeGenOptLevel OptLevel) const
Returns the target-specific default value for tail duplication.
unsigned insertUnconditionalBranch(MachineBasicBlock &MBB, MachineBasicBlock *DestBB, const DebugLoc &DL, int *BytesAdded=nullptr) const
virtual bool getIncrementValue(const MachineInstr &MI, int &Value) const
If the instruction is an increment of a constant value, return the amount.
virtual MachineInstr * foldMemoryOperandImpl(MachineFunction &MF, MachineInstr &MI, ArrayRef< unsigned > Ops, MachineBasicBlock::iterator InsertPt, MachineInstr &LoadMI, LiveIntervals *LIS=nullptr) const
Target-dependent implementation for foldMemoryOperand.
virtual bool areLoadsFromSameBasePtr(SDNode *Load1, SDNode *Load2, int64_t &Offset1, int64_t &Offset2) const
This is used by the pre-regalloc scheduler to determine if two loads are loading from the same base address.
virtual bool unfoldMemoryOperand(SelectionDAG &DAG, SDNode *N, SmallVectorImpl< SDNode * > &NewNodes) const
virtual bool analyzeCompare(const MachineInstr &MI, Register &SrcReg, Register &SrcReg2, int64_t &Mask, int64_t &Value) const
For a comparison instruction, return the source registers in SrcReg and SrcReg2 if having two register operands, and the value it compares against in Value.
virtual unsigned getMachineCSELookAheadLimit() const
Return the value to use for the MachineCSE's LookAheadLimit, which is a heuristic used for CSE'ing phys reg defs.
virtual bool isLegalToSplitMBBAt(MachineBasicBlock &MBB, MachineBasicBlock::iterator MBBI) const
Return true if it's legal to split the given basic block at the specified instruction (i.e. the instruction would be the start of a new basic block).
TargetRegisterInfo base class - We assume that the target defines a static array of TargetRegisterDes...
Provide an instruction scheduling machine model to CodeGen passes.
TargetSubtargetInfo - Generic base class for all target subtargets.
static constexpr TypeSize getZero()
Definition TypeSize.h:349
LLVM Value Representation.
Definition Value.h:75
#define llvm_unreachable(msg)
Marks that the current location is not supposed to be reachable.
@ Tail
Attempts to make calls as fast as possible while guaranteeing that tail call optimization can always be performed.
Definition CallingConv.h:76
@ C
The default llvm calling convention, compatible with C.
Definition CallingConv.h:34
InstrType
Represents how an instruction should be mapped by the outliner.
This is an optimization pass for GlobalISel generic memory operations.
Definition Types.h:26
@ Offset
Definition DWP.cpp:532
MachineTraceStrategy
Strategies for selecting traces.
MachineInstrBuilder BuildMI(MachineFunction &MF, const MIMetadata &MIMD, const MCInstrDesc &MCID)
Builder interface. Specify how to create the initial instruction itself.
CombinerObjective
The combiner's goal may differ based on which pattern it is attempting to optimize.
CodeGenOptLevel
Code generation optimization level.
Definition CodeGen.h:82
DWARFExpression::Operation Op
std::pair< MachineOperand, DIExpression * > ParamLoadedValue
InstructionUniformity
Enum describing how instructions behave with respect to uniformity and divergence,...
Definition Uniformity.h:18
@ Default
The result values are uniform if and only if all operands are uniform.
Definition Uniformity.h:20
GenericCycleInfo< MachineSSAContext > MachineCycleInfo
#define N
static unsigned getHashValue(const TargetInstrInfo::RegSubRegPair &Val)
Reuse getHashValue implementation from std::pair<unsigned, unsigned>.
static TargetInstrInfo::RegSubRegPair getTombstoneKey()
static TargetInstrInfo::RegSubRegPair getEmptyKey()
static bool isEqual(const TargetInstrInfo::RegSubRegPair &LHS, const TargetInstrInfo::RegSubRegPair &RHS)
An information struct used to provide DenseMap with the various necessary components for a given value type T.
const MachineOperand * Source
DestSourcePair(const MachineOperand &Dest, const MachineOperand &Src)
const MachineOperand * Destination
Used to describe addressing mode similar to ExtAddrMode in CodeGenPrepare.
ExtAddrMode()=default
Machine model for scheduling, bundling, and heuristics.
Definition MCSchedule.h:258
RegImmPair(Register Reg, int64_t Imm)
Represents a predicate at the MachineFunction level.
bool SingleUseCondition
SingleUseCondition is true if ConditionDef is dead except for the branch(es) at the end of the basic block.
A pair composed of a pair of a register and a sub-register index, and another sub-register index.
RegSubRegPairAndIdx(Register Reg=Register(), unsigned SubReg=0, unsigned SubIdx=0)
A pair composed of a register and a sub-register index.
bool operator==(const RegSubRegPair &P) const
RegSubRegPair(Register Reg=Register(), unsigned SubReg=0)
bool operator!=(const RegSubRegPair &P) const
An individual sequence of instructions to be replaced with a call to an outlined function.
The information necessary to create an outlined function for some class of candidate.