PPCInstrInfo.h (LLVM 18.0.0git)
//===-- PPCInstrInfo.h - PowerPC Instruction Information --------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file contains the PowerPC implementation of the TargetInstrInfo class.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_LIB_TARGET_POWERPC_PPCINSTRINFO_H
#define LLVM_LIB_TARGET_POWERPC_PPCINSTRINFO_H

#include "MCTargetDesc/PPCMCTargetDesc.h"
#include "PPCRegisterInfo.h"
#include "llvm/ADT/SmallSet.h"
#include "llvm/CodeGen/TargetInstrInfo.h"

#define GET_INSTRINFO_HEADER
#include "PPCGenInstrInfo.inc"

namespace llvm {

// Instructions that have an immediate form might be convertible to that
// form if the correct input is a result of a load immediate. In order to
// know whether the transformation is special, we might need to know some
// of the details of the two forms.
struct ImmInstrInfo {
  // Is the immediate field in the immediate form signed or unsigned?
  uint64_t SignedImm : 1;
  // Does the immediate need to be a multiple of some value?
  uint64_t ImmMustBeMultipleOf : 5;
  // Is R0/X0 treated specially by the original r+r instruction?
  // If so, in which operand?
  uint64_t ZeroIsSpecialOrig : 3;
  // Is R0/X0 treated specially by the new r+i instruction?
  // If so, in which operand?
  uint64_t ZeroIsSpecialNew : 3;
  // Is the operation commutative?
  uint64_t IsCommutative : 1;
  // The operand number to check for add-immediate def.
  uint64_t OpNoForForwarding : 5;
  // The operand number for the immediate.
  uint64_t ImmOpNo : 5;
  // The opcode of the new instruction.
  uint64_t ImmOpcode : 16;
  // The size of the immediate.
  uint64_t ImmWidth : 5;
  // The immediate should be truncated to N bits.
  uint64_t TruncateImmTo : 5;
  // Is the instruction summing the operands?
  uint64_t IsSummingOperands : 1;
};
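
// Illustrative sketch (assumed usage, not part of the original header):
// instrHasImmForm(), declared further down, fills in an ImmInstrInfo for an
// opcode that has an r+i counterpart, and the transformToImmForm* helpers
// consult the filled-in fields. A hypothetical caller might look like:
//
//   ImmInstrInfo III;
//   if (TII.instrHasImmForm(MI.getOpcode(), /*IsVFReg=*/false, III,
//                           /*PostRA=*/false))
//     ; // III.ImmOpcode is the r+i opcode, III.ImmWidth its immediate size.
//
// The per-opcode field values live in PPCInstrInfo.cpp, not in this header.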

// Information required to convert an instruction to just a materialized
// immediate.
struct LoadImmediateInfo {
  unsigned Imm : 16;
  unsigned Is64Bit : 1;
  unsigned SetCR : 1;
};
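
// Illustrative note (assumed usage, not part of the original header):
// replaceInstrWithLI(), declared below, consumes a LoadImmediateInfo when an
// instruction can be reduced to a single load-immediate, e.g. a caller might
// populate it as:
//
//   LoadImmediateInfo LII = {/*Imm=*/42, /*Is64Bit=*/true, /*SetCR=*/false};
//   TII.replaceInstrWithLI(MI, LII);
//
// (The example values are hypothetical; see simplifyToLI() in
// PPCInstrInfo.cpp for how the fields are actually derived.)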

// Index into the OpcodesForSpill array.
enum SpillOpcodeKey {
  SOK_Int4Spill,
  SOK_Int8Spill,
  SOK_Float8Spill,
  SOK_Float4Spill,
  SOK_CRSpill,
  SOK_CRBitSpill,
  SOK_VRVectorSpill,
  SOK_VSXVectorSpill,
  SOK_VectorFloat8Spill,
  SOK_VectorFloat4Spill,
  SOK_SpillToVSR,
  SOK_PairedVecSpill,
  SOK_AccumulatorSpill,
  SOK_UAccumulatorSpill,
  SOK_WAccumulatorSpill,
  SOK_SPESpill,
  SOK_PairedG8Spill,
  SOK_LastOpcodeSpill // This must be last on the enum.
};

// Define list of load and store spill opcodes.
#define NoInstr PPC::INSTRUCTION_LIST_END
#define Pwr8LoadOpcodes \
  { \
    PPC::LWZ, PPC::LD, PPC::LFD, PPC::LFS, PPC::RESTORE_CR, \
        PPC::RESTORE_CRBIT, PPC::LVX, PPC::LXVD2X, PPC::LXSDX, PPC::LXSSPX, \
        PPC::SPILLTOVSR_LD, NoInstr, NoInstr, NoInstr, NoInstr, PPC::EVLDD, \
        PPC::RESTORE_QUADWORD \
  }

#define Pwr9LoadOpcodes \
  { \
    PPC::LWZ, PPC::LD, PPC::LFD, PPC::LFS, PPC::RESTORE_CR, \
        PPC::RESTORE_CRBIT, PPC::LVX, PPC::LXV, PPC::DFLOADf64, \
        PPC::DFLOADf32, PPC::SPILLTOVSR_LD, NoInstr, NoInstr, NoInstr, \
        NoInstr, NoInstr, PPC::RESTORE_QUADWORD \
  }

#define Pwr10LoadOpcodes \
  { \
    PPC::LWZ, PPC::LD, PPC::LFD, PPC::LFS, PPC::RESTORE_CR, \
        PPC::RESTORE_CRBIT, PPC::LVX, PPC::LXV, PPC::DFLOADf64, \
        PPC::DFLOADf32, PPC::SPILLTOVSR_LD, PPC::LXVP, PPC::RESTORE_ACC, \
        PPC::RESTORE_UACC, NoInstr, NoInstr, PPC::RESTORE_QUADWORD \
  }

#define FutureLoadOpcodes \
  { \
    PPC::LWZ, PPC::LD, PPC::LFD, PPC::LFS, PPC::RESTORE_CR, \
        PPC::RESTORE_CRBIT, PPC::LVX, PPC::LXV, PPC::DFLOADf64, \
        PPC::DFLOADf32, PPC::SPILLTOVSR_LD, PPC::LXVP, PPC::RESTORE_ACC, \
        PPC::RESTORE_UACC, PPC::RESTORE_WACC, NoInstr, PPC::RESTORE_QUADWORD \
  }

#define Pwr8StoreOpcodes \
  { \
    PPC::STW, PPC::STD, PPC::STFD, PPC::STFS, PPC::SPILL_CR, PPC::SPILL_CRBIT, \
        PPC::STVX, PPC::STXVD2X, PPC::STXSDX, PPC::STXSSPX, \
        PPC::SPILLTOVSR_ST, NoInstr, NoInstr, NoInstr, NoInstr, PPC::EVSTDD, \
        PPC::SPILL_QUADWORD \
  }

#define Pwr9StoreOpcodes \
  { \
    PPC::STW, PPC::STD, PPC::STFD, PPC::STFS, PPC::SPILL_CR, PPC::SPILL_CRBIT, \
        PPC::STVX, PPC::STXV, PPC::DFSTOREf64, PPC::DFSTOREf32, \
        PPC::SPILLTOVSR_ST, NoInstr, NoInstr, NoInstr, NoInstr, NoInstr, \
        PPC::SPILL_QUADWORD \
  }

#define Pwr10StoreOpcodes \
  { \
    PPC::STW, PPC::STD, PPC::STFD, PPC::STFS, PPC::SPILL_CR, PPC::SPILL_CRBIT, \
        PPC::STVX, PPC::STXV, PPC::DFSTOREf64, PPC::DFSTOREf32, \
        PPC::SPILLTOVSR_ST, PPC::STXVP, PPC::SPILL_ACC, PPC::SPILL_UACC, \
        NoInstr, NoInstr, PPC::SPILL_QUADWORD \
  }

#define FutureStoreOpcodes \
  { \
    PPC::STW, PPC::STD, PPC::STFD, PPC::STFS, PPC::SPILL_CR, PPC::SPILL_CRBIT, \
        PPC::STVX, PPC::STXV, PPC::DFSTOREf64, PPC::DFSTOREf32, \
        PPC::SPILLTOVSR_ST, PPC::STXVP, PPC::SPILL_ACC, PPC::SPILL_UACC, \
        PPC::SPILL_WACC, NoInstr, PPC::SPILL_QUADWORD \
  }

// Initialize arrays for load and store spill opcodes on supported subtargets.
#define StoreOpcodesForSpill \
  { Pwr8StoreOpcodes, Pwr9StoreOpcodes, Pwr10StoreOpcodes, FutureStoreOpcodes }
#define LoadOpcodesForSpill \
  { Pwr8LoadOpcodes, Pwr9LoadOpcodes, Pwr10LoadOpcodes, FutureLoadOpcodes }
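
// Illustrative note (assumed, not from the original header): the four rows of
// these tables correspond to the Power8/Power9/Power10/Future spill targets,
// and the columns follow SpillOpcodeKey, so a spill-opcode lookup amounts to
// something like:
//
//   unsigned Opc =
//       StoreSpillOpcodesArray[getSpillTarget()][getSpillIndex(RC)];
//
// getStoreOpcodeForSpill()/getLoadOpcodeForSpill() below wrap this lookup.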

class PPCSubtarget;
class PPCInstrInfo : public PPCGenInstrInfo {
  PPCSubtarget &Subtarget;
  const PPCRegisterInfo RI;
  const unsigned StoreSpillOpcodesArray[4][SOK_LastOpcodeSpill] =
      StoreOpcodesForSpill;
  const unsigned LoadSpillOpcodesArray[4][SOK_LastOpcodeSpill] =
      LoadOpcodesForSpill;

  void StoreRegToStackSlot(MachineFunction &MF, unsigned SrcReg, bool isKill,
                           int FrameIdx, const TargetRegisterClass *RC,
                           SmallVectorImpl<MachineInstr *> &NewMIs) const;
  void LoadRegFromStackSlot(MachineFunction &MF, const DebugLoc &DL,
                            unsigned DestReg, int FrameIdx,
                            const TargetRegisterClass *RC,
                            SmallVectorImpl<MachineInstr *> &NewMIs) const;

  // Replace the instruction with a single LI if possible. \p DefMI must be LI
  // or LI8.
  bool simplifyToLI(MachineInstr &MI, MachineInstr &DefMI,
                    unsigned OpNoForForwarding,
                    MachineInstr **KilledDef) const;
  // If the inst is imm-form and its register operand is produced by an ADDI,
  // put the imm into the inst directly and remove the ADDI if possible.
  bool transformToNewImmFormFedByAdd(MachineInstr &MI, MachineInstr &DefMI,
                                     unsigned OpNoForForwarding) const;
  // If the inst is x-form and has an imm-form and one of its operands is
  // produced by a LI, put the imm into the inst directly and remove the LI
  // if possible.
  bool transformToImmFormFedByLI(MachineInstr &MI, const ImmInstrInfo &III,
                                 unsigned ConstantOpNo,
                                 MachineInstr &DefMI) const;
  // If the inst is x-form and has an imm-form and one of its operands is
  // produced by an add-immediate, try to transform it when possible.
  bool transformToImmFormFedByAdd(MachineInstr &MI, const ImmInstrInfo &III,
                                  unsigned ConstantOpNo, MachineInstr &DefMI,
                                  bool KillDefMI) const;
  // Try to find whether the instruction 'MI' contains any operand that
  // could be forwarded from some inst that feeds it. If yes, return the
  // Def of that operand, and OpNoForForwarding is the operand index in
  // the 'MI' for that 'Def'. If we see another use of this Def between
  // the Def and the MI, SeenIntermediateUse becomes 'true'.
  MachineInstr *getForwardingDefMI(MachineInstr &MI,
                                   unsigned &OpNoForForwarding,
                                   bool &SeenIntermediateUse) const;

  // Can the user MI have its source at index \p OpNoForForwarding
  // forwarded from an add-immediate that feeds it?
  bool isUseMIElgibleForForwarding(MachineInstr &MI, const ImmInstrInfo &III,
                                   unsigned OpNoForForwarding) const;
  bool isDefMIElgibleForForwarding(MachineInstr &DefMI,
                                   const ImmInstrInfo &III,
                                   MachineOperand *&ImmMO,
                                   MachineOperand *&RegMO) const;
  bool isImmElgibleForForwarding(const MachineOperand &ImmMO,
                                 const MachineInstr &DefMI,
                                 const ImmInstrInfo &III,
                                 int64_t &Imm,
                                 int64_t BaseImm = 0) const;
  bool isRegElgibleForForwarding(const MachineOperand &RegMO,
                                 const MachineInstr &DefMI,
                                 const MachineInstr &MI, bool KillDefMI,
                                 bool &IsFwdFeederRegKilled,
                                 bool &SeenIntermediateUse) const;
  unsigned getSpillTarget() const;
  ArrayRef<unsigned> getStoreOpcodesForSpillArray() const;
  ArrayRef<unsigned> getLoadOpcodesForSpillArray() const;
  unsigned getSpillIndex(const TargetRegisterClass *RC) const;
  int16_t getFMAOpIdxInfo(unsigned Opcode) const;
  void reassociateFMA(MachineInstr &Root, MachineCombinerPattern Pattern,
                      SmallVectorImpl<MachineInstr *> &InsInstrs,
                      SmallVectorImpl<MachineInstr *> &DelInstrs,
                      DenseMap<unsigned, unsigned> &InstrIdxForVirtReg) const;
  MachineInstr *
  generateLoadForNewConst(unsigned Idx, MachineInstr *MI, Type *Ty,
                          SmallVectorImpl<MachineInstr *> &InsInstrs) const;
  virtual void anchor();

protected:
  /// Commutes the operands in the given instruction.
  /// The commutable operands are specified by their indices OpIdx1 and OpIdx2.
  ///
  /// Do not call this method for a non-commutable instruction or for a
  /// non-commutable pair of operand indices OpIdx1 and OpIdx2.
  /// Even though the instruction is commutable, the method may still
  /// fail to commute the operands; a null pointer is returned in such cases.
  ///
  /// For example, we can commute rlwimi instructions, but only if the
  /// rotate amt is zero. We also have to munge the immediates a bit.
  MachineInstr *commuteInstructionImpl(MachineInstr &MI, bool NewMI,
                                       unsigned OpIdx1,
                                       unsigned OpIdx2) const override;

public:
  explicit PPCInstrInfo(PPCSubtarget &STI);

  bool isLoadFromConstantPool(MachineInstr *I) const;
  const Constant *getConstantFromConstantPool(MachineInstr *I) const;

  /// getRegisterInfo - TargetInstrInfo is a superset of MRegister info. As
  /// such, whenever a client has an instance of instruction info, it should
  /// always be able to get register info as well (through this method).
  ///
  const PPCRegisterInfo &getRegisterInfo() const { return RI; }

  bool isXFormMemOp(unsigned Opcode) const {
    return get(Opcode).TSFlags & PPCII::XFormMemOp;
  }
  bool isPrefixed(unsigned Opcode) const {
    return get(Opcode).TSFlags & PPCII::Prefixed;
  }
  bool isSExt32To64(unsigned Opcode) const {
    return get(Opcode).TSFlags & PPCII::SExt32To64;
  }
  bool isZExt32To64(unsigned Opcode) const {
    return get(Opcode).TSFlags & PPCII::ZExt32To64;
  }

  static bool isSameClassPhysRegCopy(unsigned Opcode) {
    unsigned CopyOpcodes[] = {PPC::OR,        PPC::OR8,   PPC::FMR,
                              PPC::VOR,       PPC::XXLOR, PPC::XXLORf,
                              PPC::XSCPSGNDP, PPC::MCRF,  PPC::CROR,
                              PPC::EVOR,      -1U};
    for (int i = 0; CopyOpcodes[i] != -1U; i++)
      if (Opcode == CopyOpcodes[i])
        return true;
    return false;
  }

  ScheduleHazardRecognizer *
  CreateTargetHazardRecognizer(const TargetSubtargetInfo *STI,
                               const ScheduleDAG *DAG) const override;
  ScheduleHazardRecognizer *
  CreateTargetPostRAHazardRecognizer(const InstrItineraryData *II,
                                     const ScheduleDAG *DAG) const override;

  unsigned getInstrLatency(const InstrItineraryData *ItinData,
                           const MachineInstr &MI,
                           unsigned *PredCost = nullptr) const override;

  std::optional<unsigned> getOperandLatency(const InstrItineraryData *ItinData,
                                            const MachineInstr &DefMI,
                                            unsigned DefIdx,
                                            const MachineInstr &UseMI,
                                            unsigned UseIdx) const override;
  std::optional<unsigned> getOperandLatency(const InstrItineraryData *ItinData,
                                            SDNode *DefNode, unsigned DefIdx,
                                            SDNode *UseNode,
                                            unsigned UseIdx) const override {
    return PPCGenInstrInfo::getOperandLatency(ItinData, DefNode, DefIdx,
                                              UseNode, UseIdx);
  }

  bool hasLowDefLatency(const TargetSchedModel &SchedModel,
                        const MachineInstr &DefMI,
                        unsigned DefIdx) const override {
    // Machine LICM should hoist all instructions in low-register-pressure
    // situations; none are sufficiently free to justify leaving in a loop
    // body.
    return false;
  }

  bool useMachineCombiner() const override { return true; }

  /// When getMachineCombinerPatterns() finds patterns, this function generates
  /// the instructions that could replace the original code sequence.
  void genAlternativeCodeSequence(
      MachineInstr &Root, MachineCombinerPattern Pattern,
      SmallVectorImpl<MachineInstr *> &InsInstrs,
      SmallVectorImpl<MachineInstr *> &DelInstrs,
      DenseMap<unsigned, unsigned> &InstrIdxForVirtReg) const override;

  /// Return true when there is potentially a faster code sequence for an FMA
  /// chain ending in \p Root. All potential patterns are output in the \p
  /// P array.
  bool getFMAPatterns(MachineInstr &Root,
                      SmallVectorImpl<MachineCombinerPattern> &P,
                      bool DoRegPressureReduce) const;

  /// Return true when there is potentially a faster code sequence
  /// for an instruction chain ending in <Root>. All potential patterns are
  /// output in the <Pattern> array.
  bool getMachineCombinerPatterns(MachineInstr &Root,
                                  SmallVectorImpl<MachineCombinerPattern> &P,
                                  bool DoRegPressureReduce) const override;

  /// On PowerPC, we leverage the machine combiner pass to reduce register
  /// pressure when it is high for one BB.
  /// Return true if register pressure for \p MBB is high and the ABI is
  /// supported to reduce register pressure. Otherwise return false.
  bool shouldReduceRegisterPressure(
      const MachineBasicBlock *MBB,
      const RegisterClassInfo *RegClassInfo) const override;

  /// Fixup the placeholders we put in genAlternativeCodeSequence() for
  /// MachineCombiner.
  void
  finalizeInsInstrs(MachineInstr &Root, MachineCombinerPattern &P,
                    SmallVectorImpl<MachineInstr *> &InsInstrs) const override;

  bool isAssociativeAndCommutative(const MachineInstr &Inst,
                                   bool Invert) const override;

  /// On PowerPC, we try to reassociate FMA chains, which will increase
  /// instruction size. Set the extension resource length limit to 1 for the
  /// edge case. Resource length is calculated by scaled resource usage in
  /// getCycles(). Because of the division in getCycles(), it returns different
  /// cycles due to legacy scaled resource usage, so the new resource length
  /// may be the same as the legacy one or 1 bigger than it.
  /// We need to exclude the "1 bigger" case even though the resource length is
  /// not preserved, to allow more FMA chain reassociations on PowerPC.
  int getExtendResourceLenLimit() const override { return 1; }

  // PowerPC specific version of setSpecialOperandAttr that copies Flags to MI
  // and clears nuw, nsw, and exact flags.
  using TargetInstrInfo::setSpecialOperandAttr;
  void setSpecialOperandAttr(MachineInstr &MI, uint32_t Flags) const;

  bool isCoalescableExtInstr(const MachineInstr &MI,
                             Register &SrcReg, Register &DstReg,
                             unsigned &SubIdx) const override;
  unsigned isLoadFromStackSlot(const MachineInstr &MI,
                               int &FrameIndex) const override;
  bool isReallyTriviallyReMaterializable(const MachineInstr &MI) const override;
  unsigned isStoreToStackSlot(const MachineInstr &MI,
                              int &FrameIndex) const override;

  bool findCommutedOpIndices(const MachineInstr &MI, unsigned &SrcOpIdx1,
                             unsigned &SrcOpIdx2) const override;

  void insertNoop(MachineBasicBlock &MBB,
                  MachineBasicBlock::iterator MI) const override;

  // Branch analysis.
  bool analyzeBranch(MachineBasicBlock &MBB, MachineBasicBlock *&TBB,
                     MachineBasicBlock *&FBB,
                     SmallVectorImpl<MachineOperand> &Cond,
                     bool AllowModify) const override;
  unsigned removeBranch(MachineBasicBlock &MBB,
                        int *BytesRemoved = nullptr) const override;
  unsigned insertBranch(MachineBasicBlock &MBB, MachineBasicBlock *TBB,
                        MachineBasicBlock *FBB, ArrayRef<MachineOperand> Cond,
                        const DebugLoc &DL,
                        int *BytesAdded = nullptr) const override;

  // Select analysis.
  bool canInsertSelect(const MachineBasicBlock &, ArrayRef<MachineOperand> Cond,
                       Register, Register, Register, int &, int &,
                       int &) const override;
  void insertSelect(MachineBasicBlock &MBB, MachineBasicBlock::iterator MI,
                    const DebugLoc &DL, Register DstReg,
                    ArrayRef<MachineOperand> Cond, Register TrueReg,
                    Register FalseReg) const override;

  void copyPhysReg(MachineBasicBlock &MBB, MachineBasicBlock::iterator I,
                   const DebugLoc &DL, MCRegister DestReg, MCRegister SrcReg,
                   bool KillSrc) const override;

  void storeRegToStackSlot(MachineBasicBlock &MBB,
                           MachineBasicBlock::iterator MBBI, Register SrcReg,
                           bool isKill, int FrameIndex,
                           const TargetRegisterClass *RC,
                           const TargetRegisterInfo *TRI,
                           Register VReg) const override;

  // Emits a register spill without updating the register class for vector
  // registers. This ensures that when we spill a vector register the
  // element order in the register is the same as it was in memory.
  void storeRegToStackSlotNoUpd(MachineBasicBlock &MBB,
                                MachineBasicBlock::iterator MBBI,
                                unsigned SrcReg, bool isKill, int FrameIndex,
                                const TargetRegisterClass *RC,
                                const TargetRegisterInfo *TRI) const;

  void loadRegFromStackSlot(MachineBasicBlock &MBB,
                            MachineBasicBlock::iterator MBBI, Register DestReg,
                            int FrameIndex, const TargetRegisterClass *RC,
                            const TargetRegisterInfo *TRI,
                            Register VReg) const override;

  // Emits a register reload without updating the register class for vector
  // registers. This ensures that when we reload a vector register the
  // element order in the register is the same as it was in memory.
  void loadRegFromStackSlotNoUpd(MachineBasicBlock &MBB,
                                 MachineBasicBlock::iterator MBBI,
                                 unsigned DestReg, int FrameIndex,
                                 const TargetRegisterClass *RC,
                                 const TargetRegisterInfo *TRI) const;

  unsigned getStoreOpcodeForSpill(const TargetRegisterClass *RC) const;

  unsigned getLoadOpcodeForSpill(const TargetRegisterClass *RC) const;

  bool
  reverseBranchCondition(SmallVectorImpl<MachineOperand> &Cond) const override;

  bool FoldImmediate(MachineInstr &UseMI, MachineInstr &DefMI, Register Reg,
                     MachineRegisterInfo *MRI) const override;

  bool onlyFoldImmediate(MachineInstr &UseMI, MachineInstr &DefMI,
                         Register Reg) const;

  // If conversion by predication (only supported by some branch instructions).
  // All of the profitability checks always return true; it is always
  // profitable to use the predicated branches.
  bool isProfitableToIfCvt(MachineBasicBlock &MBB,
                           unsigned NumCycles, unsigned ExtraPredCycles,
                           BranchProbability Probability) const override {
    return true;
  }

  bool isProfitableToIfCvt(MachineBasicBlock &TMBB,
                           unsigned NumT, unsigned ExtraT,
                           MachineBasicBlock &FMBB,
                           unsigned NumF, unsigned ExtraF,
                           BranchProbability Probability) const override;

  bool isProfitableToDupForIfCvt(MachineBasicBlock &MBB, unsigned NumCycles,
                                 BranchProbability Probability) const override {
    return true;
  }

  bool isProfitableToUnpredicate(MachineBasicBlock &TMBB,
                                 MachineBasicBlock &FMBB) const override {
    return false;
  }

  // Predication support.
  bool isPredicated(const MachineInstr &MI) const override;

  bool isSchedulingBoundary(const MachineInstr &MI,
                            const MachineBasicBlock *MBB,
                            const MachineFunction &MF) const override;

  bool PredicateInstruction(MachineInstr &MI,
                            ArrayRef<MachineOperand> Pred) const override;

  bool SubsumesPredicate(ArrayRef<MachineOperand> Pred1,
                         ArrayRef<MachineOperand> Pred2) const override;

  bool ClobbersPredicate(MachineInstr &MI, std::vector<MachineOperand> &Pred,
                         bool SkipDead) const override;

  // Comparison optimization.

  bool analyzeCompare(const MachineInstr &MI, Register &SrcReg,
                      Register &SrcReg2, int64_t &Mask,
                      int64_t &Value) const override;

  bool optimizeCompareInstr(MachineInstr &CmpInstr, Register SrcReg,
                            Register SrcReg2, int64_t Mask, int64_t Value,
                            const MachineRegisterInfo *MRI) const override;

  /// Return true if we can determine the base operand, byte offset, and
  /// memory width of the instruction. Width is the size of memory that is
  /// being loaded/stored (e.g. 1, 2, 4, 8).
  bool getMemOperandWithOffsetWidth(const MachineInstr &LdSt,
                                    const MachineOperand *&BaseOp,
                                    int64_t &Offset, unsigned &Width,
                                    const TargetRegisterInfo *TRI) const;

  bool optimizeCmpPostRA(MachineInstr &MI) const;

  /// Get the base operand and byte offset of an instruction that reads/writes
  /// memory.
  bool getMemOperandsWithOffsetWidth(
      const MachineInstr &LdSt,
      SmallVectorImpl<const MachineOperand *> &BaseOps, int64_t &Offset,
      bool &OffsetIsScalable, unsigned &Width,
      const TargetRegisterInfo *TRI) const override;

  /// Returns true if the two given memory operations should be scheduled
  /// adjacent.
  bool shouldClusterMemOps(ArrayRef<const MachineOperand *> BaseOps1,
                           ArrayRef<const MachineOperand *> BaseOps2,
                           unsigned ClusterSize,
                           unsigned NumBytes) const override;

  /// Return true if two MIs access different memory addresses and false
  /// otherwise.
  bool
  areMemAccessesTriviallyDisjoint(const MachineInstr &MIa,
                                  const MachineInstr &MIb) const override;

  /// GetInstSize - Return the number of bytes of code the specified
  /// instruction may be. This returns the maximum number of bytes.
  ///
  unsigned getInstSizeInBytes(const MachineInstr &MI) const override;

  MCInst getNop() const override;

  std::pair<unsigned, unsigned>
  decomposeMachineOperandsTargetFlags(unsigned TF) const override;

  ArrayRef<std::pair<unsigned, const char *>>
  getSerializableDirectMachineOperandTargetFlags() const override;

  ArrayRef<std::pair<unsigned, const char *>>
  getSerializableBitmaskMachineOperandTargetFlags() const override;

  // Expand VSX Memory Pseudo instruction to either a VSX or a FP instruction.
  bool expandVSXMemPseudo(MachineInstr &MI) const;

  // Lower pseudo instructions after register allocation.
  bool expandPostRAPseudo(MachineInstr &MI) const override;

  const TargetRegisterClass *updatedRC(const TargetRegisterClass *RC) const;
  static int getRecordFormOpcode(unsigned Opcode);

  bool isTOCSaveMI(const MachineInstr &MI) const;

  std::pair<bool, bool>
  isSignOrZeroExtended(const unsigned Reg, const unsigned BinOpDepth,
                       const MachineRegisterInfo *MRI) const;

  // Return true if the register is sign-extended from 32 to 64 bits.
  bool isSignExtended(const unsigned Reg,
                      const MachineRegisterInfo *MRI) const {
    return isSignOrZeroExtended(Reg, 0, MRI).first;
  }

  // Return true if the register is zero-extended from 32 to 64 bits.
  bool isZeroExtended(const unsigned Reg,
                      const MachineRegisterInfo *MRI) const {
    return isSignOrZeroExtended(Reg, 0, MRI).second;
  }

  bool convertToImmediateForm(MachineInstr &MI,
                              SmallSet<Register, 4> &RegsToUpdate,
                              MachineInstr **KilledDef = nullptr) const;
  bool foldFrameOffset(MachineInstr &MI) const;
  bool combineRLWINM(MachineInstr &MI, MachineInstr **ToErase = nullptr) const;
  bool isADDIInstrEligibleForFolding(MachineInstr &ADDIMI, int64_t &Imm) const;
  bool isADDInstrEligibleForFolding(MachineInstr &ADDMI) const;
  bool isImmInstrEligibleForFolding(MachineInstr &MI, unsigned &BaseReg,
                                    unsigned &XFormOpcode,
                                    int64_t &OffsetOfImmInstr,
                                    ImmInstrInfo &III) const;
  bool isValidToBeChangedReg(MachineInstr *ADDMI, unsigned Index,
                             MachineInstr *&ADDIMI, int64_t &OffsetAddi,
                             int64_t OffsetImm) const;

  void replaceInstrWithLI(MachineInstr &MI, const LoadImmediateInfo &LII) const;
  void replaceInstrOperandWithImm(MachineInstr &MI, unsigned OpNo,
                                  int64_t Imm) const;

  bool instrHasImmForm(unsigned Opc, bool IsVFReg, ImmInstrInfo &III,
                       bool PostRA) const;

  // In the PostRA phase, try to find the instruction that defines \p Reg
  // before \p MI.
  // \p SeenIntermediateUse is set to true if uses between DefMI and \p MI
  // exist.
  MachineInstr *getDefMIPostRA(unsigned Reg, MachineInstr &MI,
                               bool &SeenIntermediateUse) const;

  // Materialize immediate after RA.
  void materializeImmPostRA(MachineBasicBlock &MBB,
                            MachineBasicBlock::iterator MBBI,
                            const DebugLoc &DL, Register Reg,
                            int64_t Imm) const;

  /// Check whether \p Opcode is BDNZ (Decrement CTR and branch if it is still
  /// nonzero).
  bool isBDNZ(unsigned Opcode) const;

  /// Find the hardware loop instruction used to set up the specified loop.
  /// On PPC, we have two instructions used to set up the hardware loop
  /// (MTCTRloop, MTCTR8loop) with corresponding endloop (BDNZ, BDNZ8)
  /// instructions to indicate the end of a loop.
  MachineInstr *
  findLoopInstr(MachineBasicBlock &PreHeader,
                SmallPtrSet<MachineBasicBlock *, 8> &Visited) const;

  /// Analyze loop L, which must be a single-basic-block loop, and if the
  /// conditions can be understood enough produce a PipelinerLoopInfo object.
  std::unique_ptr<TargetInstrInfo::PipelinerLoopInfo>
  analyzeLoopForPipelining(MachineBasicBlock *LoopBB) const override;
};

} // end namespace llvm

#endif // LLVM_LIB_TARGET_POWERPC_PPCINSTRINFO_H