LLVM 17.0.0git
PPCInstrInfo.h
1//===-- PPCInstrInfo.h - PowerPC Instruction Information --------*- C++ -*-===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8//
9// This file contains the PowerPC implementation of the TargetInstrInfo class.
10//
11//===----------------------------------------------------------------------===//
12
13#ifndef LLVM_LIB_TARGET_POWERPC_PPCINSTRINFO_H
14#define LLVM_LIB_TARGET_POWERPC_PPCINSTRINFO_H
15
16#include "PPCRegisterInfo.h"
17#include "llvm/CodeGen/TargetInstrInfo.h"
18
19#define GET_INSTRINFO_HEADER
20#include "PPCGenInstrInfo.inc"
21
22namespace llvm {
23
24/// PPCII - This namespace holds all of the PowerPC target-specific
25/// per-instruction flags. These must match the corresponding definitions in
26/// PPC.td and PPCInstrFormats.td.
27namespace PPCII {
28enum {
29 // PPC970 Instruction Flags. These flags describe the characteristics of the
30 // PowerPC 970 (aka G5) dispatch groups and how they are formed out of
31 // raw machine instructions.
32
33 /// PPC970_First - This instruction starts a new dispatch group, so it will
34 /// always be the first one in the group.
35 PPC970_First = 0x1,
36
37 /// PPC970_Single - This instruction starts a new dispatch group and
38 /// terminates it, so it will be the sole instruction in the group.
39 PPC970_Single = 0x2,
40
41 /// PPC970_Cracked - This instruction is cracked into two pieces, requiring
42 /// two dispatch pipes to be available to issue.
43 PPC970_Cracked = 0x4,
44
45 /// PPC970_Mask/Shift - This is a bitmask that selects the pipeline type that
46 /// an instruction is issued to.
47 PPC970_Shift = 3,
48 PPC970_Mask = 0x07 << PPC970_Shift
49};
50enum PPC970_Unit {
51 /// These are the various PPC970 execution unit pipelines. Each instruction
52 /// is one of these.
53 PPC970_Pseudo = 0 << PPC970_Shift, // Pseudo instruction
54 PPC970_FXU = 1 << PPC970_Shift, // Fixed Point (aka Integer/ALU) Unit
55 PPC970_LSU = 2 << PPC970_Shift, // Load Store Unit
56 PPC970_FPU = 3 << PPC970_Shift, // Floating Point Unit
57 PPC970_CRU = 4 << PPC970_Shift, // Control Register Unit
58 PPC970_VALU = 5 << PPC970_Shift, // Vector ALU
59 PPC970_VPERM = 6 << PPC970_Shift, // Vector Permute Unit
60 PPC970_BRU = 7 << PPC970_Shift // Branch Unit
61};
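// --- Editor's example (not part of PPCInstrInfo.h) ---------------------------
// A minimal sketch of how the PPC970 unit is recovered from an instruction's
// TSFlags word: the unit enumerators above are pre-shifted, so masking with
// PPC970_Mask yields one of them directly (this is effectively what the PPC970
// hazard recognizer does). The helper name is illustrative only.
inline unsigned examplePPC970Unit(unsigned TSFlags) {
  return TSFlags & PPC970_Mask; // one of PPC970_Pseudo .. PPC970_BRU
}
// -----------------------------------------------------------------------------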
62
63enum {
64 /// Shift count to bypass PPC970 flags
65 NewDef_Shift = 6,
66
67 /// This instruction is an X-Form memory operation.
68 XFormMemOp = 0x1 << NewDef_Shift,
69 /// This instruction is prefixed.
70 Prefixed = 0x1 << (NewDef_Shift + 1),
71 /// This instruction produced a sign extended result.
72 SExt32To64 = 0x1 << (NewDef_Shift + 2),
73 /// This instruction produced a zero extended result.
74 ZExt32To64 = 0x1 << (NewDef_Shift + 3)
75};
76} // end namespace PPCII
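// --- Editor's example (not part of PPCInstrInfo.h) ---------------------------
// A minimal sketch of how the PPCII flags compose inside a TSFlags word and how
// individual bits are tested; the predicates such as PPCInstrInfo::isPrefixed()
// further down do exactly this kind of test. ExampleTSFlags is an illustrative
// value, not any real instruction's flags.
constexpr unsigned ExampleTSFlags =
    PPCII::PPC970_Single | PPCII::PPC970_FXU | PPCII::SExt32To64;
static_assert((ExampleTSFlags & PPCII::SExt32To64) != 0,
              "SExt32To64 should be set in this example");
static_assert((ExampleTSFlags & PPCII::Prefixed) == 0,
              "Prefixed was not included in this example");
// -----------------------------------------------------------------------------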
77
78// Instructions that have an immediate form might be convertible to that
79// form if the correct input is a result of a load immediate. In order to
80// know whether the transformation is special, we might need to know some
81// of the details of the two forms.
82struct ImmInstrInfo {
83 // Is the immediate field in the immediate form signed or unsigned?
84 uint64_t SignedImm : 1;
85 // Does the immediate need to be a multiple of some value?
86 uint64_t ImmMustBeMultipleOf : 5;
87 // Is R0/X0 treated specially by the original r+r instruction?
88 // If so, in which operand?
89 uint64_t ZeroIsSpecialOrig : 3;
90 // Is R0/X0 treated specially by the new r+i instruction?
91 // If so, in which operand?
92 uint64_t ZeroIsSpecialNew : 3;
93 // Is the operation commutative?
94 uint64_t IsCommutative : 1;
95 // The operand number to check for add-immediate def.
96 uint64_t OpNoForForwarding : 5;
97 // The operand number for the immediate.
98 uint64_t ImmOpNo : 5;
99 // The opcode of the new instruction.
100 uint64_t ImmOpcode : 16;
101 // The size of the immediate.
102 uint64_t ImmWidth : 5;
103 // The immediate should be truncated to N bits.
104 uint64_t TruncateImmTo : 5;
105 // Is the instruction summing the operand
106 uint64_t IsSummingOperands : 1;
107};
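// --- Editor's example (not part of PPCInstrInfo.h) ---------------------------
// A sketch of how an ImmInstrInfo could be filled in to describe rewriting an
// r+r instruction into an r+i form when one input is fed by a load-immediate.
// Every field value below is an illustrative assumption; instrHasImmForm(),
// declared later in this file, is what actually populates this struct.
inline ImmInstrInfo makeExampleImmInstrInfo() {
  ImmInstrInfo III = {};
  III.SignedImm = 1;           // assume the r+i form takes a signed immediate
  III.ImmMustBeMultipleOf = 1; // no alignment restriction on the immediate
  III.IsCommutative = 1;       // assume either source operand may be forwarded
  III.OpNoForForwarding = 2;   // operand to check for a load-immediate def
  III.ImmOpNo = 2;             // where the immediate goes in the new form
  III.ImmOpcode = PPC::ADDI;   // hypothetical r+i opcode
  III.ImmWidth = 16;           // 16-bit immediate field
  III.TruncateImmTo = 0;       // no extra truncation
  return III;
}
// -----------------------------------------------------------------------------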
108
109// Information required to convert an instruction to just a materialized
110// immediate.
111struct LoadImmediateInfo {
112 unsigned Imm : 16;
113 unsigned Is64Bit : 1;
114 unsigned SetCR : 1;
115};
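// --- Editor's example (not part of PPCInstrInfo.h) ---------------------------
// A LoadImmediateInfo describing "materialize 42 into a 64-bit register,
// without the record (CR-setting) form"; replaceInstrWithLI(), declared later
// in this file, consumes descriptions like this one.
inline LoadImmediateInfo makeExampleLoadImmediateInfo() {
  LoadImmediateInfo LII;
  LII.Imm = 42;
  LII.Is64Bit = 1;
  LII.SetCR = 0;
  return LII;
}
// -----------------------------------------------------------------------------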
116
117// Index into the OpcodesForSpill array.
118enum SpillOpcodeKey {
119 SOK_Int4Spill,
120 SOK_Int8Spill,
121 SOK_Float8Spill,
122 SOK_Float4Spill,
123 SOK_CRSpill,
124 SOK_CRBitSpill,
125 SOK_VRVectorSpill,
126 SOK_VSXVectorSpill,
127 SOK_VectorFloat8Spill,
128 SOK_VectorFloat4Spill,
129 SOK_SpillToVSR,
130 SOK_PairedVecSpill,
131 SOK_AccumulatorSpill,
132 SOK_UAccumulatorSpill,
133 SOK_WAccumulatorSpill,
134 SOK_SPESpill,
135 SOK_PairedG8Spill,
136 SOK_LastOpcodeSpill // This must be last on the enum.
137};
138
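// --- Editor's example (not part of PPCInstrInfo.h) ---------------------------
// A simplified sketch of the register-class-to-key mapping that
// PPCInstrInfo::getSpillIndex() performs before indexing the spill-opcode
// tables defined below; the real mapping covers every spillable register
// class, this helper only illustrates the idea for 32- and 64-bit GPR spills.
inline SpillOpcodeKey exampleSpillKeyForGPR(bool Is64Bit) {
  return Is64Bit ? SOK_Int8Spill : SOK_Int4Spill;
}
// -----------------------------------------------------------------------------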
139// Define list of load and store spill opcodes.
140#define NoInstr PPC::INSTRUCTION_LIST_END
141#define Pwr8LoadOpcodes \
142 { \
143 PPC::LWZ, PPC::LD, PPC::LFD, PPC::LFS, PPC::RESTORE_CR, \
144 PPC::RESTORE_CRBIT, PPC::LVX, PPC::LXVD2X, PPC::LXSDX, PPC::LXSSPX, \
145 PPC::SPILLTOVSR_LD, NoInstr, NoInstr, NoInstr, NoInstr, PPC::EVLDD, \
146 PPC::RESTORE_QUADWORD \
147 }
148
149#define Pwr9LoadOpcodes \
150 { \
151 PPC::LWZ, PPC::LD, PPC::LFD, PPC::LFS, PPC::RESTORE_CR, \
152 PPC::RESTORE_CRBIT, PPC::LVX, PPC::LXV, PPC::DFLOADf64, \
153 PPC::DFLOADf32, PPC::SPILLTOVSR_LD, NoInstr, NoInstr, NoInstr, \
154 NoInstr, NoInstr, PPC::RESTORE_QUADWORD \
155 }
156
157#define Pwr10LoadOpcodes \
158 { \
159 PPC::LWZ, PPC::LD, PPC::LFD, PPC::LFS, PPC::RESTORE_CR, \
160 PPC::RESTORE_CRBIT, PPC::LVX, PPC::LXV, PPC::DFLOADf64, \
161 PPC::DFLOADf32, PPC::SPILLTOVSR_LD, PPC::LXVP, PPC::RESTORE_ACC, \
162 PPC::RESTORE_UACC, NoInstr, NoInstr, PPC::RESTORE_QUADWORD \
163 }
164
165#define FutureLoadOpcodes \
166 { \
167 PPC::LWZ, PPC::LD, PPC::LFD, PPC::LFS, PPC::RESTORE_CR, \
168 PPC::RESTORE_CRBIT, PPC::LVX, PPC::LXV, PPC::DFLOADf64, \
169 PPC::DFLOADf32, PPC::SPILLTOVSR_LD, PPC::LXVP, PPC::RESTORE_ACC, \
170 PPC::RESTORE_UACC, PPC::RESTORE_WACC, NoInstr, PPC::RESTORE_QUADWORD \
171 }
172
173#define Pwr8StoreOpcodes \
174 { \
175 PPC::STW, PPC::STD, PPC::STFD, PPC::STFS, PPC::SPILL_CR, PPC::SPILL_CRBIT, \
176 PPC::STVX, PPC::STXVD2X, PPC::STXSDX, PPC::STXSSPX, \
177 PPC::SPILLTOVSR_ST, NoInstr, NoInstr, NoInstr, NoInstr, PPC::EVSTDD, \
178 PPC::SPILL_QUADWORD \
179 }
180
181#define Pwr9StoreOpcodes \
182 { \
183 PPC::STW, PPC::STD, PPC::STFD, PPC::STFS, PPC::SPILL_CR, PPC::SPILL_CRBIT, \
184 PPC::STVX, PPC::STXV, PPC::DFSTOREf64, PPC::DFSTOREf32, \
185 PPC::SPILLTOVSR_ST, NoInstr, NoInstr, NoInstr, NoInstr, NoInstr, \
186 PPC::SPILL_QUADWORD \
187 }
188
189#define Pwr10StoreOpcodes \
190 { \
191 PPC::STW, PPC::STD, PPC::STFD, PPC::STFS, PPC::SPILL_CR, PPC::SPILL_CRBIT, \
192 PPC::STVX, PPC::STXV, PPC::DFSTOREf64, PPC::DFSTOREf32, \
193 PPC::SPILLTOVSR_ST, PPC::STXVP, PPC::SPILL_ACC, PPC::SPILL_UACC, \
194 NoInstr, NoInstr, PPC::SPILL_QUADWORD \
195 }
196
197#define FutureStoreOpcodes \
198 { \
199 PPC::STW, PPC::STD, PPC::STFD, PPC::STFS, PPC::SPILL_CR, PPC::SPILL_CRBIT, \
200 PPC::STVX, PPC::STXV, PPC::DFSTOREf64, PPC::DFSTOREf32, \
201 PPC::SPILLTOVSR_ST, PPC::STXVP, PPC::SPILL_ACC, PPC::SPILL_UACC, \
202 PPC::SPILL_WACC, NoInstr, PPC::SPILL_QUADWORD \
203 }
204
205// Initialize arrays for load and store spill opcodes on supported subtargets.
206#define StoreOpcodesForSpill \
207 { Pwr8StoreOpcodes, Pwr9StoreOpcodes, Pwr10StoreOpcodes, FutureStoreOpcodes }
208#define LoadOpcodesForSpill \
209 { Pwr8LoadOpcodes, Pwr9LoadOpcodes, Pwr10LoadOpcodes, FutureLoadOpcodes }
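// --- Editor's example (not part of PPCInstrInfo.h) ---------------------------
// The two macros above expand into 4-row tables (Power8, Power9, Power10,
// Future), each row indexed by SpillOpcodeKey. The lookup below mirrors what
// getSpillTarget()/getStoreOpcodeForSpill() effectively do inside the class;
// the local table and the function name are illustrative only.
inline unsigned exampleStoreOpcodeForSpill(unsigned SpillTarget,
                                           SpillOpcodeKey Key) {
  static const unsigned Table[4][SOK_LastOpcodeSpill] = StoreOpcodesForSpill;
  return Table[SpillTarget][Key];
}
// -----------------------------------------------------------------------------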
210
211class PPCSubtarget;
212class PPCInstrInfo : public PPCGenInstrInfo {
213 PPCSubtarget &Subtarget;
214 const PPCRegisterInfo RI;
215 const unsigned StoreSpillOpcodesArray[4][SOK_LastOpcodeSpill] =
216 StoreOpcodesForSpill;
217 const unsigned LoadSpillOpcodesArray[4][SOK_LastOpcodeSpill] =
218 LoadOpcodesForSpill;
219
220 void StoreRegToStackSlot(MachineFunction &MF, unsigned SrcReg, bool isKill,
221 int FrameIdx, const TargetRegisterClass *RC,
222 SmallVectorImpl<MachineInstr *> &NewMIs) const;
223 void LoadRegFromStackSlot(MachineFunction &MF, const DebugLoc &DL,
224 unsigned DestReg, int FrameIdx,
225 const TargetRegisterClass *RC,
226 SmallVectorImpl<MachineInstr *> &NewMIs) const;
227
228 // Replace the instruction with a single LI if possible. \p DefMI must be LI or
229 // LI8.
230 bool simplifyToLI(MachineInstr &MI, MachineInstr &DefMI,
231 unsigned OpNoForForwarding, MachineInstr **KilledDef) const;
232 // If the inst has an imm-form and its register operand is produced by an
233 // ADDI, put the imm into the inst directly and remove the ADDI if possible.
234 bool transformToNewImmFormFedByAdd(MachineInstr &MI, MachineInstr &DefMI,
235 unsigned OpNoForForwarding) const;
236 // If the inst is x-form and has an imm-form and one of its operands is
237 // produced by an LI, put the imm into the inst directly and remove the LI if possible.
238 bool transformToImmFormFedByLI(MachineInstr &MI, const ImmInstrInfo &III,
239 unsigned ConstantOpNo,
240 MachineInstr &DefMI) const;
241 // If the inst is x-form and has an imm-form and one of its operands is produced
242 // by an add-immediate, try to transform it when possible.
243 bool transformToImmFormFedByAdd(MachineInstr &MI, const ImmInstrInfo &III,
244 unsigned ConstantOpNo, MachineInstr &DefMI,
245 bool KillDefMI) const;
246 // Check whether the instruction 'MI' has any operand that could be forwarded
247 // from an instruction that feeds it. If so, return the Def of that operand,
248 // and set OpNoForForwarding to the operand index in 'MI' for that Def.
249 // If we see another use of this Def between
250 // the Def and the MI, SeenIntermediateUse becomes 'true'.
251 MachineInstr *getForwardingDefMI(MachineInstr &MI,
252 unsigned &OpNoForForwarding,
253 bool &SeenIntermediateUse) const;
254
255 // Can the user MI have its source at index \p OpNoForForwarding
256 // forwarded from an add-immediate that feeds it?
257 bool isUseMIElgibleForForwarding(MachineInstr &MI, const ImmInstrInfo &III,
258 unsigned OpNoForForwarding) const;
259 bool isDefMIElgibleForForwarding(MachineInstr &DefMI,
260 const ImmInstrInfo &III,
261 MachineOperand *&ImmMO,
262 MachineOperand *&RegMO) const;
263 bool isImmElgibleForForwarding(const MachineOperand &ImmMO,
264 const MachineInstr &DefMI,
265 const ImmInstrInfo &III,
266 int64_t &Imm,
267 int64_t BaseImm = 0) const;
268 bool isRegElgibleForForwarding(const MachineOperand &RegMO,
269 const MachineInstr &DefMI,
270 const MachineInstr &MI, bool KillDefMI,
271 bool &IsFwdFeederRegKilled,
272 bool &SeenIntermediateUse) const;
273 unsigned getSpillTarget() const;
274 ArrayRef<unsigned> getStoreOpcodesForSpillArray() const;
275 ArrayRef<unsigned> getLoadOpcodesForSpillArray() const;
276 unsigned getSpillIndex(const TargetRegisterClass *RC) const;
277 int16_t getFMAOpIdxInfo(unsigned Opcode) const;
278 void reassociateFMA(MachineInstr &Root, MachineCombinerPattern Pattern,
279 SmallVectorImpl<MachineInstr *> &InsInstrs,
280 SmallVectorImpl<MachineInstr *> &DelInstrs,
281 DenseMap<unsigned, unsigned> &InstrIdxForVirtReg) const;
282 MachineInstr *
283 generateLoadForNewConst(unsigned Idx, MachineInstr *MI, Type *Ty,
284 SmallVectorImpl<MachineInstr *> &InsInstrs) const;
285 virtual void anchor();
286
287protected:
288 /// Commutes the operands in the given instruction.
289 /// The commutable operands are specified by their indices OpIdx1 and OpIdx2.
290 ///
291 /// Do not call this method for a non-commutable instruction or for
292 /// non-commutable pair of operand indices OpIdx1 and OpIdx2.
293 /// Even though the instruction is commutable, the method may still
294 /// fail to commute the operands, null pointer is returned in such cases.
295 ///
296 /// For example, we can commute rlwimi instructions, but only if the
297 /// rotate amt is zero. We also have to munge the immediates a bit.
298 MachineInstr *commuteInstructionImpl(MachineInstr &MI, bool NewMI,
299 unsigned OpIdx1,
300 unsigned OpIdx2) const override;
301
302public:
303 explicit PPCInstrInfo(PPCSubtarget &STI);
304
305 bool isLoadFromConstantPool(MachineInstr *I) const;
306 const Constant *getConstantFromConstantPool(MachineInstr *I) const;
307
308 /// getRegisterInfo - TargetInstrInfo is a superset of MRegister info. As
309 /// such, whenever a client has an instance of instruction info, it should
310 /// always be able to get register info as well (through this method).
311 ///
312 const PPCRegisterInfo &getRegisterInfo() const { return RI; }
313
314 bool isXFormMemOp(unsigned Opcode) const {
315 return get(Opcode).TSFlags & PPCII::XFormMemOp;
316 }
317 bool isPrefixed(unsigned Opcode) const {
318 return get(Opcode).TSFlags & PPCII::Prefixed;
319 }
320 bool isSExt32To64(unsigned Opcode) const {
321 return get(Opcode).TSFlags & PPCII::SExt32To64;
322 }
323 bool isZExt32To64(unsigned Opcode) const {
324 return get(Opcode).TSFlags & PPCII::ZExt32To64;
325 }
326
327 /// Check if Opcode corresponds to a call instruction that should be marked
328 /// with the NOTOC relocation.
329 bool isNoTOCCallInstr(unsigned Opcode) const {
330 if (!get(Opcode).isCall())
331 return false;
332
333 switch (Opcode) {
334 default:
335#ifndef NDEBUG
336 llvm_unreachable("Unknown call opcode");
337#endif
338 return false;
339 case PPC::BL8_NOTOC:
340 case PPC::BL8_NOTOC_TLS:
341 case PPC::BL8_NOTOC_RM:
342 return true;
343#ifndef NDEBUG
344 case PPC::BL8:
345 case PPC::BL:
346 case PPC::BL8_TLS:
347 case PPC::BL_TLS:
348 case PPC::BLA8:
349 case PPC::BLA:
350 case PPC::BCCL:
351 case PPC::BCCLA:
352 case PPC::BCL:
353 case PPC::BCLn:
354 case PPC::BL8_NOP:
355 case PPC::BL_NOP:
356 case PPC::BL8_NOP_TLS:
357 case PPC::BLA8_NOP:
358 case PPC::BCTRL8:
359 case PPC::BCTRL:
360 case PPC::BCCCTRL8:
361 case PPC::BCCCTRL:
362 case PPC::BCCTRL8:
363 case PPC::BCCTRL:
364 case PPC::BCCTRL8n:
365 case PPC::BCCTRLn:
366 case PPC::BL8_RM:
367 case PPC::BLA8_RM:
368 case PPC::BL8_NOP_RM:
369 case PPC::BLA8_NOP_RM:
370 case PPC::BCTRL8_RM:
371 case PPC::BCTRL8_LDinto_toc:
372 case PPC::BCTRL8_LDinto_toc_RM:
373 case PPC::BL8_TLS_:
374 case PPC::TCRETURNdi8:
375 case PPC::TCRETURNai8:
376 case PPC::TCRETURNri8:
377 case PPC::TAILBCTR8:
378 case PPC::TAILB8:
379 case PPC::TAILBA8:
380 case PPC::BCLalways:
381 case PPC::BLRL:
382 case PPC::BCCLRL:
383 case PPC::BCLRL:
384 case PPC::BCLRLn:
385 case PPC::BDZL:
386 case PPC::BDNZL:
387 case PPC::BDZLA:
388 case PPC::BDNZLA:
389 case PPC::BDZLp:
390 case PPC::BDNZLp:
391 case PPC::BDZLAp:
392 case PPC::BDNZLAp:
393 case PPC::BDZLm:
394 case PPC::BDNZLm:
395 case PPC::BDZLAm:
396 case PPC::BDNZLAm:
397 case PPC::BDZLRL:
398 case PPC::BDNZLRL:
399 case PPC::BDZLRLp:
400 case PPC::BDNZLRLp:
401 case PPC::BDZLRLm:
402 case PPC::BDNZLRLm:
403 case PPC::BL_RM:
404 case PPC::BLA_RM:
405 case PPC::BL_NOP_RM:
406 case PPC::BCTRL_RM:
407 case PPC::TCRETURNdi:
408 case PPC::TCRETURNai:
409 case PPC::TCRETURNri:
410 case PPC::BCTRL_LWZinto_toc:
411 case PPC::BCTRL_LWZinto_toc_RM:
412 case PPC::TAILBCTR:
413 case PPC::TAILB:
414 case PPC::TAILBA:
415 return false;
416#endif
417 }
418 }
419
420 static bool isSameClassPhysRegCopy(unsigned Opcode) {
421 unsigned CopyOpcodes[] = {PPC::OR, PPC::OR8, PPC::FMR,
422 PPC::VOR, PPC::XXLOR, PPC::XXLORf,
423 PPC::XSCPSGNDP, PPC::MCRF, PPC::CROR,
424 PPC::EVOR, -1U};
425 for (int i = 0; CopyOpcodes[i] != -1U; i++)
426 if (Opcode == CopyOpcodes[i])
427 return true;
428 return false;
429 }
430
431 ScheduleHazardRecognizer *
432 CreateTargetHazardRecognizer(const TargetSubtargetInfo *STI,
433 const ScheduleDAG *DAG) const override;
434 ScheduleHazardRecognizer *
435 CreateTargetPostRAHazardRecognizer(const InstrItineraryData *II,
436 const ScheduleDAG *DAG) const override;
437
438 unsigned getInstrLatency(const InstrItineraryData *ItinData,
439 const MachineInstr &MI,
440 unsigned *PredCost = nullptr) const override;
441
442 int getOperandLatency(const InstrItineraryData *ItinData,
443 const MachineInstr &DefMI, unsigned DefIdx,
444 const MachineInstr &UseMI,
445 unsigned UseIdx) const override;
446 int getOperandLatency(const InstrItineraryData *ItinData,
447 SDNode *DefNode, unsigned DefIdx,
448 SDNode *UseNode, unsigned UseIdx) const override {
449 return PPCGenInstrInfo::getOperandLatency(ItinData, DefNode, DefIdx,
450 UseNode, UseIdx);
451 }
452
453 bool hasLowDefLatency(const TargetSchedModel &SchedModel,
454 const MachineInstr &DefMI,
455 unsigned DefIdx) const override {
456 // Machine LICM should hoist all instructions in low-register-pressure
457 // situations; none are sufficiently free to justify leaving in a loop
458 // body.
459 return false;
460 }
461
462 bool useMachineCombiner() const override {
463 return true;
464 }
465
466 /// When getMachineCombinerPatterns() finds patterns, this function generates
467 /// the instructions that could replace the original code sequence
468 void genAlternativeCodeSequence(
469 MachineInstr &Root, MachineCombinerPattern Pattern,
470 SmallVectorImpl<MachineInstr *> &InsInstrs,
471 SmallVectorImpl<MachineInstr *> &DelInstrs,
472 DenseMap<unsigned, unsigned> &InstrIdxForVirtReg) const override;
473
474 /// Return true when there is potentially a faster code sequence for a fma
475 /// chain ending in \p Root. All potential patterns are output in the \p
476 /// P array.
477 bool getFMAPatterns(MachineInstr &Root,
478 SmallVectorImpl<MachineCombinerPattern> &P,
479 bool DoRegPressureReduce) const;
480
481 /// Return true when there is potentially a faster code sequence
482 /// for an instruction chain ending in <Root>. All potential patterns are
483 /// output in the <Pattern> array.
484 bool getMachineCombinerPatterns(MachineInstr &Root,
485 SmallVectorImpl<MachineCombinerPattern> &P,
486 bool DoRegPressureReduce) const override;
487
488 /// On PowerPC, we leverage machine combiner pass to reduce register pressure
489 /// when the register pressure is high for one BB.
490 /// Return true if register pressure for \p MBB is high and ABI is supported
491 /// to reduce register pressure. Otherwise return false.
492 bool shouldReduceRegisterPressure(
493 const MachineBasicBlock *MBB,
494 const RegisterClassInfo *RegClassInfo) const override;
495
496 /// Fixup the placeholders we put in genAlternativeCodeSequence() for
497 /// MachineCombiner.
498 void
499 finalizeInsInstrs(MachineInstr &Root, MachineCombinerPattern &P,
500 SmallVectorImpl<MachineInstr *> &InsInstrs) const override;
501
502 bool isAssociativeAndCommutative(const MachineInstr &Inst,
503 bool Invert) const override;
504
505 /// On PowerPC, we try to reassociate FMA chains, which will increase
506 /// instruction size. Set extension resource length limit to 1 for edge case.
507 /// Resource Length is calculated by scaled resource usage in getCycles().
508 /// Because of the division in getCycles(), it returns different cycles due to
509 /// legacy scaled resource usage. So new resource length may be same with
510 /// legacy or 1 bigger than legacy.
511 /// We need to exclude the 1 bigger case even though the resource length is
512 /// not preserved, for more FMA chain reassociations on PowerPC.
513 int getExtendResourceLenLimit() const override { return 1; }
514
515 void setSpecialOperandAttr(MachineInstr &OldMI1, MachineInstr &OldMI2,
516 MachineInstr &NewMI1,
517 MachineInstr &NewMI2) const override;
518
519 // PowerPC specific version of setSpecialOperandAttr that copies Flags to MI
520 // and clears nuw, nsw, and exact flags.
521 void setSpecialOperandAttr(MachineInstr &MI, uint32_t Flags) const;
522
523 bool isCoalescableExtInstr(const MachineInstr &MI,
524 Register &SrcReg, Register &DstReg,
525 unsigned &SubIdx) const override;
526 unsigned isLoadFromStackSlot(const MachineInstr &MI,
527 int &FrameIndex) const override;
528 bool isReallyTriviallyReMaterializable(const MachineInstr &MI) const override;
529 unsigned isStoreToStackSlot(const MachineInstr &MI,
530 int &FrameIndex) const override;
531
532 bool findCommutedOpIndices(const MachineInstr &MI, unsigned &SrcOpIdx1,
533 unsigned &SrcOpIdx2) const override;
534
535 void insertNoop(MachineBasicBlock &MBB,
536 MachineBasicBlock::iterator MI) const override;
537
538
539 // Branch analysis.
540 bool analyzeBranch(MachineBasicBlock &MBB, MachineBasicBlock *&TBB,
541 MachineBasicBlock *&FBB,
542 SmallVectorImpl<MachineOperand> &Cond,
543 bool AllowModify) const override;
544 unsigned removeBranch(MachineBasicBlock &MBB,
545 int *BytesRemoved = nullptr) const override;
546 unsigned insertBranch(MachineBasicBlock &MBB, MachineBasicBlock *TBB,
547 MachineBasicBlock *FBB, ArrayRef<MachineOperand> Cond,
548 const DebugLoc &DL,
549 int *BytesAdded = nullptr) const override;
550
551 // Select analysis.
552 bool canInsertSelect(const MachineBasicBlock &, ArrayRef<MachineOperand> Cond,
553 Register, Register, Register, int &, int &,
554 int &) const override;
555 void insertSelect(MachineBasicBlock &MBB, MachineBasicBlock::iterator MI,
556 const DebugLoc &DL, Register DstReg,
557 ArrayRef<MachineOperand> Cond, Register TrueReg,
558 Register FalseReg) const override;
559
560 void copyPhysReg(MachineBasicBlock &MBB, MachineBasicBlock::iterator I,
561 const DebugLoc &DL, MCRegister DestReg, MCRegister SrcReg,
562 bool KillSrc) const override;
563
564 void storeRegToStackSlot(MachineBasicBlock &MBB,
565 MachineBasicBlock::iterator MBBI, Register SrcReg,
566 bool isKill, int FrameIndex,
567 const TargetRegisterClass *RC,
568 const TargetRegisterInfo *TRI,
569 Register VReg) const override;
570
571 // Emits a register spill without updating the register class for vector
572 // registers. This ensures that when we spill a vector register the
573 // element order in the register is the same as it was in memory.
574 void storeRegToStackSlotNoUpd(MachineBasicBlock &MBB,
575 MachineBasicBlock::iterator MBBI,
576 unsigned SrcReg, bool isKill, int FrameIndex,
577 const TargetRegisterClass *RC,
578 const TargetRegisterInfo *TRI) const;
579
580 void loadRegFromStackSlot(MachineBasicBlock &MBB,
581 MachineBasicBlock::iterator MBBI, Register DestReg,
582 int FrameIndex, const TargetRegisterClass *RC,
583 const TargetRegisterInfo *TRI,
584 Register VReg) const override;
585
586 // Emits a register reload without updating the register class for vector
587 // registers. This ensures that when we reload a vector register the
588 // element order in the register is the same as it was in memory.
589 void loadRegFromStackSlotNoUpd(MachineBasicBlock &MBB,
590 MachineBasicBlock::iterator MBBI,
591 unsigned DestReg, int FrameIndex,
592 const TargetRegisterClass *RC,
593 const TargetRegisterInfo *TRI) const;
594
595 unsigned getStoreOpcodeForSpill(const TargetRegisterClass *RC) const;
596
597 unsigned getLoadOpcodeForSpill(const TargetRegisterClass *RC) const;
598
599 bool
600 reverseBranchCondition(SmallVectorImpl<MachineOperand> &Cond) const override;
601
602 bool FoldImmediate(MachineInstr &UseMI, MachineInstr &DefMI, Register Reg,
603 MachineRegisterInfo *MRI) const override;
604
605 bool onlyFoldImmediate(MachineInstr &UseMI, MachineInstr &DefMI,
606 Register Reg) const;
607
608 // If conversion by predication (only supported by some branch instructions).
609 // All of the profitability checks always return true; it is always
610 // profitable to use the predicated branches.
611 bool isProfitableToIfCvt(MachineBasicBlock &MBB,
612 unsigned NumCycles, unsigned ExtraPredCycles,
613 BranchProbability Probability) const override {
614 return true;
615 }
616
617 bool isProfitableToIfCvt(MachineBasicBlock &TMBB,
618 unsigned NumT, unsigned ExtraT,
619 MachineBasicBlock &FMBB,
620 unsigned NumF, unsigned ExtraF,
621 BranchProbability Probability) const override;
622
623 bool isProfitableToDupForIfCvt(MachineBasicBlock &MBB, unsigned NumCycles,
624 BranchProbability Probability) const override {
625 return true;
626 }
627
628 bool isProfitableToUnpredicate(MachineBasicBlock &TMBB,
629 MachineBasicBlock &FMBB) const override {
630 return false;
631 }
632
633 // Predication support.
634 bool isPredicated(const MachineInstr &MI) const override;
635
636 bool isSchedulingBoundary(const MachineInstr &MI,
637 const MachineBasicBlock *MBB,
638 const MachineFunction &MF) const override;
639
640 bool PredicateInstruction(MachineInstr &MI,
641 ArrayRef<MachineOperand> Pred) const override;
642
643 bool SubsumesPredicate(ArrayRef<MachineOperand> Pred1,
644 ArrayRef<MachineOperand> Pred2) const override;
645
646 bool ClobbersPredicate(MachineInstr &MI, std::vector<MachineOperand> &Pred,
647 bool SkipDead) const override;
648
649 // Comparison optimization.
650
651 bool analyzeCompare(const MachineInstr &MI, Register &SrcReg,
652 Register &SrcReg2, int64_t &Mask,
653 int64_t &Value) const override;
654
655 bool optimizeCompareInstr(MachineInstr &CmpInstr, Register SrcReg,
656 Register SrcReg2, int64_t Mask, int64_t Value,
657 const MachineRegisterInfo *MRI) const override;
658
659
660 /// Return true if we can determine the base operand and byte offset of an
661 /// instruction, plus the memory width. Width is the size of memory that is
662 /// being loaded/stored (e.g. 1, 2, 4, 8).
663 bool getMemOperandWithOffsetWidth(const MachineInstr &LdSt,
664 const MachineOperand *&BaseOp,
665 int64_t &Offset, unsigned &Width,
666 const TargetRegisterInfo *TRI) const;
667
668 bool optimizeCmpPostRA(MachineInstr &MI) const;
669
670 /// Get the base operand and byte offset of an instruction that reads/writes
671 /// memory.
672 bool getMemOperandsWithOffsetWidth(
673 const MachineInstr &LdSt,
674 SmallVectorImpl<const MachineOperand *> &BaseOps, int64_t &Offset,
675 bool &OffsetIsScalable, unsigned &Width,
676 const TargetRegisterInfo *TRI) const override;
677
678 /// Returns true if the two given memory operations should be scheduled
679 /// adjacent.
680 bool shouldClusterMemOps(ArrayRef<const MachineOperand *> BaseOps1,
681 ArrayRef<const MachineOperand *> BaseOps2,
682 unsigned NumLoads, unsigned NumBytes) const override;
683
684 /// Return true if two MIs access different memory addresses and false
685 /// otherwise
686 bool
687 areMemAccessesTriviallyDisjoint(const MachineInstr &MIa,
688 const MachineInstr &MIb) const override;
689
690 /// GetInstSize - Return the number of bytes of code the specified
691 /// instruction may be. This returns the maximum number of bytes.
692 ///
693 unsigned getInstSizeInBytes(const MachineInstr &MI) const override;
694
695 MCInst getNop() const override;
696
697 std::pair<unsigned, unsigned>
698 decomposeMachineOperandsTargetFlags(unsigned TF) const override;
699
700 ArrayRef<std::pair<unsigned, const char *>>
701 getSerializableDirectMachineOperandTargetFlags() const override;
702
703 ArrayRef<std::pair<unsigned, const char *>>
704 getSerializableBitmaskMachineOperandTargetFlags() const override;
705
706 // Expand VSX Memory Pseudo instruction to either a VSX or a FP instruction.
707 bool expandVSXMemPseudo(MachineInstr &MI) const;
708
709 // Lower pseudo instructions after register allocation.
710 bool expandPostRAPseudo(MachineInstr &MI) const override;
711
712 static bool isVFRegister(unsigned Reg) {
713 return Reg >= PPC::VF0 && Reg <= PPC::VF31;
714 }
715 static bool isVRRegister(unsigned Reg) {
716 return Reg >= PPC::V0 && Reg <= PPC::V31;
717 }
718 const TargetRegisterClass *updatedRC(const TargetRegisterClass *RC) const;
719 static int getRecordFormOpcode(unsigned Opcode);
720
721 bool isTOCSaveMI(const MachineInstr &MI) const;
722
723 std::pair<bool, bool>
724 isSignOrZeroExtended(const unsigned Reg, const unsigned BinOpDepth,
725 const MachineRegisterInfo *MRI) const;
726
727 // Return true if the register is sign-extended from 32 to 64 bits.
728 bool isSignExtended(const unsigned Reg,
729 const MachineRegisterInfo *MRI) const {
730 return isSignOrZeroExtended(Reg, 0, MRI).first;
731 }
732
733 // Return true if the register is zero-extended from 32 to 64 bits.
734 bool isZeroExtended(const unsigned Reg,
735 const MachineRegisterInfo *MRI) const {
736 return isSignOrZeroExtended(Reg, 0, MRI).second;
737 }
738
739 bool convertToImmediateForm(MachineInstr &MI,
740 MachineInstr **KilledDef = nullptr) const;
741 bool foldFrameOffset(MachineInstr &MI) const;
742 bool combineRLWINM(MachineInstr &MI, MachineInstr **ToErase = nullptr) const;
743 bool isADDIInstrEligibleForFolding(MachineInstr &ADDIMI, int64_t &Imm) const;
744 bool isADDInstrEligibleForFolding(MachineInstr &ADDMI) const;
745 bool isImmInstrEligibleForFolding(MachineInstr &MI, unsigned &BaseReg,
746 unsigned &XFormOpcode,
747 int64_t &OffsetOfImmInstr,
748 ImmInstrInfo &III) const;
749 bool isValidToBeChangedReg(MachineInstr *ADDMI, unsigned Index,
750 MachineInstr *&ADDIMI, int64_t &OffsetAddi,
751 int64_t OffsetImm) const;
752
753 /// Fixup killed/dead flag for register \p RegNo between instructions [\p
754 /// StartMI, \p EndMI]. Some pre-RA or post-RA transformations may violate
755 /// register killed/dead flags semantics, this function can be called to fix
756 /// up. Before calling this function,
757 /// 1. Ensure that \p RegNo liveness is killed after instruction \p EndMI.
758 /// 2. Ensure that there is no new definition between (\p StartMI, \p EndMI)
759 /// and possible definition for \p RegNo is \p StartMI or \p EndMI. For
760 /// pre-RA cases, definition may be \p StartMI through COPY, \p StartMI
761 /// will be adjusted to the true definition.
762 /// 3. We can do accurate fixup for the case when all instructions between
763 /// [\p StartMI, \p EndMI] are in same basic block.
764 /// 4. For the case when \p StartMI and \p EndMI are not in same basic block,
765 /// we conservatively clear kill flag for all uses of \p RegNo for pre-RA
766 /// and for post-RA, we give an assertion as without reaching definition
767 /// analysis post-RA, \p StartMI and \p EndMI are hard to keep right.
768 void fixupIsDeadOrKill(MachineInstr *StartMI, MachineInstr *EndMI,
769 unsigned RegNo) const;
770 void replaceInstrWithLI(MachineInstr &MI, const LoadImmediateInfo &LII) const;
771 void replaceInstrOperandWithImm(MachineInstr &MI, unsigned OpNo,
772 int64_t Imm) const;
773
774 bool instrHasImmForm(unsigned Opc, bool IsVFReg, ImmInstrInfo &III,
775 bool PostRA) const;
776
777 // In PostRA phase, try to find the instruction that defines \p Reg before \p MI.
778 // \p SeenIntermediateUse is set to true if uses between DefMI and \p MI exist.
779 MachineInstr *getDefMIPostRA(unsigned Reg, MachineInstr &MI,
780 bool &SeenIntermediateUse) const;
781
782 // Materialize immediate after RA.
783 void materializeImmPostRA(MachineBasicBlock &MBB,
784 MachineBasicBlock::iterator MBBI,
785 const DebugLoc &DL, Register Reg,
786 int64_t Imm) const;
787
788 /// getRegNumForOperand - some operands use different numbering schemes
789 /// for the same registers. For example, a VSX instruction may have any of
790 /// vs0-vs63 allocated whereas an Altivec instruction could only have
791 /// vs32-vs63 allocated (numbered as v0-v31). This function returns the actual
792 /// register number needed for the opcode/operand number combination.
793 /// The operand number argument will be useful when we need to extend this
794 /// to instructions that use both Altivec and VSX numbering (for different
795 /// operands).
796 static unsigned getRegNumForOperand(const MCInstrDesc &Desc, unsigned Reg,
797 unsigned OpNo) {
798 int16_t regClass = Desc.operands()[OpNo].RegClass;
799 switch (regClass) {
800 // We store F0-F31, VF0-VF31 in MCOperand and it should be F0-F31,
801 // VSX32-VSX63 during encoding/disassembling
802 case PPC::VSSRCRegClassID:
803 case PPC::VSFRCRegClassID:
804 if (isVFRegister(Reg))
805 return PPC::VSX32 + (Reg - PPC::VF0);
806 break;
807 // We store VSL0-VSL31, V0-V31 in MCOperand and it should be VSL0-VSL31,
808 // VSX32-VSX63 during encoding/disassembling
809 case PPC::VSRCRegClassID:
810 if (isVRRegister(Reg))
811 return PPC::VSX32 + (Reg - PPC::V0);
812 break;
813 // Other RegClass doesn't need mapping
814 default:
815 break;
816 }
817 return Reg;
818 }
819
820 /// Check \p Opcode is BDNZ (Decrement CTR and branch if it is still nonzero).
821 bool isBDNZ(unsigned Opcode) const;
822
823 /// Find the hardware loop instruction used to set-up the specified loop.
824 /// On PPC, we have two instructions used to set-up the hardware loop
825 /// (MTCTRloop, MTCTR8loop) with corresponding endloop (BDNZ, BDNZ8)
826 /// instructions to indicate the end of a loop.
827 MachineInstr *
828 findLoopInstr(MachineBasicBlock &PreHeader,
829 SmallPtrSet<MachineBasicBlock *, 8> &Visited) const;
830
831 /// Analyze loop L, which must be a single-basic-block loop, and if the
832 /// conditions can be understood enough produce a PipelinerLoopInfo object.
833 std::unique_ptr<TargetInstrInfo::PipelinerLoopInfo>
834 analyzeLoopForPipelining(MachineBasicBlock *LoopBB) const override;
835};
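// --- Editor's example (not part of PPCInstrInfo.h) ---------------------------
// A hypothetical helper showing a typical use of the TSFlags predicates
// declared above: prefixed PowerPC instructions occupy 8 bytes instead of 4.
// getInstSizeInBytes() models this (plus further cases such as inline asm),
// so treat this only as a simplified sketch.
inline unsigned examplePlainInstrBytes(const PPCInstrInfo &TII,
                                       unsigned Opcode) {
  return TII.isPrefixed(Opcode) ? 8 : 4;
}
// -----------------------------------------------------------------------------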
836
837}
838
839#endif