LLVM 20.0.0git
PPCRegisterInfo.cpp
Go to the documentation of this file.
1//===-- PPCRegisterInfo.cpp - PowerPC Register Information ----------------===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8//
9// This file contains the PowerPC implementation of the TargetRegisterInfo
10// class.
11//
12//===----------------------------------------------------------------------===//
13
14#include "PPCRegisterInfo.h"
15#include "PPCFrameLowering.h"
16#include "PPCInstrBuilder.h"
18#include "PPCSubtarget.h"
19#include "PPCTargetMachine.h"
20#include "llvm/ADT/BitVector.h"
21#include "llvm/ADT/Statistic.h"
31#include "llvm/IR/CallingConv.h"
32#include "llvm/IR/Function.h"
33#include "llvm/IR/Type.h"
35#include "llvm/Support/Debug.h"
41#include <cstdlib>
42
43using namespace llvm;
44
45#define DEBUG_TYPE "reginfo"
46
47#define GET_REGINFO_TARGET_DESC
48#include "PPCGenRegisterInfo.inc"
49
// Statistics counting how often getLargestLegalSuperClass is asked to inflate
// a gprc / g8rc virtual register (see that function below).
50 STATISTIC(InflateGPRC, "Number of gprc inputs for getLargestLegalClass");
51 STATISTIC(InflateGP8RC, "Number of g8rc inputs for getLargestLegalClass");
52
// Command-line knobs for base-pointer usage and spill strategies; each option
// documents itself via cl::desc below.
53 static cl::opt<bool>
54 EnableBasePointer("ppc-use-base-pointer", cl::Hidden, cl::init(true),
55 cl::desc("Enable use of a base pointer for complex stack frames"));
56
57 static cl::opt<bool>
58 AlwaysBasePointer("ppc-always-use-base-pointer", cl::Hidden, cl::init(false),
59 cl::desc("Force the use of a base pointer in every function"));
60
61 static cl::opt<bool>
62 EnableGPRToVecSpills("ppc-enable-gpr-to-vsr-spills", cl::Hidden, cl::init(false),
63 cl::desc("Enable spills from gpr to vsr rather than stack"));
64
65 static cl::opt<bool>
66 StackPtrConst("ppc-stack-ptr-caller-preserved",
67 cl::desc("Consider R1 caller preserved so stack saves of "
68 "caller preserved registers can be LICM candidates"),
69 cl::init(true), cl::Hidden);
70
// NOTE(review): the declarator line for MaxCRBitSpillDist (doxygen line 71,
// presumably 'static cl::opt<unsigned>') is missing from this capture.
72 MaxCRBitSpillDist("ppc-max-crbit-spill-dist",
73 cl::desc("Maximum search distance for definition of CR bit "
74 "spill on ppc"),
75 cl::Hidden, cl::init(100));
76
77 // Copies/moves of physical accumulators are expensive operations
78 // that should be avoided whenever possible. MMA instructions are
79 // meant to be used in performance-sensitive computational kernels.
80 // This option is provided, at least for the time being, to give the
81 // user a tool to detect this expensive operation and either rework
82 // their code or report a compiler bug if that turns out to be the
83 // cause.
84 #ifndef NDEBUG
85 static cl::opt<bool>
86 ReportAccMoves("ppc-report-acc-moves",
87 cl::desc("Emit information about accumulator register spills "
88 "and copies"),
89 cl::Hidden, cl::init(false));
90 #endif
91
93
94static unsigned offsetMinAlignForOpcode(unsigned OpC);
95
// Constructor for PPCRegisterInfo.
// NOTE(review): the declarator line (doxygen line 96) is missing from this
// capture; only the member-initializer list onward is visible.
// Chooses LR8 vs LR as the return-address register depending on 64-bit mode,
// then populates ImmToIdxMap, which maps each immediate-offset (D-form)
// memory opcode to its indexed (X-form) counterpart — presumably consulted
// when a frame offset cannot be encoded as an immediate; confirm against
// this file's frame-index elimination code.
97 : PPCGenRegisterInfo(TM.isPPC64() ? PPC::LR8 : PPC::LR,
98 TM.isPPC64() ? 0 : 1,
99 TM.isPPC64() ? 0 : 1),
100 TM(TM) {
// 32-bit and common loads/stores.
101 ImmToIdxMap[PPC::LD] = PPC::LDX; ImmToIdxMap[PPC::STD] = PPC::STDX;
102 ImmToIdxMap[PPC::LBZ] = PPC::LBZX; ImmToIdxMap[PPC::STB] = PPC::STBX;
103 ImmToIdxMap[PPC::LHZ] = PPC::LHZX; ImmToIdxMap[PPC::LHA] = PPC::LHAX;
104 ImmToIdxMap[PPC::LWZ] = PPC::LWZX; ImmToIdxMap[PPC::LWA] = PPC::LWAX;
105 ImmToIdxMap[PPC::LFS] = PPC::LFSX; ImmToIdxMap[PPC::LFD] = PPC::LFDX;
106 ImmToIdxMap[PPC::STH] = PPC::STHX; ImmToIdxMap[PPC::STW] = PPC::STWX;
107 ImmToIdxMap[PPC::STFS] = PPC::STFSX; ImmToIdxMap[PPC::STFD] = PPC::STFDX;
108 ImmToIdxMap[PPC::ADDI] = PPC::ADD4;
109 ImmToIdxMap[PPC::LWA_32] = PPC::LWAX_32;
110
111 // 64-bit
112 ImmToIdxMap[PPC::LHA8] = PPC::LHAX8; ImmToIdxMap[PPC::LBZ8] = PPC::LBZX8;
113 ImmToIdxMap[PPC::LHZ8] = PPC::LHZX8; ImmToIdxMap[PPC::LWZ8] = PPC::LWZX8;
114 ImmToIdxMap[PPC::STB8] = PPC::STBX8; ImmToIdxMap[PPC::STH8] = PPC::STHX8;
115 ImmToIdxMap[PPC::STW8] = PPC::STWX8; ImmToIdxMap[PPC::STDU] = PPC::STDUX;
116 ImmToIdxMap[PPC::ADDI8] = PPC::ADD8;
117 ImmToIdxMap[PPC::LQ] = PPC::LQX_PSEUDO;
118 ImmToIdxMap[PPC::STQ] = PPC::STQX_PSEUDO;
119
120 // VSX
121 ImmToIdxMap[PPC::DFLOADf32] = PPC::LXSSPX;
122 ImmToIdxMap[PPC::DFLOADf64] = PPC::LXSDX;
123 ImmToIdxMap[PPC::SPILLTOVSR_LD] = PPC::SPILLTOVSR_LDX;
124 ImmToIdxMap[PPC::SPILLTOVSR_ST] = PPC::SPILLTOVSR_STX;
125 ImmToIdxMap[PPC::DFSTOREf32] = PPC::STXSSPX;
126 ImmToIdxMap[PPC::DFSTOREf64] = PPC::STXSDX;
127 ImmToIdxMap[PPC::LXV] = PPC::LXVX;
128 ImmToIdxMap[PPC::LXSD] = PPC::LXSDX;
129 ImmToIdxMap[PPC::LXSSP] = PPC::LXSSPX;
130 ImmToIdxMap[PPC::STXV] = PPC::STXVX;
131 ImmToIdxMap[PPC::STXSD] = PPC::STXSDX;
132 ImmToIdxMap[PPC::STXSSP] = PPC::STXSSPX;
133
134 // SPE
135 ImmToIdxMap[PPC::EVLDD] = PPC::EVLDDX;
136 ImmToIdxMap[PPC::EVSTDD] = PPC::EVSTDDX;
137 ImmToIdxMap[PPC::SPESTW] = PPC::SPESTWX;
138 ImmToIdxMap[PPC::SPELWZ] = PPC::SPELWZX;
139
// Power10 prefixed (P-prefixed) opcodes map to the plain X-forms.
140 // Power10
141 ImmToIdxMap[PPC::PLBZ] = PPC::LBZX; ImmToIdxMap[PPC::PLBZ8] = PPC::LBZX8;
142 ImmToIdxMap[PPC::PLHZ] = PPC::LHZX; ImmToIdxMap[PPC::PLHZ8] = PPC::LHZX8;
143 ImmToIdxMap[PPC::PLHA] = PPC::LHAX; ImmToIdxMap[PPC::PLHA8] = PPC::LHAX8;
144 ImmToIdxMap[PPC::PLWZ] = PPC::LWZX; ImmToIdxMap[PPC::PLWZ8] = PPC::LWZX8;
145 ImmToIdxMap[PPC::PLWA] = PPC::LWAX; ImmToIdxMap[PPC::PLWA8] = PPC::LWAX;
146 ImmToIdxMap[PPC::PLD] = PPC::LDX; ImmToIdxMap[PPC::PSTD] = PPC::STDX;
147
148 ImmToIdxMap[PPC::PSTB] = PPC::STBX; ImmToIdxMap[PPC::PSTB8] = PPC::STBX8;
149 ImmToIdxMap[PPC::PSTH] = PPC::STHX; ImmToIdxMap[PPC::PSTH8] = PPC::STHX8;
150 ImmToIdxMap[PPC::PSTW] = PPC::STWX; ImmToIdxMap[PPC::PSTW8] = PPC::STWX8;
151
152 ImmToIdxMap[PPC::PLFS] = PPC::LFSX; ImmToIdxMap[PPC::PSTFS] = PPC::STFSX;
153 ImmToIdxMap[PPC::PLFD] = PPC::LFDX; ImmToIdxMap[PPC::PSTFD] = PPC::STFDX;
154 ImmToIdxMap[PPC::PLXSSP] = PPC::LXSSPX; ImmToIdxMap[PPC::PSTXSSP] = PPC::STXSSPX;
155 ImmToIdxMap[PPC::PLXSD] = PPC::LXSDX; ImmToIdxMap[PPC::PSTXSD] = PPC::STXSDX;
156 ImmToIdxMap[PPC::PLXV] = PPC::LXVX; ImmToIdxMap[PPC::PSTXV] = PPC::STXVX;
157
158 ImmToIdxMap[PPC::LXVP] = PPC::LXVPX;
159 ImmToIdxMap[PPC::STXVP] = PPC::STXVPX;
160 ImmToIdxMap[PPC::PLXVP] = PPC::LXVPX;
161 ImmToIdxMap[PPC::PSTXVP] = PPC::STXVPX;
162}
163
164/// getPointerRegClass - Return the register class to use to hold pointers.
165/// This is used for addressing modes.
// NOTE(review): the function declarator (doxygen lines 166-167) is missing
// from this capture; only the trailing 'const {' of the signature is visible.
168 const {
169 // Note that PPCInstrInfo::foldImmediate also directly uses this Kind value
170 // when it checks for ZERO folding.
// Kind == 1 requests a class that excludes R0/X0: per the comment in
// getReservedRegs below, r0 is interpreted as the constant 0 in some
// instructions, so it is unsafe as an address base.
171 if (Kind == 1) {
172 if (TM.isPPC64())
173 return &PPC::G8RC_NOX0RegClass;
174 return &PPC::GPRC_NOR0RegClass;
175 }
176
// Default: the full 64-bit or 32-bit GPR class.
177 if (TM.isPPC64())
178 return &PPC::G8RCRegClass;
179 return &PPC::GPRCRegClass;
180}
181
// Return the callee-saved register list for the function's calling
// convention, ABI (SVR4/AIX), bitness, and vector feature set.
182const MCPhysReg*
// NOTE(review): the declarator line (doxygen line 183) is missing from this
// capture.
184 const PPCSubtarget &Subtarget = MF->getSubtarget<PPCSubtarget>();
// NOTE(review): doxygen line 185 is missing — by the AllRegs save lists
// returned below, it presumably opens the CallingConv::AnyReg branch.
186 if (!TM.isPPC64() && Subtarget.isAIXABI())
187 report_fatal_error("AnyReg unimplemented on 32-bit AIX.");
188 if (Subtarget.hasVSX()) {
189 if (Subtarget.pairedVectorMemops())
190 return CSR_64_AllRegs_VSRP_SaveList;
191 if (Subtarget.isAIXABI() && !TM.getAIXExtendedAltivecABI())
192 return CSR_64_AllRegs_AIX_Dflt_VSX_SaveList;
193 return CSR_64_AllRegs_VSX_SaveList;
194 }
195 if (Subtarget.hasAltivec()) {
196 if (Subtarget.isAIXABI() && !TM.getAIXExtendedAltivecABI())
197 return CSR_64_AllRegs_AIX_Dflt_Altivec_SaveList;
198 return CSR_64_AllRegs_Altivec_SaveList;
199 }
200 return CSR_64_AllRegs_SaveList;
201 }
202
203 // On PPC64, we might need to save r2 (but only if it is not reserved).
204 // We do not need to treat R2 as callee-saved when using PC-Relative calls
205 // because any direct uses of R2 will cause it to be reserved. If the function
206 // is a leaf or the only uses of R2 are implicit uses for calls, the calls
207 // will use the @notoc relocation which will cause this function to set the
208 // st_other bit to 1, thereby communicating to its caller that it arbitrarily
209 // clobbers the TOC.
210 bool SaveR2 = MF->getRegInfo().isAllocatable(PPC::X2) &&
211 !Subtarget.isUsingPCRelativeCalls();
212
213 // Cold calling convention CSRs.
// NOTE(review): doxygen line 214 is missing — by the ColdCC save lists
// returned below, it presumably opens the CallingConv::Cold branch.
215 if (Subtarget.isAIXABI())
216 report_fatal_error("Cold calling unimplemented on AIX.");
217 if (TM.isPPC64()) {
218 if (Subtarget.pairedVectorMemops())
219 return SaveR2 ? CSR_SVR64_ColdCC_R2_VSRP_SaveList
220 : CSR_SVR64_ColdCC_VSRP_SaveList;
221 if (Subtarget.hasAltivec())
222 return SaveR2 ? CSR_SVR64_ColdCC_R2_Altivec_SaveList
223 : CSR_SVR64_ColdCC_Altivec_SaveList;
224 return SaveR2 ? CSR_SVR64_ColdCC_R2_SaveList
225 : CSR_SVR64_ColdCC_SaveList;
226 }
227 // 32-bit targets.
228 if (Subtarget.pairedVectorMemops())
229 return CSR_SVR32_ColdCC_VSRP_SaveList;
230 else if (Subtarget.hasAltivec())
231 return CSR_SVR32_ColdCC_Altivec_SaveList;
232 else if (Subtarget.hasSPE())
233 return CSR_SVR32_ColdCC_SPE_SaveList;
234 return CSR_SVR32_ColdCC_SaveList;
235 }
236 // Standard calling convention CSRs.
237 if (TM.isPPC64()) {
238 if (Subtarget.pairedVectorMemops()) {
239 if (Subtarget.isAIXABI()) {
240 if (!TM.getAIXExtendedAltivecABI())
241 return SaveR2 ? CSR_PPC64_R2_SaveList : CSR_PPC64_SaveList;
242 return SaveR2 ? CSR_AIX64_R2_VSRP_SaveList : CSR_AIX64_VSRP_SaveList;
243 }
244 return SaveR2 ? CSR_SVR464_R2_VSRP_SaveList : CSR_SVR464_VSRP_SaveList;
245 }
246 if (Subtarget.hasAltivec() &&
247 (!Subtarget.isAIXABI() || TM.getAIXExtendedAltivecABI())) {
248 return SaveR2 ? CSR_PPC64_R2_Altivec_SaveList
249 : CSR_PPC64_Altivec_SaveList;
250 }
251 return SaveR2 ? CSR_PPC64_R2_SaveList : CSR_PPC64_SaveList;
252 }
253 // 32-bit targets.
254 if (Subtarget.isAIXABI()) {
255 if (Subtarget.pairedVectorMemops())
256 return TM.getAIXExtendedAltivecABI() ? CSR_AIX32_VSRP_SaveList
257 : CSR_AIX32_SaveList;
258 if (Subtarget.hasAltivec())
259 return TM.getAIXExtendedAltivecABI() ? CSR_AIX32_Altivec_SaveList
260 : CSR_AIX32_SaveList;
261 return CSR_AIX32_SaveList;
262 }
263 if (Subtarget.pairedVectorMemops())
264 return CSR_SVR432_VSRP_SaveList;
265 if (Subtarget.hasAltivec())
266 return CSR_SVR432_Altivec_SaveList;
267 else if (Subtarget.hasSPE()) {
// PIC 32-bit SPE: S30/S31 are excluded from the save list.
268 if (TM.isPositionIndependent() && !TM.isPPC64())
269 return CSR_SVR432_SPE_NO_S30_31_SaveList;
270 return CSR_SVR432_SPE_SaveList;
271 }
272 return CSR_SVR432_SaveList;
273}
274
// Return the register mask of call-preserved registers for calling
// convention CC, mirroring the save-list selection in getCalleeSavedRegs.
275const uint32_t *
// NOTE(review): the declarator line (doxygen line 276) is missing from this
// capture; the signature continues on the next visible line.
277 CallingConv::ID CC) const {
278 const PPCSubtarget &Subtarget = MF.getSubtarget<PPCSubtarget>();
279 if (CC == CallingConv::AnyReg) {
280 if (Subtarget.hasVSX()) {
281 if (Subtarget.pairedVectorMemops())
282 return CSR_64_AllRegs_VSRP_RegMask;
283 if (Subtarget.isAIXABI() && !TM.getAIXExtendedAltivecABI())
284 return CSR_64_AllRegs_AIX_Dflt_VSX_RegMask;
285 return CSR_64_AllRegs_VSX_RegMask;
286 }
287 if (Subtarget.hasAltivec()) {
288 if (Subtarget.isAIXABI() && !TM.getAIXExtendedAltivecABI())
289 return CSR_64_AllRegs_AIX_Dflt_Altivec_RegMask;
290 return CSR_64_AllRegs_Altivec_RegMask;
291 }
292 return CSR_64_AllRegs_RegMask;
293 }
294
295 if (Subtarget.isAIXABI()) {
296 if (Subtarget.pairedVectorMemops()) {
297 if (!TM.getAIXExtendedAltivecABI())
298 return TM.isPPC64() ? CSR_PPC64_RegMask : CSR_AIX32_RegMask;
299 return TM.isPPC64() ? CSR_AIX64_VSRP_RegMask : CSR_AIX32_VSRP_RegMask;
300 }
301 return TM.isPPC64()
302 ? ((Subtarget.hasAltivec() && TM.getAIXExtendedAltivecABI())
303 ? CSR_PPC64_Altivec_RegMask
304 : CSR_PPC64_RegMask)
305 : ((Subtarget.hasAltivec() && TM.getAIXExtendedAltivecABI())
306 ? CSR_AIX32_Altivec_RegMask
307 : CSR_AIX32_RegMask);
308 }
309
310 if (CC == CallingConv::Cold) {
311 if (TM.isPPC64())
312 return Subtarget.pairedVectorMemops()
313 ? CSR_SVR64_ColdCC_VSRP_RegMask
314 : (Subtarget.hasAltivec() ? CSR_SVR64_ColdCC_Altivec_RegMask
315 : CSR_SVR64_ColdCC_RegMask);
316 else
317 return Subtarget.pairedVectorMemops()
318 ? CSR_SVR32_ColdCC_VSRP_RegMask
319 : (Subtarget.hasAltivec()
320 ? CSR_SVR32_ColdCC_Altivec_RegMask
321 : (Subtarget.hasSPE() ? CSR_SVR32_ColdCC_SPE_RegMask
322 : CSR_SVR32_ColdCC_RegMask));
323 }
324
325 if (TM.isPPC64())
326 return Subtarget.pairedVectorMemops()
327 ? CSR_SVR464_VSRP_RegMask
328 : (Subtarget.hasAltivec() ? CSR_PPC64_Altivec_RegMask
329 : CSR_PPC64_RegMask);
330 else
331 return Subtarget.pairedVectorMemops()
332 ? CSR_SVR432_VSRP_RegMask
333 : (Subtarget.hasAltivec()
334 ? CSR_SVR432_Altivec_RegMask
335 : (Subtarget.hasSPE()
// NOTE(review): doxygen line 336 is missing from this capture — by the two
// masks below it presumably tests TM.isPositionIndependent().
337 ? CSR_SVR432_SPE_NO_S30_31_RegMask
338 : CSR_SVR432_SPE_RegMask)
339 : CSR_SVR432_RegMask));
340}
341
// Return the mask with no registers preserved (used for calls that preserve
// nothing).
342const uint32_t*
// NOTE(review): the declarator line (doxygen line 343) is missing from this
// capture.
344 return CSR_NoRegs_RegMask;
345}
346
// Clear the bits for ZERO, ZERO8, and RM in a stackmap live-out mask; these
// are target pseudo-registers (see getReservedRegs) that must not be
// reported as live-out.
// NOTE(review): the function declarator (doxygen line 347) is missing from
// this capture.
348 for (unsigned PseudoReg : {PPC::ZERO, PPC::ZERO8, PPC::RM})
349 Mask[PseudoReg / 32] &= ~(1u << (PseudoReg % 32));
350}
351
// Build the set of registers that the register allocator must never use for
// this function (pseudo-registers, ABI-reserved registers, FP/BP when
// needed, and unavailable vector registers).
// NOTE(review): the function declarator (doxygen line 352) is missing from
// this capture.
353 BitVector Reserved(getNumRegs());
354 const PPCSubtarget &Subtarget = MF.getSubtarget<PPCSubtarget>();
355 const PPCFrameLowering *TFI = getFrameLowering(MF);
356
357 // The ZERO register is not really a register, but the representation of r0
358 // when used in instructions that treat r0 as the constant 0.
359 markSuperRegs(Reserved, PPC::ZERO);
360
361 // The FP register is also not really a register, but is the representation
362 // of the frame pointer register used by ISD::FRAMEADDR.
363 markSuperRegs(Reserved, PPC::FP);
364
365 // The BP register is also not really a register, but is the representation
366 // of the base pointer register used by setjmp.
367 markSuperRegs(Reserved, PPC::BP);
368
369 // The counter registers must be reserved so that counter-based loops can
370 // be correctly formed (and the mtctr instructions are not DCE'd).
371 markSuperRegs(Reserved, PPC::CTR);
372 markSuperRegs(Reserved, PPC::CTR8);
373
// Stack pointer, link registers, rounding-mode pseudo, and VRSAVE are
// always reserved.
374 markSuperRegs(Reserved, PPC::R1);
375 markSuperRegs(Reserved, PPC::LR);
376 markSuperRegs(Reserved, PPC::LR8);
377 markSuperRegs(Reserved, PPC::RM);
378
379 markSuperRegs(Reserved, PPC::VRSAVE);
380
381 const PPCFunctionInfo *FuncInfo = MF.getInfo<PPCFunctionInfo>();
382 bool UsesTOCBasePtr = FuncInfo->usesTOCBasePtr();
383 // The SVR4 ABI reserves r2 and r13
384 if (Subtarget.isSVR4ABI() || Subtarget.isAIXABI()) {
385 // We only reserve r2 if we need to use the TOC pointer. If we have no
386 // explicit uses of the TOC pointer (meaning we're a leaf function with
387 // no constant-pool loads, etc.) and we have no potential uses inside an
388 // inline asm block, then we can treat r2 as an ordinary callee-saved
389 // register.
390 if (!TM.isPPC64() || UsesTOCBasePtr || MF.hasInlineAsm())
391 markSuperRegs(Reserved, PPC::R2); // System-reserved register.
392
393 if (Subtarget.isSVR4ABI())
394 markSuperRegs(Reserved, PPC::R13); // Small Data Area pointer register.
395 }
396
397 // On PPC64, r13 is the thread pointer. Never allocate this register.
398 if (TM.isPPC64())
399 markSuperRegs(Reserved, PPC::R13);
400
401 if (TFI->needsFP(MF))
402 markSuperRegs(Reserved, PPC::R31);
403
// Base-pointer selection: R29 is used on PIC 32-bit ELF (R30 is the PIC
// base there, reserved just below); otherwise R30 serves as base pointer.
404 bool IsPositionIndependent = TM.isPositionIndependent();
405 if (hasBasePointer(MF)) {
406 if (Subtarget.is32BitELFABI() && IsPositionIndependent)
407 markSuperRegs(Reserved, PPC::R29);
408 else
409 markSuperRegs(Reserved, PPC::R30);
410 }
411
412 if (Subtarget.is32BitELFABI() && IsPositionIndependent)
413 markSuperRegs(Reserved, PPC::R30);
414
415 // Reserve Altivec registers when Altivec is unavailable.
416 if (!Subtarget.hasAltivec())
417 for (MCRegister Reg : PPC::VRRCRegClass)
418 markSuperRegs(Reserved, Reg);
419
420 if (Subtarget.isAIXABI() && Subtarget.hasAltivec() &&
// NOTE(review): doxygen line 421 (the rest of this condition, presumably
// '!TM.getAIXExtendedAltivecABI()) {') is missing from this capture.
422 // In the AIX default Altivec ABI, vector registers VR20-VR31 are reserved
423 // and cannot be used.
424 for (auto Reg : CSR_Altivec_SaveList) {
425 if (Reg == 0)
426 break;
427 markSuperRegs(Reserved, Reg);
428 for (MCRegAliasIterator AS(Reg, this, true); AS.isValid(); ++AS) {
429 Reserved.set(*AS);
430 }
431 }
432 }
433
434 assert(checkAllSuperRegsMarked(Reserved));
435 return Reserved;
436}
437
// Return true if inline asm may clobber PhysReg: anything not reserved,
// plus CTR/LR which are reserved yet still clobberable by asm.
// NOTE(review): the first line of the declarator (doxygen line 438) is
// missing from this capture.
439 MCRegister PhysReg) const {
440 // CTR and LR registers are always reserved, but they are asm clobberable.
441 if (PhysReg == PPC::CTR || PhysReg == PPC::CTR8 || PhysReg == PPC::LR ||
442 PhysReg == PPC::LR8)
443 return true;
444
445 return !getReservedRegs(MF).test(PhysReg);
446}
447
// Decide whether frame-index elimination will need a scavenged register:
// true when the frame is too large for D-form immediates, or when any
// callee-save spill needs an X-form (indexed) or quadword memory op.
// NOTE(review): the function declarator (doxygen line 448) is missing from
// this capture.
449 const PPCSubtarget &Subtarget = MF.getSubtarget<PPCSubtarget>();
450 const PPCInstrInfo *InstrInfo = Subtarget.getInstrInfo();
451 const MachineFrameInfo &MFI = MF.getFrameInfo();
452 const std::vector<CalleeSavedInfo> &Info = MFI.getCalleeSavedInfo();
453
454 LLVM_DEBUG(dbgs() << "requiresFrameIndexScavenging for " << MF.getName()
455 << ".\n");
456 // If the callee saved info is invalid we have to default to true for safety.
457 if (!MFI.isCalleeSavedInfoValid()) {
458 LLVM_DEBUG(dbgs() << "TRUE - Invalid callee saved info.\n");
459 return true;
460 }
461
462 // We will require the use of X-Forms because the frame is larger than what
463 // can be represented in signed 16 bits that fit in the immediate of a D-Form.
464 // If we need an X-Form then we need a register to store the address offset.
465 unsigned FrameSize = MFI.getStackSize();
466 // Signed 16 bits means that the FrameSize cannot be more than 15 bits.
467 if (FrameSize & ~0x7FFF) {
468 LLVM_DEBUG(dbgs() << "TRUE - Frame size is too large for D-Form.\n");
469 return true;
470 }
471
472 // The callee saved info is valid so it can be traversed.
473 // Checking for registers that need saving that do not have load or store
474 // forms where the address offset is an immediate.
475 for (const CalleeSavedInfo &CSI : Info) {
476 // If the spill is to a register no scavenging is required.
477 if (CSI.isSpilledToReg())
478 continue;
479
480 int FrIdx = CSI.getFrameIdx();
481 Register Reg = CSI.getReg();
482
// NOTE(review): doxygen line 483 is missing from this capture — it
// presumably defines RC (the register class for Reg) used just below.
484 unsigned Opcode = InstrInfo->getStoreOpcodeForSpill(RC);
485 if (!MFI.isFixedObjectIndex(FrIdx)) {
486 // This is not a fixed object. If it requires alignment then we may still
487 // need to use the XForm.
488 if (offsetMinAlignForOpcode(Opcode) > 1) {
489 LLVM_DEBUG(dbgs() << "Memory Operand: " << InstrInfo->getName(Opcode)
490 << " for register " << printReg(Reg, this) << ".\n");
491 LLVM_DEBUG(dbgs() << "TRUE - Not fixed frame object that requires "
492 << "alignment.\n");
493 return true;
494 }
495 }
496
497 // This is either:
498 // 1) A fixed frame index object which we know are aligned so
499 // as long as we have a valid DForm/DSForm/DQForm (non XForm) we don't
500 // need to consider the alignment here.
501 // 2) A not fixed object but in that case we now know that the min required
502 // alignment is no more than 1 based on the previous check.
503 if (InstrInfo->isXFormMemOp(Opcode)) {
504 LLVM_DEBUG(dbgs() << "Memory Operand: " << InstrInfo->getName(Opcode)
505 << " for register " << printReg(Reg, this) << ".\n");
506 LLVM_DEBUG(dbgs() << "TRUE - Memory operand is X-Form.\n");
507 return true;
508 }
509
510 // This is a spill/restore of a quadword.
511 if ((Opcode == PPC::RESTORE_QUADWORD) || (Opcode == PPC::SPILL_QUADWORD)) {
512 LLVM_DEBUG(dbgs() << "Memory Operand: " << InstrInfo->getName(Opcode)
513 << " for register " << printReg(Reg, this) << ".\n");
514 LLVM_DEBUG(dbgs() << "TRUE - Memory operand is a quadword.\n");
515 return true;
516 }
517 }
518 LLVM_DEBUG(dbgs() << "FALSE - Scavenging is not required.\n");
519 return false;
520}
521
// Allow virtual base registers except when ROP protection is enabled.
// NOTE(review): the first line of the declarator (doxygen line 522) is
// missing from this capture.
523 const MachineFunction &MF) const {
524 const PPCSubtarget &Subtarget = MF.getSubtarget<PPCSubtarget>();
525 // Do not use virtual base registers when ROP protection is turned on.
526 // Virtual base registers break the layout of the local variable space and may
527 // push the ROP Hash location past the 512 byte range of the ROP store
528 // instruction.
529 return !Subtarget.hasROPProtect();
530}
531
// Return true if PhysReg's value is known to survive a call within this
// function: the reserved TOC pointer (X2/R2), or the stack pointer when the
// StackPtrConst option is on and the SP never moves mid-function.
// NOTE(review): the first line of the declarator (doxygen line 532) and
// doxygen line 534 are missing from this capture.
533 const MachineFunction &MF) const {
535 const PPCSubtarget &Subtarget = MF.getSubtarget<PPCSubtarget>();
536 const MachineFrameInfo &MFI = MF.getFrameInfo();
537
// Only meaningful for the 64-bit ELF and AIX ABIs.
538 if (!Subtarget.is64BitELFABI() && !Subtarget.isAIXABI())
539 return false;
540 if (PhysReg == Subtarget.getTOCPointerRegister())
541 // X2/R2 is guaranteed to be preserved within a function if it is reserved.
542 // The reason it's reserved is that it's the TOC pointer (and the function
543 // uses the TOC). In functions where it isn't reserved (i.e. leaf functions
544 // with no TOC access), we can't claim that it is preserved.
545 return (getReservedRegs(MF).test(PhysReg));
546 if (StackPtrConst && PhysReg == Subtarget.getStackPointerRegister() &&
547 !MFI.hasVarSizedObjects() && !MFI.hasOpaqueSPAdjustment())
548 // The value of the stack pointer does not change within a function after
549 // the prologue and before the epilogue if there are no dynamic allocations
550 // and no inline asm which clobbers X1/R1.
551 return true;
552 return false;
553}
554
// Provide MMA-aware allocation hints: for copies into UACC/ACC registers,
// hint the source toward the physical VSRp/ACC/UACC register that matches
// the already-assigned destination.
// NOTE(review): the leading lines of the declarator (doxygen lines 555-557,
// carrying VirtReg, Order, and Hints parameters used below) are missing from
// this capture.
558 const MachineFunction &MF,
559 const VirtRegMap *VRM,
560 const LiveRegMatrix *Matrix) const {
561 const MachineRegisterInfo *MRI = &MF.getRegInfo();
562
563 // Call the base implementation first to set any hints based on the usual
564 // heuristics and decide what the return value should be. We want to return
565 // the same value returned by the base implementation. If the base
566 // implementation decides to return true and force the allocation then we
567 // will leave it as such. On the other hand if the base implementation
568 // decides to return false the following code will not force the allocation
569 // as we are just looking to provide a hint.
570 bool BaseImplRetVal = TargetRegisterInfo::getRegAllocationHints(
571 VirtReg, Order, Hints, MF, VRM, Matrix);
572
573 // Don't use the allocation hints for ISAFuture.
574 // The WACC registers used in ISAFuture are unlike the ACC registers on
575 // Power 10 and so this logic to register allocation hints does not apply.
576 if (MF.getSubtarget<PPCSubtarget>().isISAFuture())
577 return BaseImplRetVal;
578
579 // We are interested in instructions that copy values to ACC/UACC.
580 // The copy into UACC will be simply a COPY to a subreg so we
581 // want to allocate the corresponding physical subreg for the source.
582 // The copy into ACC will be a BUILD_UACC so we want to allocate
583 // the same number UACC for the source.
584 const TargetRegisterClass *RegClass = MRI->getRegClass(VirtReg);
585 for (MachineInstr &Use : MRI->reg_nodbg_instructions(VirtReg)) {
586 const MachineOperand *ResultOp = nullptr;
587 Register ResultReg;
588 switch (Use.getOpcode()) {
589 case TargetOpcode::COPY: {
590 ResultOp = &Use.getOperand(0);
591 ResultReg = ResultOp->getReg();
// Only hint when the destination is a UACC-class vreg that already has a
// physical assignment in the VirtRegMap.
592 if (ResultReg.isVirtual() &&
593 MRI->getRegClass(ResultReg)->contains(PPC::UACC0) &&
594 VRM->hasPhys(ResultReg)) {
595 Register UACCPhys = VRM->getPhys(ResultReg);
596 Register HintReg;
597 if (RegClass->contains(PPC::VSRp0)) {
598 HintReg = getSubReg(UACCPhys, ResultOp->getSubReg());
599 // Ensure that the hint is a VSRp register.
600 if (HintReg >= PPC::VSRp0 && HintReg <= PPC::VSRp31)
601 Hints.push_back(HintReg);
602 } else if (RegClass->contains(PPC::ACC0)) {
// Hint the ACC register with the same index as the assigned UACC.
603 HintReg = PPC::ACC0 + (UACCPhys - PPC::UACC0);
604 if (HintReg >= PPC::ACC0 && HintReg <= PPC::ACC7)
605 Hints.push_back(HintReg);
606 }
607 }
608 break;
609 }
610 case PPC::BUILD_UACC: {
611 ResultOp = &Use.getOperand(0);
612 ResultReg = ResultOp->getReg();
613 if (MRI->getRegClass(ResultReg)->contains(PPC::ACC0) &&
614 VRM->hasPhys(ResultReg)) {
615 Register ACCPhys = VRM->getPhys(ResultReg);
616 assert((ACCPhys >= PPC::ACC0 && ACCPhys <= PPC::ACC7) &&
617 "Expecting an ACC register for BUILD_UACC.");
// Hint the UACC register with the same index as the assigned ACC.
618 Register HintReg = PPC::UACC0 + (ACCPhys - PPC::ACC0);
619 Hints.push_back(HintReg);
620 }
621 break;
622 }
623 }
624 }
625 return BaseImplRetVal;
626}
627
// Return the register-pressure limit for a register class: the class size
// minus registers withheld for safety (and the frame pointer / AIX default
// Altivec ABI reservations where applicable).
// NOTE(review): the first line of the declarator (doxygen line 628, carrying
// the RC parameter) is missing from this capture.
629 MachineFunction &MF) const {
630 const PPCFrameLowering *TFI = getFrameLowering(MF);
631 const unsigned DefaultSafety = 1;
632
633 switch (RC->getID()) {
634 default:
635 return 0;
636 case PPC::G8RC_NOX0RegClassID:
637 case PPC::GPRC_NOR0RegClassID:
638 case PPC::SPERCRegClassID:
639 case PPC::G8RCRegClassID:
640 case PPC::GPRCRegClassID: {
// One fewer GPR is available when a frame pointer is required.
641 unsigned FP = TFI->hasFP(MF) ? 1 : 0;
642 return 32 - FP - DefaultSafety;
643 }
644 case PPC::F4RCRegClassID:
645 case PPC::F8RCRegClassID:
646 case PPC::VSLRCRegClassID:
647 return 32 - DefaultSafety;
648 case PPC::VFRCRegClassID:
649 case PPC::VRRCRegClassID: {
650 const PPCSubtarget &Subtarget = MF.getSubtarget<PPCSubtarget>();
651 // Vector registers VR20-VR31 are reserved and cannot be used in the default
652 // Altivec ABI on AIX.
653 if (!TM.getAIXExtendedAltivecABI() && Subtarget.isAIXABI())
654 return 20 - DefaultSafety;
655 }
656 return 32 - DefaultSafety;
657 case PPC::VSFRCRegClassID:
658 case PPC::VSSRCRegClassID:
659 case PPC::VSRCRegClassID: {
660 const PPCSubtarget &Subtarget = MF.getSubtarget<PPCSubtarget>();
661 if (!TM.getAIXExtendedAltivecABI() && Subtarget.isAIXABI())
662 // Vector registers VR20-VR31 are reserved and cannot be used in the
663 // default Altivec ABI on AIX.
664 return 52 - DefaultSafety;
665 }
666 return 64 - DefaultSafety;
667 case PPC::CRRCRegClassID:
668 return 8 - DefaultSafety;
669 }
670}
671
// Return the largest legal super-register class for RC, inflating
// sub-register classes to the full VSX set when the subtarget's features
// permit it.
// NOTE(review): the leading declarator lines (doxygen lines 672-673) are
// missing from this capture.
674 const MachineFunction &MF) const {
675 const PPCSubtarget &Subtarget = MF.getSubtarget<PPCSubtarget>();
676 const auto *DefaultSuperclass =
// NOTE(review): doxygen line 677 (the initializer for DefaultSuperclass,
// presumably the base TargetRegisterInfo::getLargestLegalSuperClass call)
// is missing from this capture.
678 if (Subtarget.hasVSX()) {
679 // With VSX, we can inflate various sub-register classes to the full VSX
680 // register set.
681
682 // For Power9 we allow the user to enable GPR to vector spills.
683 // FIXME: Currently limited to spilling GP8RC. A follow on patch will add
684 // support to spill GPRC.
685 if (TM.isELFv2ABI() || Subtarget.isAIXABI()) {
686 if (Subtarget.hasP9Vector() && EnableGPRToVecSpills &&
687 RC == &PPC::G8RCRegClass) {
688 InflateGP8RC++;
689 return &PPC::SPILLTOVSRRCRegClass;
690 }
// Count (statistic only) GPRC candidates; inflation not yet supported.
691 if (RC == &PPC::GPRCRegClass && EnableGPRToVecSpills)
692 InflateGPRC++;
693 }
694
695 for (unsigned SuperID : RC->superclasses()) {
// Only consider super-classes whose registers are the same size as RC's.
696 if (getRegSizeInBits(*getRegClass(SuperID)) != getRegSizeInBits(*RC))
697 continue;
698
699 switch (SuperID) {
700 case PPC::VSSRCRegClassID:
701 return Subtarget.hasP8Vector() ? getRegClass(SuperID)
702 : DefaultSuperclass;
703 case PPC::VSFRCRegClassID:
704 case PPC::VSRCRegClassID:
705 return getRegClass(SuperID);
706 case PPC::VSRpRCRegClassID:
707 return Subtarget.pairedVectorMemops() ? getRegClass(SuperID)
708 : DefaultSuperclass;
709 case PPC::ACCRCRegClassID:
710 case PPC::UACCRCRegClassID:
711 return Subtarget.hasMMA() ? getRegClass(SuperID) : DefaultSuperclass;
712 }
713 }
714 }
715
716 return DefaultSuperclass;
717}
718
719//===----------------------------------------------------------------------===//
720// Stack Frame Processing methods
721//===----------------------------------------------------------------------===//
722
723/// lowerDynamicAlloc - Generate the code for allocating an object in the
724/// current frame. The sequence of code will be in the general form
725///
726/// addi R0, SP, \#frameSize ; get the address of the previous frame
727/// stwxu R0, SP, Rnegsize ; add and update the SP with the negated size
728/// addi Rnew, SP, \#maxCallFrameSize ; get the top of the allocation
729///
// NOTE(review): the function declarator (doxygen line 730) is missing from
// this capture.
731 // Get the instruction.
732 MachineInstr &MI = *II;
733 // Get the instruction's basic block.
734 MachineBasicBlock &MBB = *MI.getParent();
735 // Get the basic block's function.
// NOTE(review): doxygen line 736 (presumably defining MF from MBB, used
// throughout below) is missing from this capture.
737 // Get the frame info.
738 MachineFrameInfo &MFI = MF.getFrameInfo();
739 const PPCSubtarget &Subtarget = MF.getSubtarget<PPCSubtarget>();
740 // Get the instruction info.
741 const TargetInstrInfo &TII = *Subtarget.getInstrInfo();
742 // Determine whether 64-bit pointers are used.
743 bool LP64 = TM.isPPC64();
744 DebugLoc dl = MI.getDebugLoc();
745
746 // Get the maximum call stack size.
747 unsigned maxCallFrameSize = MFI.getMaxCallFrameSize();
748 Align MaxAlign = MFI.getMaxAlign();
749 assert(isAligned(MaxAlign, maxCallFrameSize) &&
750 "Maximum call-frame size not sufficiently aligned");
751 (void)MaxAlign;
752
// Reg holds the previous frame's address (computed by prepareDynamicAlloca)
// and is stored back at the new stack top by the STDUX/STWUX below.
753 const TargetRegisterClass *G8RC = &PPC::G8RCRegClass;
754 const TargetRegisterClass *GPRC = &PPC::GPRCRegClass;
755 Register Reg = MF.getRegInfo().createVirtualRegister(LP64 ? G8RC : GPRC);
756 bool KillNegSizeReg = MI.getOperand(1).isKill();
757 Register NegSizeReg = MI.getOperand(1).getReg();
758
759 prepareDynamicAlloca(II, NegSizeReg, KillNegSizeReg, Reg);
760 // Grow the stack and update the stack pointer link, then determine the
761 // address of new allocated space.
762 if (LP64) {
763 BuildMI(MBB, II, dl, TII.get(PPC::STDUX), PPC::X1)
// NOTE(review): doxygen line 764 (presumably '.addReg(Reg, RegState::Kill)',
// the stored value operand) is missing from this capture.
765 .addReg(PPC::X1)
766 .addReg(NegSizeReg, getKillRegState(KillNegSizeReg));
767 BuildMI(MBB, II, dl, TII.get(PPC::ADDI8), MI.getOperand(0).getReg())
768 .addReg(PPC::X1)
769 .addImm(maxCallFrameSize);
770 } else {
771 BuildMI(MBB, II, dl, TII.get(PPC::STWUX), PPC::R1)
// NOTE(review): doxygen line 772 (presumably '.addReg(Reg, RegState::Kill)')
// is missing from this capture.
773 .addReg(PPC::R1)
774 .addReg(NegSizeReg, getKillRegState(KillNegSizeReg));
775 BuildMI(MBB, II, dl, TII.get(PPC::ADDI), MI.getOperand(0).getReg())
776 .addReg(PPC::R1)
777 .addImm(maxCallFrameSize);
778 }
779
780 // Discard the DYNALLOC instruction.
781 MBB.erase(II);
782}
783
784/// To accomplish dynamic stack allocation, we have to calculate the exact size
785/// subtracted from the stack pointer according to alignment information and get
786/// previous frame pointer.
// NOTE(review): the first line of the declarator (doxygen line 787) is
// missing from this capture.
788 Register &NegSizeReg,
789 bool &KillNegSizeReg,
790 Register &FramePointer) const {
791 // Get the instruction.
792 MachineInstr &MI = *II;
793 // Get the instruction's basic block.
794 MachineBasicBlock &MBB = *MI.getParent();
795 // Get the basic block's function.
// NOTE(review): doxygen line 796 (presumably defining MF from MBB, used
// throughout below) is missing from this capture.
797 // Get the frame info.
798 MachineFrameInfo &MFI = MF.getFrameInfo();
799 const PPCSubtarget &Subtarget = MF.getSubtarget<PPCSubtarget>();
800 // Get the instruction info.
801 const TargetInstrInfo &TII = *Subtarget.getInstrInfo();
802 // Determine whether 64-bit pointers are used.
803 bool LP64 = TM.isPPC64();
804 DebugLoc dl = MI.getDebugLoc();
805 // Get the total frame size.
806 unsigned FrameSize = MFI.getStackSize();
807
808 // Get stack alignments.
809 const PPCFrameLowering *TFI = getFrameLowering(MF);
810 Align TargetAlign = TFI->getStackAlign();
811 Align MaxAlign = MFI.getMaxAlign();
812
813 // Determine the previous frame's address. If FrameSize can't be
814 // represented as 16 bits or we need special alignment, then we load the
815 // previous frame's address from 0(SP). Why not do an addis of the hi?
816 // Because R0 is our only safe tmp register and addi/addis treat R0 as zero.
817 // Constructing the constant and adding would take 3 instructions.
818 // Fortunately, a frame greater than 32K is rare.
819 const TargetRegisterClass *G8RC = &PPC::G8RCRegClass;
820 const TargetRegisterClass *GPRC = &PPC::GPRCRegClass;
821
822 if (MaxAlign < TargetAlign && isInt<16>(FrameSize)) {
823 if (LP64)
824 BuildMI(MBB, II, dl, TII.get(PPC::ADDI8), FramePointer)
825 .addReg(PPC::X31)
826 .addImm(FrameSize);
827 else
828 BuildMI(MBB, II, dl, TII.get(PPC::ADDI), FramePointer)
829 .addReg(PPC::R31)
830 .addImm(FrameSize);
831 } else if (LP64) {
832 BuildMI(MBB, II, dl, TII.get(PPC::LD), FramePointer)
833 .addImm(0)
834 .addReg(PPC::X1);
835 } else {
836 BuildMI(MBB, II, dl, TII.get(PPC::LWZ), FramePointer)
837 .addImm(0)
838 .addReg(PPC::R1);
839 }
840 // Determine the actual NegSizeReg according to alignment info.
// When extra alignment is required, round the (negative) size down to the
// alignment boundary: NegSizeReg &= ~(MaxAlign - 1), built as LI + AND
// because an immediate AND without setting cr0 is unavailable (see below).
841 if (LP64) {
842 if (MaxAlign > TargetAlign) {
843 unsigned UnalNegSizeReg = NegSizeReg;
844 NegSizeReg = MF.getRegInfo().createVirtualRegister(G8RC);
845
846 // Unfortunately, there is no andi, only andi., and we can't insert that
847 // here because we might clobber cr0 while it is live.
848 BuildMI(MBB, II, dl, TII.get(PPC::LI8), NegSizeReg)
849 .addImm(~(MaxAlign.value() - 1));
850
851 unsigned NegSizeReg1 = NegSizeReg;
852 NegSizeReg = MF.getRegInfo().createVirtualRegister(G8RC);
853 BuildMI(MBB, II, dl, TII.get(PPC::AND8), NegSizeReg)
854 .addReg(UnalNegSizeReg, getKillRegState(KillNegSizeReg))
855 .addReg(NegSizeReg1, RegState::Kill);
856 KillNegSizeReg = true;
857 }
858 } else {
859 if (MaxAlign > TargetAlign) {
860 unsigned UnalNegSizeReg = NegSizeReg;
861 NegSizeReg = MF.getRegInfo().createVirtualRegister(GPRC);
862
863 // Unfortunately, there is no andi, only andi., and we can't insert that
864 // here because we might clobber cr0 while it is live.
865 BuildMI(MBB, II, dl, TII.get(PPC::LI), NegSizeReg)
866 .addImm(~(MaxAlign.value() - 1));
867
868 unsigned NegSizeReg1 = NegSizeReg;
869 NegSizeReg = MF.getRegInfo().createVirtualRegister(GPRC);
870 BuildMI(MBB, II, dl, TII.get(PPC::AND), NegSizeReg)
871 .addReg(UnalNegSizeReg, getKillRegState(KillNegSizeReg))
872 .addReg(NegSizeReg1, RegState::Kill);
873 KillNegSizeReg = true;
874 }
875 }
876}
877
// NOTE(review): extraction artifact -- the defining signature (original lines
// 878-879; from the dispatch in eliminateFrameIndex this is
// lowerPrepareProbedAlloca) and the `MachineFunction &MF` definition
// (original line 884) are missing from this view; confirm against upstream.
// Lowers a PREPARE_PROBED_ALLOCA_* pseudo: operand 0 is the frame-pointer
// def, operand 1 the actual-negated-size def, operand 2 the negated-size use.
880 MachineInstr &MI = *II;
881 // Get the instruction's basic block.
882 MachineBasicBlock &MBB = *MI.getParent();
883 // Get the basic block's function.
885 const PPCSubtarget &Subtarget = MF.getSubtarget<PPCSubtarget>();
886 // Get the instruction info.
887 const TargetInstrInfo &TII = *Subtarget.getInstrInfo();
888 // Determine whether 64-bit pointers are used.
889 bool LP64 = TM.isPPC64();
890 DebugLoc dl = MI.getDebugLoc();
891 Register FramePointer = MI.getOperand(0).getReg();
892 const Register ActualNegSizeReg = MI.getOperand(1).getReg();
893 bool KillNegSizeReg = MI.getOperand(2).isKill();
894 Register NegSizeReg = MI.getOperand(2).getReg();
// or rD,rS,rS is used as a plain register copy below (both sources are the
// same register).
895 const MCInstrDesc &CopyInst = TII.get(LP64 ? PPC::OR8 : PPC::OR);
896 // RegAllocator might allocate FramePointer and NegSizeReg in the same phyreg.
897 if (FramePointer == NegSizeReg) {
898 assert(KillNegSizeReg && "FramePointer is a def and NegSizeReg is an use, "
899 "NegSizeReg should be killed");
900 // FramePointer is clobbered earlier than the use of NegSizeReg in
901 // prepareDynamicAlloca, save NegSizeReg in ActualNegSizeReg to avoid
902 // misuse.
903 BuildMI(MBB, II, dl, CopyInst, ActualNegSizeReg)
904 .addReg(NegSizeReg)
905 .addReg(NegSizeReg);
906 NegSizeReg = ActualNegSizeReg;
907 KillNegSizeReg = false;
908 }
909 prepareDynamicAlloca(II, NegSizeReg, KillNegSizeReg, FramePointer);
910 // NegSizeReg might be updated in prepareDynamicAlloca if MaxAlign >
911 // TargetAlign.
912 if (NegSizeReg != ActualNegSizeReg)
913 BuildMI(MBB, II, dl, CopyInst, ActualNegSizeReg)
914 .addReg(NegSizeReg)
915 .addReg(NegSizeReg);
// The pseudo has been fully expanded; remove it.
916 MBB.erase(II);
917}
918
// NOTE(review): extraction artifact -- the defining signature (original lines
// 919-920; per the "lowerDynamicAreaOffset erases II" dispatch comment in
// eliminateFrameIndex this is lowerDynamicAreaOffset) and the
// `MachineFunction &MF` definition (original line 926) are missing here.
// Lowers DYNAREAOFFSET/DYNAREAOFFSET8 to a single `li` of the function's
// maximum call-frame size into the pseudo's destination register.
921 // Get the instruction.
922 MachineInstr &MI = *II;
923 // Get the instruction's basic block.
924 MachineBasicBlock &MBB = *MI.getParent();
925 // Get the basic block's function.
927 // Get the frame info.
928 MachineFrameInfo &MFI = MF.getFrameInfo();
929 const PPCSubtarget &Subtarget = MF.getSubtarget<PPCSubtarget>();
930 // Get the instruction info.
931 const TargetInstrInfo &TII = *Subtarget.getInstrInfo();
932
933 unsigned maxCallFrameSize = MFI.getMaxCallFrameSize();
934 bool is64Bit = TM.isPPC64();
935 DebugLoc dl = MI.getDebugLoc();
936 BuildMI(MBB, II, dl, TII.get(is64Bit ? PPC::LI8 : PPC::LI),
937 MI.getOperand(0).getReg())
938 .addImm(maxCallFrameSize);
// The pseudo has been fully expanded; remove it.
939 MBB.erase(II);
940}
941
942/// lowerCRSpilling - Generate the code for spilling a CR register. Instead of
943/// reserving a whole register (R0), we scrounge for one here. This generates
944/// code like this:
945///
946/// mfcr rA ; Move the conditional register into GPR rA.
947/// rlwinm rA, rA, SB, 0, 31 ; Shift the bits left so they are in CR0's slot.
948/// stw rA, FI ; Store rA to the frame.
949///
// NOTE(review): extraction artifact -- the first line of the signature
// (original line 950) and the `MachineFunction &MF` definition (original
// line 956) are missing from this view.
951 unsigned FrameIndex) const {
952 // Get the instruction.
953 MachineInstr &MI = *II; // ; SPILL_CR <SrcReg>, <offset>
954 // Get the instruction's basic block.
955 MachineBasicBlock &MBB = *MI.getParent();
957 const PPCSubtarget &Subtarget = MF.getSubtarget<PPCSubtarget>();
958 const TargetInstrInfo &TII = *Subtarget.getInstrInfo();
959 DebugLoc dl = MI.getDebugLoc();
960
961 bool LP64 = TM.isPPC64();
962 const TargetRegisterClass *G8RC = &PPC::G8RCRegClass;
963 const TargetRegisterClass *GPRC = &PPC::GPRCRegClass;
964
// Scratch GPR to stage the CR contents in before the store.
965 Register Reg = MF.getRegInfo().createVirtualRegister(LP64 ? G8RC : GPRC);
966 Register SrcReg = MI.getOperand(0).getReg();
967
968 // We need to store the CR in the low 4-bits of the saved value. First, issue
969 // an MFOCRF to save all of the CRBits and, if needed, kill the SrcReg.
970 BuildMI(MBB, II, dl, TII.get(LP64 ? PPC::MFOCRF8 : PPC::MFOCRF), Reg)
971 .addReg(SrcReg, getKillRegState(MI.getOperand(0).isKill()))
972
973 // If the saved register wasn't CR0, shift the bits left so that they are in
974 // CR0's slot.
975 if (SrcReg != PPC::CR0) {
976 Register Reg1 = Reg;
977 Reg = MF.getRegInfo().createVirtualRegister(LP64 ? G8RC : GPRC);
978
979 // rlwinm rA, rA, ShiftBits, 0, 31.
980 BuildMI(MBB, II, dl, TII.get(LP64 ? PPC::RLWINM8 : PPC::RLWINM), Reg)
981 .addReg(Reg1, RegState::Kill)
982 .addImm(getEncodingValue(SrcReg) * 4)
983 .addImm(0)
984 .addImm(31);
985 }
986
// Store the staged word to the spill slot.
987 addFrameReference(BuildMI(MBB, II, dl, TII.get(LP64 ? PPC::STW8 : PPC::STW))
988 .addReg(Reg, RegState::Kill),
989 FrameIndex);
990
991 // Discard the pseudo instruction.
992 MBB.erase(II);
993}
994
// NOTE(review): extraction artifact -- the first line of the signature
// (original line 995, presumably
// `void PPCRegisterInfo::lowerCRRestore(MachineBasicBlock::iterator II,`)
// is missing from this view.
// Inverse of lowerCRSpilling: reload the saved word, rotate the four CR bits
// back into the destination field's slot, and mtocrf them into DestReg.
996 unsigned FrameIndex) const {
997 // Get the instruction.
998 MachineInstr &MI = *II; // ; <DestReg> = RESTORE_CR <offset>
999 // Get the instruction's basic block.
1000 MachineBasicBlock &MBB = *MI.getParent();
1001 MachineFunction &MF = *MBB.getParent();
1002 const PPCSubtarget &Subtarget = MF.getSubtarget<PPCSubtarget>();
1003 const TargetInstrInfo &TII = *Subtarget.getInstrInfo();
1004 DebugLoc dl = MI.getDebugLoc();
1005
1006 bool LP64 = TM.isPPC64();
1007 const TargetRegisterClass *G8RC = &PPC::G8RCRegClass;
1008 const TargetRegisterClass *GPRC = &PPC::GPRCRegClass;
1009
1010 Register Reg = MF.getRegInfo().createVirtualRegister(LP64 ? G8RC : GPRC);
1011 Register DestReg = MI.getOperand(0).getReg();
1012 assert(MI.definesRegister(DestReg, /*TRI=*/nullptr) &&
1013 "RESTORE_CR does not define its destination");
1014
1015 addFrameReference(BuildMI(MBB, II, dl, TII.get(LP64 ? PPC::LWZ8 : PPC::LWZ),
1016 Reg), FrameIndex);
1017
1018 // If the reloaded register isn't CR0, shift the bits right so that they are
1019 // in the right CR's slot.
1020 if (DestReg != PPC::CR0) {
1021 Register Reg1 = Reg;
1022 Reg = MF.getRegInfo().createVirtualRegister(LP64 ? G8RC : GPRC);
1023
1024 unsigned ShiftBits = getEncodingValue(DestReg)*4;
1025 // rlwinm r11, r11, 32-ShiftBits, 0, 31.
1026 BuildMI(MBB, II, dl, TII.get(LP64 ? PPC::RLWINM8 : PPC::RLWINM), Reg)
1027 .addReg(Reg1, RegState::Kill).addImm(32-ShiftBits).addImm(0)
1028 .addImm(31);
1029 }
1030
1031 BuildMI(MBB, II, dl, TII.get(LP64 ? PPC::MTOCRF8 : PPC::MTOCRF), DestReg)
1032 .addReg(Reg, RegState::Kill);
1033
1034 // Discard the pseudo instruction.
1035 MBB.erase(II);
1036}
1037
// NOTE(review): extraction artifact -- several original lines are missing
// from this view: 1038 (first line of the signature), 1058-1059 (the
// reverse-iterator setup that must define `Ins` and `Rend` used below),
// 1118 (an operand line of the SETB BuildMI), and 1129 (an operand line of
// the MFOCRF BuildMI). Confirm against the upstream file.
// Spills a single CR bit: tries to recognize a known-value defining
// instruction (CRSET/CRUNSET) nearby so the bit can be materialized directly,
// otherwise extracts it via SETNBC (ISA 3.1), SETB (ISA 3.0, LT bits only),
// or a generic MFOCRF + rlwinm mask sequence.
1039 unsigned FrameIndex) const {
1040 // Get the instruction.
1041 MachineInstr &MI = *II; // ; SPILL_CRBIT <SrcReg>, <offset>
1042 // Get the instruction's basic block.
1043 MachineBasicBlock &MBB = *MI.getParent();
1044 MachineFunction &MF = *MBB.getParent();
1045 const PPCSubtarget &Subtarget = MF.getSubtarget<PPCSubtarget>();
1046 const TargetInstrInfo &TII = *Subtarget.getInstrInfo();
1047 const TargetRegisterInfo* TRI = Subtarget.getRegisterInfo();
1048 DebugLoc dl = MI.getDebugLoc();
1049
1050 bool LP64 = TM.isPPC64();
1051 const TargetRegisterClass *G8RC = &PPC::G8RCRegClass;
1052 const TargetRegisterClass *GPRC = &PPC::GPRCRegClass;
1053
1054 Register Reg = MF.getRegInfo().createVirtualRegister(LP64 ? G8RC : GPRC);
1055 Register SrcReg = MI.getOperand(0).getReg();
1056
1057 // Search up the BB to find the definition of the CR bit.
1060 ++Ins;
1061 unsigned CRBitSpillDistance = 0;
1062 bool SeenUse = false;
1063 for (; Ins != Rend; ++Ins) {
1064 // Definition found.
1065 if (Ins->modifiesRegister(SrcReg, TRI))
1066 break;
1067 // Use found.
1068 if (Ins->readsRegister(SrcReg, TRI))
1069 SeenUse = true;
1070 // Unable to find CR bit definition within maximum search distance.
1071 if (CRBitSpillDistance == MaxCRBitSpillDist) {
1072 Ins = MI;
1073 break;
1074 }
1075 // Skip debug instructions when counting CR bit spill distance.
1076 if (!Ins->isDebugInstr())
1077 CRBitSpillDistance++;
1078 }
1079
1080 // Unable to find the definition of the CR bit in the MBB.
1081 if (Ins == MBB.rend())
1082 Ins = MI;
1083
1084 bool SpillsKnownBit = false;
1085 // There is no need to extract the CR bit if its value is already known.
1086 switch (Ins->getOpcode()) {
1087 case PPC::CRUNSET:
1088 BuildMI(MBB, II, dl, TII.get(LP64 ? PPC::LI8 : PPC::LI), Reg)
1089 .addImm(0);
1090 SpillsKnownBit = true;
1091 break;
1092 case PPC::CRSET:
1093 BuildMI(MBB, II, dl, TII.get(LP64 ? PPC::LIS8 : PPC::LIS), Reg)
1094 .addImm(-32768);
1095 SpillsKnownBit = true;
1096 break;
1097 default:
1098 // On Power10, we can use SETNBC to spill all CR bits. SETNBC will set all
1099 // bits (specifically, it produces a -1 if the CR bit is set). Ultimately,
1100 // the bit that is of importance to us is bit 32 (bit 0 of a 32-bit
1101 // register), and SETNBC will set this.
1102 if (Subtarget.isISA3_1()) {
1103 BuildMI(MBB, II, dl, TII.get(LP64 ? PPC::SETNBC8 : PPC::SETNBC), Reg)
1104 .addReg(SrcReg, RegState::Undef);
1105 break;
1106 }
1107
1108 // On Power9, we can use SETB to extract the LT bit. This only works for
1109 // the LT bit since SETB produces -1/1/0 for LT/GT/<neither>. So the value
1110 // of the bit we care about (32-bit sign bit) will be set to the value of
1111 // the LT bit (regardless of the other bits in the CR field).
1112 if (Subtarget.isISA3_0()) {
1113 if (SrcReg == PPC::CR0LT || SrcReg == PPC::CR1LT ||
1114 SrcReg == PPC::CR2LT || SrcReg == PPC::CR3LT ||
1115 SrcReg == PPC::CR4LT || SrcReg == PPC::CR5LT ||
1116 SrcReg == PPC::CR6LT || SrcReg == PPC::CR7LT) {
1117 BuildMI(MBB, II, dl, TII.get(LP64 ? PPC::SETB8 : PPC::SETB), Reg)
1119 break;
1120 }
1121 }
1122
1123 // We need to move the CR field that contains the CR bit we are spilling.
1124 // The super register may not be explicitly defined (i.e. it can be defined
1125 // by a CR-logical that only defines the subreg) so we state that the CR
1126 // field is undef. Also, in order to preserve the kill flag on the CR bit,
1127 // we add it as an implicit use.
1128 BuildMI(MBB, II, dl, TII.get(LP64 ? PPC::MFOCRF8 : PPC::MFOCRF), Reg)
1130 .addReg(SrcReg,
1131 RegState::Implicit | getKillRegState(MI.getOperand(0).isKill()));
1132
1133 // If the saved register wasn't CR0LT, shift the bits left so that the bit
1134 // to store is the first one. Mask all but that bit.
1135 Register Reg1 = Reg;
1136 Reg = MF.getRegInfo().createVirtualRegister(LP64 ? G8RC : GPRC);
1137
1138 // rlwinm rA, rA, ShiftBits, 0, 0.
1139 BuildMI(MBB, II, dl, TII.get(LP64 ? PPC::RLWINM8 : PPC::RLWINM), Reg)
1140 .addReg(Reg1, RegState::Kill)
1141 .addImm(getEncodingValue(SrcReg))
1142 .addImm(0).addImm(0);
1143 }
1144 addFrameReference(BuildMI(MBB, II, dl, TII.get(LP64 ? PPC::STW8 : PPC::STW))
1145 .addReg(Reg, RegState::Kill),
1146 FrameIndex);
1147
1148 bool KillsCRBit = MI.killsRegister(SrcReg, TRI);
1149 // Discard the pseudo instruction.
1150 MBB.erase(II);
// If the bit's value was known, the spill was its only remaining use, and it
// is killed here, the defining CRSET/CRUNSET becomes dead: neuter it.
1151 if (SpillsKnownBit && KillsCRBit && !SeenUse) {
1152 Ins->setDesc(TII.get(PPC::UNENCODED_NOP));
1153 Ins->removeOperand(0);
1154 }
1155}
1156
// NOTE(review): extraction artifact -- original line 1157 (first line of the
// signature) and original line 1201 (an operand line of the final MTOCRF
// BuildMI; the comment at 1198-1200 describes it as a use dependency) are
// missing from this view. Confirm against the upstream file.
// Restores a single CR bit: reloads the saved word, reads the current CR
// field with mfocrf, inserts the reloaded bit via rlwimi, and writes the
// field back with mtocrf.
1158 unsigned FrameIndex) const {
1159 // Get the instruction.
1160 MachineInstr &MI = *II; // ; <DestReg> = RESTORE_CRBIT <offset>
1161 // Get the instruction's basic block.
1162 MachineBasicBlock &MBB = *MI.getParent();
1163 MachineFunction &MF = *MBB.getParent();
1164 const PPCSubtarget &Subtarget = MF.getSubtarget<PPCSubtarget>();
1165 const TargetInstrInfo &TII = *Subtarget.getInstrInfo();
1166 DebugLoc dl = MI.getDebugLoc();
1167
1168 bool LP64 = TM.isPPC64();
1169 const TargetRegisterClass *G8RC = &PPC::G8RCRegClass;
1170 const TargetRegisterClass *GPRC = &PPC::GPRCRegClass;
1171
1172 Register Reg = MF.getRegInfo().createVirtualRegister(LP64 ? G8RC : GPRC);
1173 Register DestReg = MI.getOperand(0).getReg();
1174 assert(MI.definesRegister(DestReg, /*TRI=*/nullptr) &&
1175 "RESTORE_CRBIT does not define its destination");
1176
1177 addFrameReference(BuildMI(MBB, II, dl, TII.get(LP64 ? PPC::LWZ8 : PPC::LWZ),
1178 Reg), FrameIndex);
1179
1180 BuildMI(MBB, II, dl, TII.get(TargetOpcode::IMPLICIT_DEF), DestReg);
1181
1182 Register RegO = MF.getRegInfo().createVirtualRegister(LP64 ? G8RC : GPRC);
1183 BuildMI(MBB, II, dl, TII.get(LP64 ? PPC::MFOCRF8 : PPC::MFOCRF), RegO)
1184 .addReg(getCRFromCRBit(DestReg));
1185
1186 unsigned ShiftBits = getEncodingValue(DestReg);
1187 // rlwimi r11, r10, 32-ShiftBits, ..., ...
1188 BuildMI(MBB, II, dl, TII.get(LP64 ? PPC::RLWIMI8 : PPC::RLWIMI), RegO)
1189 .addReg(RegO, RegState::Kill)
1190 .addReg(Reg, RegState::Kill)
1191 .addImm(ShiftBits ? 32 - ShiftBits : 0)
1192 .addImm(ShiftBits)
1193 .addImm(ShiftBits);
1194
1195 BuildMI(MBB, II, dl, TII.get(LP64 ? PPC::MTOCRF8 : PPC::MTOCRF),
1196 getCRFromCRBit(DestReg))
1197 .addReg(RegO, RegState::Kill)
1198 // Make sure we have a use dependency all the way through this
1199 // sequence of instructions. We can't have the other bits in the CR
1200 // modified in between the mfocrf and the mtocrf.
1202
1203 // Discard the pseudo instruction.
1204 MBB.erase(II);
1205}
1206
// NOTE(review): extraction artifact -- the first line of this static helper's
// signature (original line 1207, the `static void emitAccCopyInfo(...` part)
// is missing from this view.
// Debug-only diagnostic: when ReportAccMoves is set, print whether an
// acc/uacc -> acc/uacc copy is being emitted and dump the block. Compiled
// away entirely under NDEBUG.
1208 MCRegister DestReg, MCRegister SrcReg) {
1209#ifdef NDEBUG
1210 return;
1211#else
1212 if (ReportAccMoves) {
1213 std::string Dest = PPC::ACCRCRegClass.contains(DestReg) ? "acc" : "uacc";
1214 std::string Src = PPC::ACCRCRegClass.contains(SrcReg) ? "acc" : "uacc";
1215 dbgs() << "Emitting copy from " << Src << " to " << Dest << ":\n";
1216 MBB.dump();
1217 }
1218#endif
1219}
1220
// NOTE(review): extraction artifact -- the first line of this static helper's
// signature (original line 1221, presumably
// `static void emitAccSpillRestoreInfo(MachineBasicBlock &MBB, bool IsPrimed,`)
// is missing from this view.
// Debug-only diagnostic: when ReportAccMoves is set, print whether an acc or
// uacc spill/restore is being emitted and dump the block. No-op under NDEBUG.
1222 bool IsRestore) {
1223#ifdef NDEBUG
1224 return;
1225#else
1226 if (ReportAccMoves) {
1227 dbgs() << "Emitting " << (IsPrimed ? "acc" : "uacc") << " register "
1228 << (IsRestore ? "restore" : "spill") << ":\n";
1229 MBB.dump();
1230 }
1231#endif
1232}
1233
// NOTE(review): extraction artifact -- the first line(s) of this static
// helper's signature (original lines 1234-1235, the `static void
// spillRegPairs(MachineBasicBlock &MBB, ...` part) are missing from this
// view.
// Stores one (TwoPairs == false) or two (TwoPairs == true) VSX register
// pairs to FrameIndex as individual STXV stores, 16 bytes apart. On little-
// endian targets the stores are issued at descending offsets starting from
// the top of the slot; on big-endian, ascending from 0.
1236 const TargetInstrInfo &TII, Register SrcReg,
1237 unsigned FrameIndex, bool IsLittleEndian,
1238 bool IsKilled, bool TwoPairs) {
1239 unsigned Offset = 0;
1240 // The register arithmetic in this function does not support virtual
1241 // registers.
1242 assert(!SrcReg.isVirtual() &&
1243 "Spilling register pairs does not support virtual registers.");
1244
1245 if (TwoPairs)
1246 Offset = IsLittleEndian ? 48 : 0;
1247 else
1248 Offset = IsLittleEndian ? 16 : 0;
// Map the VSRp pair register to its first underlying single VSX register:
// pairs above VSRp15 alias the Altivec registers (V0...), lower pairs alias
// VSL0...
1249 Register Reg = (SrcReg > PPC::VSRp15) ? PPC::V0 + (SrcReg - PPC::VSRp16) * 2
1250 : PPC::VSL0 + (SrcReg - PPC::VSRp0) * 2;
1251 addFrameReference(BuildMI(MBB, II, DL, TII.get(PPC::STXV))
1252 .addReg(Reg, getKillRegState(IsKilled)),
1253 FrameIndex, Offset);
1254 Offset += IsLittleEndian ? -16 : 16;
1255 addFrameReference(BuildMI(MBB, II, DL, TII.get(PPC::STXV))
1256 .addReg(Reg + 1, getKillRegState(IsKilled)),
1257 FrameIndex, Offset);
1258 if (TwoPairs) {
1259 Offset += IsLittleEndian ? -16 : 16;
1260 addFrameReference(BuildMI(MBB, II, DL, TII.get(PPC::STXV))
1261 .addReg(Reg + 2, getKillRegState(IsKilled)),
1262 FrameIndex, Offset);
1263 Offset += IsLittleEndian ? -16 : 16;
1264 addFrameReference(BuildMI(MBB, II, DL, TII.get(PPC::STXV))
1265 .addReg(Reg + 3, getKillRegState(IsKilled)),
1266 FrameIndex, Offset);
1267 }
1268}
1269
1270/// Remove any STXVP[X] instructions and split them out into a pair of
1271/// STXV[X] instructions if --disable-auto-paired-vec-st is specified on
1272/// the command line.
// NOTE(review): extraction artifact -- original line 1273 (first line of the
// signature) and original line 1275 (the start of the assert whose message
// string remains on the next line, presumably
// `assert(DisableAutoPairedVecSt &&`) are missing from this view.
1274 unsigned FrameIndex) const {
1276 "Expecting to do this only if paired vector stores are disabled.");
1277 MachineInstr &MI = *II; // STXVP <SrcReg>, <offset>
1278 MachineBasicBlock &MBB = *MI.getParent();
1279 MachineFunction &MF = *MBB.getParent();
1280 const PPCSubtarget &Subtarget = MF.getSubtarget<PPCSubtarget>();
1281 const TargetInstrInfo &TII = *Subtarget.getInstrInfo();
1282 DebugLoc DL = MI.getDebugLoc();
1283 Register SrcReg = MI.getOperand(0).getReg();
1284 bool IsLittleEndian = Subtarget.isLittleEndian();
1285 bool IsKilled = MI.getOperand(0).isKill();
// One pair only: spillRegPairs emits two STXVs for the VSRp register.
1286 spillRegPairs(MBB, II, DL, TII, SrcReg, FrameIndex, IsLittleEndian, IsKilled,
1287 /* TwoPairs */ false);
1288 // Discard the original instruction.
1289 MBB.erase(II);
1290}
1291
1292static void emitWAccSpillRestoreInfo(MachineBasicBlock &MBB, bool IsRestore) {
1293#ifdef NDEBUG
1294 return;
1295#else
1296 if (ReportAccMoves) {
1297 dbgs() << "Emitting wacc register " << (IsRestore ? "restore" : "spill")
1298 << ":\n";
1299 MBB.dump();
1300 }
1301#endif
1302}
1303
1304/// lowerACCSpilling - Generate the code for spilling the accumulator register.
1305/// Similarly to other spills/reloads that use pseudo-ops, we do not actually
1306/// eliminate the FrameIndex here nor compute the stack offset. We simply
1307/// create a real instruction with an FI and rely on eliminateFrameIndex to
1308/// handle the FI elimination.
// NOTE(review): extraction artifact -- original line 1309 (first line of the
// signature) and original line 1333 are missing from this view. Line 1333
// must be the `if` condition guarding the spillRegPairs call (the dangling
// `else` below implies it; presumably `if (DisableAutoPairedVecSt)`).
// Confirm against the upstream file.
1311 MachineInstr &MI = *II; // SPILL_ACC <SrcReg>, <offset>
1312 MachineBasicBlock &MBB = *MI.getParent();
1313 MachineFunction &MF = *MBB.getParent();
1314 const PPCSubtarget &Subtarget = MF.getSubtarget<PPCSubtarget>();
1315 const TargetInstrInfo &TII = *Subtarget.getInstrInfo();
1316 DebugLoc DL = MI.getDebugLoc();
1317 Register SrcReg = MI.getOperand(0).getReg();
1318 bool IsKilled = MI.getOperand(0).isKill();
1319
1320 bool IsPrimed = PPC::ACCRCRegClass.contains(SrcReg);
// Map the ACC/UACC register to the first of its two underlying VSRp pairs.
1321 Register Reg =
1322 PPC::VSRp0 + (SrcReg - (IsPrimed ? PPC::ACC0 : PPC::UACC0)) * 2;
1323 bool IsLittleEndian = Subtarget.isLittleEndian();
1324
1325 emitAccSpillRestoreInfo(MBB, IsPrimed, false);
1326
1327 // De-prime the register being spilled, create two stores for the pair
1328 // subregisters accounting for endianness and then re-prime the register if
1329 // it isn't killed. This uses the Offset parameter to addFrameReference() to
1330 // adjust the offset of the store that is within the 64-byte stack slot.
1331 if (IsPrimed)
1332 BuildMI(MBB, II, DL, TII.get(PPC::XXMFACC), SrcReg).addReg(SrcReg);
1334 spillRegPairs(MBB, II, DL, TII, Reg, FrameIndex, IsLittleEndian, IsKilled,
1335 /* TwoPairs */ true);
1336 else {
1337 addFrameReference(BuildMI(MBB, II, DL, TII.get(PPC::STXVP))
1338 .addReg(Reg, getKillRegState(IsKilled)),
1339 FrameIndex, IsLittleEndian ? 32 : 0);
1340 addFrameReference(BuildMI(MBB, II, DL, TII.get(PPC::STXVP))
1341 .addReg(Reg + 1, getKillRegState(IsKilled)),
1342 FrameIndex, IsLittleEndian ? 0 : 32);
1343 }
1344 if (IsPrimed && !IsKilled)
1345 BuildMI(MBB, II, DL, TII.get(PPC::XXMTACC), SrcReg).addReg(SrcReg);
1346
1347 // Discard the pseudo instruction.
1348 MBB.erase(II);
1349}
1350
1351/// lowerACCRestore - Generate the code to restore the accumulator register.
// NOTE(review): extraction artifact -- original line 1352 (first line of the
// signature) is missing from this view.
1353 unsigned FrameIndex) const {
1354 MachineInstr &MI = *II; // <DestReg> = RESTORE_ACC <offset>
1355 MachineBasicBlock &MBB = *MI.getParent();
1356 MachineFunction &MF = *MBB.getParent();
1357 const PPCSubtarget &Subtarget = MF.getSubtarget<PPCSubtarget>();
1358 const TargetInstrInfo &TII = *Subtarget.getInstrInfo();
1359 DebugLoc DL = MI.getDebugLoc();
1360
1361 Register DestReg = MI.getOperand(0).getReg();
1362 assert(MI.definesRegister(DestReg, /*TRI=*/nullptr) &&
1363 "RESTORE_ACC does not define its destination");
1364
1365 bool IsPrimed = PPC::ACCRCRegClass.contains(DestReg);
// Map the ACC/UACC register to the first of its two underlying VSRp pairs.
1366 Register Reg =
1367 PPC::VSRp0 + (DestReg - (IsPrimed ? PPC::ACC0 : PPC::UACC0)) * 2;
1368 bool IsLittleEndian = Subtarget.isLittleEndian();
1369
1370 emitAccSpillRestoreInfo(MBB, IsPrimed, true);
1371
1372 // Create two loads for the pair subregisters accounting for endianness and
1373 // then prime the accumulator register being restored.
1374 addFrameReference(BuildMI(MBB, II, DL, TII.get(PPC::LXVP), Reg),
1375 FrameIndex, IsLittleEndian ? 32 : 0);
1376 addFrameReference(BuildMI(MBB, II, DL, TII.get(PPC::LXVP), Reg + 1),
1377 FrameIndex, IsLittleEndian ? 0 : 32);
1378 if (IsPrimed)
1379 BuildMI(MBB, II, DL, TII.get(PPC::XXMTACC), DestReg).addReg(DestReg);
1380
1381 // Discard the pseudo instruction.
1382 MBB.erase(II);
1383}
1384
1385/// lowerWACCSpilling - Generate the code for spilling the wide accumulator
1386/// register.
// NOTE(review): extraction artifact -- original line 1387 (first line of the
// signature) and lines 1396-1397 are missing from this view; given the
// parallel restore routine below, the missing statement is presumably the
// `emitWAccSpillRestoreInfo(MBB, false);` diagnostic call. Confirm against
// the upstream file.
1388 unsigned FrameIndex) const {
1389 MachineInstr &MI = *II; // SPILL_WACC <SrcReg>, <offset>
1390 MachineBasicBlock &MBB = *MI.getParent();
1391 MachineFunction &MF = *MBB.getParent();
1392 const PPCSubtarget &Subtarget = MF.getSubtarget<PPCSubtarget>();
1393 const TargetInstrInfo &TII = *Subtarget.getInstrInfo();
1394 DebugLoc DL = MI.getDebugLoc();
1395 bool IsLittleEndian = Subtarget.isLittleEndian();
1396
1398
// Extract the 512-bit DMR contents into two fresh VSX register pairs, then
// store each pair with STXVP, swapping slot halves on little-endian.
1399 const TargetRegisterClass *RC = &PPC::VSRpRCRegClass;
1400 Register VSRpReg0 = MF.getRegInfo().createVirtualRegister(RC);
1401 Register VSRpReg1 = MF.getRegInfo().createVirtualRegister(RC);
1402 Register SrcReg = MI.getOperand(0).getReg();
1403
1404 BuildMI(MBB, II, DL, TII.get(PPC::DMXXEXTFDMR512), VSRpReg0)
1405 .addDef(VSRpReg1)
1406 .addReg(SrcReg);
1407
1408 addFrameReference(BuildMI(MBB, II, DL, TII.get(PPC::STXVP))
1409 .addReg(VSRpReg0, RegState::Kill),
1410 FrameIndex, IsLittleEndian ? 32 : 0);
1411 addFrameReference(BuildMI(MBB, II, DL, TII.get(PPC::STXVP))
1412 .addReg(VSRpReg1, RegState::Kill),
1413 FrameIndex, IsLittleEndian ? 0 : 32);
1414
1415 // Discard the pseudo instruction.
1416 MBB.erase(II);
1417}
1418
1419/// lowerWACCRestore - Generate the code to restore the wide accumulator
1420/// register.
// NOTE(review): extraction artifact -- original line 1421 (first line of the
// signature) and line 1431 are missing from this view; by symmetry with the
// spill routine, line 1431 is presumably the
// `emitWAccSpillRestoreInfo(MBB, true);` diagnostic call. Confirm against
// the upstream file.
1422 unsigned FrameIndex) const {
1423 MachineInstr &MI = *II; // <DestReg> = RESTORE_WACC <offset>
1424 MachineBasicBlock &MBB = *MI.getParent();
1425 MachineFunction &MF = *MBB.getParent();
1426 const PPCSubtarget &Subtarget = MF.getSubtarget<PPCSubtarget>();
1427 const TargetInstrInfo &TII = *Subtarget.getInstrInfo();
1428 DebugLoc DL = MI.getDebugLoc();
1429 bool IsLittleEndian = Subtarget.isLittleEndian();
1430
1432
// Reload both VSX register pairs from the slot (halves swapped on
// little-endian) and reassemble the 512-bit DMR register from them.
1433 const TargetRegisterClass *RC = &PPC::VSRpRCRegClass;
1434 Register VSRpReg0 = MF.getRegInfo().createVirtualRegister(RC);
1435 Register VSRpReg1 = MF.getRegInfo().createVirtualRegister(RC);
1436 Register DestReg = MI.getOperand(0).getReg();
1437
1438 addFrameReference(BuildMI(MBB, II, DL, TII.get(PPC::LXVP), VSRpReg0),
1439 FrameIndex, IsLittleEndian ? 32 : 0);
1440 addFrameReference(BuildMI(MBB, II, DL, TII.get(PPC::LXVP), VSRpReg1),
1441 FrameIndex, IsLittleEndian ? 0 : 32);
1442
1443 // Kill VSRpReg0, VSRpReg1 (killedRegState::Killed)
1444 BuildMI(MBB, II, DL, TII.get(PPC::DMXXINSTFDMR512), DestReg)
1445 .addReg(VSRpReg0, RegState::Kill)
1446 .addReg(VSRpReg1, RegState::Kill);
1447
1448 // Discard the pseudo instruction.
1449 MBB.erase(II);
1450}
1451
1452/// lowerQuadwordSpilling - Generate code to spill paired general register.
// NOTE(review): extraction artifact -- original line 1453 (first line of the
// signature) is missing from this view.
1454 unsigned FrameIndex) const {
1455 MachineInstr &MI = *II;
1456 MachineBasicBlock &MBB = *MI.getParent();
1457 MachineFunction &MF = *MBB.getParent();
1458 const PPCSubtarget &Subtarget = MF.getSubtarget<PPCSubtarget>();
1459 const TargetInstrInfo &TII = *Subtarget.getInstrInfo();
1460 DebugLoc DL = MI.getDebugLoc();
1461
1462 Register SrcReg = MI.getOperand(0).getReg();
1463 bool IsKilled = MI.getOperand(0).isKill();
1464
// Map the G8p pair register to its even first underlying 64-bit GPR.
1465 Register Reg = PPC::X0 + (SrcReg - PPC::G8p0) * 2;
1466 bool IsLittleEndian = Subtarget.isLittleEndian();
1467
// Two 8-byte stores; the pair halves swap slot offsets on little-endian.
1468 addFrameReference(BuildMI(MBB, II, DL, TII.get(PPC::STD))
1469 .addReg(Reg, getKillRegState(IsKilled)),
1470 FrameIndex, IsLittleEndian ? 8 : 0);
1471 addFrameReference(BuildMI(MBB, II, DL, TII.get(PPC::STD))
1472 .addReg(Reg + 1, getKillRegState(IsKilled)),
1473 FrameIndex, IsLittleEndian ? 0 : 8);
1474
1475 // Discard the pseudo instruction.
1476 MBB.erase(II);
1477}
1478
1479/// lowerQuadwordRestore - Generate code to restore paired general register.
// NOTE(review): extraction artifact -- original line 1480 (first line of the
// signature) is missing from this view.
1481 unsigned FrameIndex) const {
1482 MachineInstr &MI = *II;
1483 MachineBasicBlock &MBB = *MI.getParent();
1484 MachineFunction &MF = *MBB.getParent();
1485 const PPCSubtarget &Subtarget = MF.getSubtarget<PPCSubtarget>();
1486 const TargetInstrInfo &TII = *Subtarget.getInstrInfo();
1487 DebugLoc DL = MI.getDebugLoc();
1488
1489 Register DestReg = MI.getOperand(0).getReg();
1490 assert(MI.definesRegister(DestReg, /*TRI=*/nullptr) &&
1491 "RESTORE_QUADWORD does not define its destination");
1492
// Map the G8p pair register to its even first underlying 64-bit GPR.
1493 Register Reg = PPC::X0 + (DestReg - PPC::G8p0) * 2;
1494 bool IsLittleEndian = Subtarget.isLittleEndian();
1495
// Two 8-byte loads mirroring lowerQuadwordSpilling's offset choice.
1496 addFrameReference(BuildMI(MBB, II, DL, TII.get(PPC::LD), Reg), FrameIndex,
1497 IsLittleEndian ? 8 : 0);
1498 addFrameReference(BuildMI(MBB, II, DL, TII.get(PPC::LD), Reg + 1), FrameIndex,
1499 IsLittleEndian ? 0 : 8);
1500
1501 // Discard the pseudo instruction.
1502 MBB.erase(II);
1503}
1504
// NOTE(review): extraction artifact -- the first line of the signature
// (original line 1505, presumably
// `bool PPCRegisterInfo::hasReservedSpillSlot(const MachineFunction &MF,`)
// is missing from this view.
// Returns true (and the reserved FrameIdx) when Reg already has a dedicated
// spill slot, so PEI does not allocate an extra one.
1506 Register Reg, int &FrameIdx) const {
1507 // For the nonvolatile condition registers (CR2, CR3, CR4) return true to
1508 // prevent allocating an additional frame slot.
1509 // For 64-bit ELF and AIX, the CR save area is in the linkage area at SP+8,
1510 // for 32-bit AIX the CR save area is in the linkage area at SP+4.
1511 // We have created a FrameIndex to that spill slot to keep the CalleeSaveInfos
1512 // valid.
1513 // For 32-bit ELF, we have previously created the stack slot if needed, so
1514 // return its FrameIdx.
1515 if (PPC::CR2 <= Reg && Reg <= PPC::CR4) {
1516 FrameIdx = MF.getInfo<PPCFunctionInfo>()->getCRSpillFrameIndex();
1517 return true;
1518 }
1519 return false;
1520}
1521
1522// If the offset must be a multiple of some value, return what that value is.
1523static unsigned offsetMinAlignForOpcode(unsigned OpC) {
1524 switch (OpC) {
1525 default:
1526 return 1;
1527 case PPC::LWA:
1528 case PPC::LWA_32:
1529 case PPC::LD:
1530 case PPC::LDU:
1531 case PPC::STD:
1532 case PPC::STDU:
1533 case PPC::DFLOADf32:
1534 case PPC::DFLOADf64:
1535 case PPC::DFSTOREf32:
1536 case PPC::DFSTOREf64:
1537 case PPC::LXSD:
1538 case PPC::LXSSP:
1539 case PPC::STXSD:
1540 case PPC::STXSSP:
1541 case PPC::STQ:
1542 return 4;
1543 case PPC::EVLDD:
1544 case PPC::EVSTDD:
1545 return 8;
1546 case PPC::LXV:
1547 case PPC::STXV:
1548 case PPC::LQ:
1549 case PPC::LXVP:
1550 case PPC::STXVP:
1551 return 16;
1552 }
1553}
1554
1555// If the offset must be a multiple of some value, return what that value is.
1556static unsigned offsetMinAlign(const MachineInstr &MI) {
1557 unsigned OpC = MI.getOpcode();
1558 return offsetMinAlignForOpcode(OpC);
1559}
1560
1561// Return the OffsetOperandNo given the FIOperandNum (and the instruction).
1562static unsigned getOffsetONFromFION(const MachineInstr &MI,
1563 unsigned FIOperandNum) {
1564 // Take into account whether it's an add or mem instruction
1565 unsigned OffsetOperandNo = (FIOperandNum == 2) ? 1 : 2;
1566 if (MI.isInlineAsm())
1567 OffsetOperandNo = FIOperandNum - 1;
1568 else if (MI.getOpcode() == TargetOpcode::STACKMAP ||
1569 MI.getOpcode() == TargetOpcode::PATCHPOINT)
1570 OffsetOperandNo = FIOperandNum + 1;
1571
1572 return OffsetOperandNo;
1573}
1574
1575bool
1577 int SPAdj, unsigned FIOperandNum,
1578 RegScavenger *RS) const {
1579 assert(SPAdj == 0 && "Unexpected");
1580
1581 // Get the instruction.
1582 MachineInstr &MI = *II;
1583 // Get the instruction's basic block.
1584 MachineBasicBlock &MBB = *MI.getParent();
1585 // Get the basic block's function.
1586 MachineFunction &MF = *MBB.getParent();
1587 const PPCSubtarget &Subtarget = MF.getSubtarget<PPCSubtarget>();
1588 // Get the instruction info.
1589 const PPCInstrInfo &TII = *Subtarget.getInstrInfo();
1590 // Get the frame info.
1591 MachineFrameInfo &MFI = MF.getFrameInfo();
1592 DebugLoc dl = MI.getDebugLoc();
1593
1594 unsigned OffsetOperandNo = getOffsetONFromFION(MI, FIOperandNum);
1595
1596 // Get the frame index.
1597 int FrameIndex = MI.getOperand(FIOperandNum).getIndex();
1598
1599 // Get the frame pointer save index. Users of this index are primarily
1600 // DYNALLOC instructions.
1602 int FPSI = FI->getFramePointerSaveIndex();
1603 // Get the instruction opcode.
1604 unsigned OpC = MI.getOpcode();
1605
1606 if ((OpC == PPC::DYNAREAOFFSET || OpC == PPC::DYNAREAOFFSET8)) {
1608 // lowerDynamicAreaOffset erases II
1609 return true;
1610 }
1611
1612 // Special case for dynamic alloca.
1613 if (FPSI && FrameIndex == FPSI &&
1614 (OpC == PPC::DYNALLOC || OpC == PPC::DYNALLOC8)) {
1616 // lowerDynamicAlloc erases II
1617 return true;
1618 }
1619
1620 if (FPSI && FrameIndex == FPSI &&
1621 (OpC == PPC::PREPARE_PROBED_ALLOCA_64 ||
1622 OpC == PPC::PREPARE_PROBED_ALLOCA_32 ||
1623 OpC == PPC::PREPARE_PROBED_ALLOCA_NEGSIZE_SAME_REG_64 ||
1624 OpC == PPC::PREPARE_PROBED_ALLOCA_NEGSIZE_SAME_REG_32)) {
1626 // lowerPrepareProbedAlloca erases II
1627 return true;
1628 }
1629
1630 // Special case for pseudo-ops SPILL_CR and RESTORE_CR, etc.
1631 if (OpC == PPC::SPILL_CR) {
1632 lowerCRSpilling(II, FrameIndex);
1633 return true;
1634 } else if (OpC == PPC::RESTORE_CR) {
1635 lowerCRRestore(II, FrameIndex);
1636 return true;
1637 } else if (OpC == PPC::SPILL_CRBIT) {
1638 lowerCRBitSpilling(II, FrameIndex);
1639 return true;
1640 } else if (OpC == PPC::RESTORE_CRBIT) {
1641 lowerCRBitRestore(II, FrameIndex);
1642 return true;
1643 } else if (OpC == PPC::SPILL_ACC || OpC == PPC::SPILL_UACC) {
1644 lowerACCSpilling(II, FrameIndex);
1645 return true;
1646 } else if (OpC == PPC::RESTORE_ACC || OpC == PPC::RESTORE_UACC) {
1647 lowerACCRestore(II, FrameIndex);
1648 return true;
1649 } else if (OpC == PPC::STXVP && DisableAutoPairedVecSt) {
1650 lowerOctWordSpilling(II, FrameIndex);
1651 return true;
1652 } else if (OpC == PPC::SPILL_WACC) {
1653 lowerWACCSpilling(II, FrameIndex);
1654 return true;
1655 } else if (OpC == PPC::RESTORE_WACC) {
1656 lowerWACCRestore(II, FrameIndex);
1657 return true;
1658 } else if (OpC == PPC::SPILL_QUADWORD) {
1659 lowerQuadwordSpilling(II, FrameIndex);
1660 return true;
1661 } else if (OpC == PPC::RESTORE_QUADWORD) {
1662 lowerQuadwordRestore(II, FrameIndex);
1663 return true;
1664 }
1665
1666 // Replace the FrameIndex with base register with GPR1 (SP) or GPR31 (FP).
1667 MI.getOperand(FIOperandNum).ChangeToRegister(
1668 FrameIndex < 0 ? getBaseRegister(MF) : getFrameRegister(MF), false);
1669
1670 // If the instruction is not present in ImmToIdxMap, then it has no immediate
1671 // form (and must be r+r).
1672 bool noImmForm = !MI.isInlineAsm() && OpC != TargetOpcode::STACKMAP &&
1673 OpC != TargetOpcode::PATCHPOINT && !ImmToIdxMap.count(OpC);
1674
1675 // Now add the frame object offset to the offset from r1.
1676 int64_t Offset = MFI.getObjectOffset(FrameIndex);
1677 Offset += MI.getOperand(OffsetOperandNo).getImm();
1678
1679 // If we're not using a Frame Pointer that has been set to the value of the
1680 // SP before having the stack size subtracted from it, then add the stack size
1681 // to Offset to get the correct offset.
1682 // Naked functions have stack size 0, although getStackSize may not reflect
1683 // that because we didn't call all the pieces that compute it for naked
1684 // functions.
1685 if (!MF.getFunction().hasFnAttribute(Attribute::Naked)) {
1686 if (!(hasBasePointer(MF) && FrameIndex < 0))
1687 Offset += MFI.getStackSize();
1688 }
1689
1690 // If we encounter an LXVP/STXVP with an offset that doesn't fit, we can
1691 // transform it to the prefixed version so we don't have to use the XForm.
1692 if ((OpC == PPC::LXVP || OpC == PPC::STXVP) &&
1693 (!isInt<16>(Offset) || (Offset % offsetMinAlign(MI)) != 0) &&
1694 Subtarget.hasPrefixInstrs() && Subtarget.hasP10Vector()) {
1695 unsigned NewOpc = OpC == PPC::LXVP ? PPC::PLXVP : PPC::PSTXVP;
1696 MI.setDesc(TII.get(NewOpc));
1697 OpC = NewOpc;
1698 }
1699
1700 // If we can, encode the offset directly into the instruction. If this is a
1701 // normal PPC "ri" instruction, any 16-bit value can be safely encoded. If
1702 // this is a PPC64 "ix" instruction, only a 16-bit value with the low two bits
1703 // clear can be encoded. This is extremely uncommon, because normally you
1704 // only "std" to a stack slot that is at least 4-byte aligned, but it can
1705 // happen in invalid code.
1706 assert(OpC != PPC::DBG_VALUE &&
1707 "This should be handled in a target-independent way");
1708 // FIXME: This should be factored out to a separate function as prefixed
1709 // instructions add a number of opcodes for which we can use 34-bit imm.
// EVSTDD/EVLDD (SPE loads/stores) only accept an unsigned 8-bit offset;
// all other non-prefixed forms here take a signed 16-bit one, and prefixed
// instructions widen that to a signed 34-bit immediate.
1710 bool OffsetFitsMnemonic = (OpC == PPC::EVSTDD || OpC == PPC::EVLDD) ?
1711 isUInt<8>(Offset) :
1712 isInt<16>(Offset);
1713 if (TII.isPrefixed(MI.getOpcode()))
1714 OffsetFitsMnemonic = isInt<34>(Offset);
// STACKMAP/PATCHPOINT can always take the offset as a plain immediate,
// regardless of its size or alignment.
1715 if (!noImmForm && ((OffsetFitsMnemonic &&
1716 ((Offset % offsetMinAlign(MI)) == 0)) ||
1717 OpC == TargetOpcode::STACKMAP ||
1718 OpC == TargetOpcode::PATCHPOINT)) {
1719 MI.getOperand(OffsetOperandNo).ChangeToImmediate(Offset);
1720 return false;
1721 }
1722
1723 // The offset doesn't fit into a single register, scavenge one to build the
1724 // offset in.
1725
1726 bool is64Bit = TM.isPPC64();
1727 const TargetRegisterClass *G8RC = &PPC::G8RCRegClass;
1728 const TargetRegisterClass *GPRC = &PPC::GPRCRegClass;
1729 const TargetRegisterClass *RC = is64Bit ? G8RC : GPRC;
1730 unsigned NewOpcode = 0u;
// ScavengingFailed: the scavenger has no free GPR of the needed class but
// does have a free VSX scalar (VSFRC) register we can stash a GPR into.
1731 bool ScavengingFailed = RS && RS->getRegsAvailable(RC).none() &&
1732 RS->getRegsAvailable(&PPC::VSFRCRegClass).any();
1733 Register SRegHi, SReg, VSReg;
1734
1735 // The register scavenger is unable to get a GPR but can get a VSR. We
1736 // need to stash a GPR into a VSR so that we can free one up.
1737 if (ScavengingFailed && Subtarget.hasDirectMove()) {
1738 // Pick a volatile register and if we are spilling/restoring that
1739 // particular one, pick the next one.
1740 SRegHi = SReg = is64Bit ? PPC::X4 : PPC::R4;
1741 if (MI.getOperand(0).getReg() == SReg)
1742 SRegHi = SReg = SReg + 1;
1743 VSReg = MF.getRegInfo().createVirtualRegister(&PPC::VSFRCRegClass);
1744 BuildMI(MBB, II, dl, TII.get(is64Bit ? PPC::MTVSRD : PPC::MTVSRWZ), VSReg)
1745 .addReg(SReg);
1746 } else {
1747 SRegHi = MF.getRegInfo().createVirtualRegister(RC);
1748 SReg = MF.getRegInfo().createVirtualRegister(RC);
1749 }
1750
1751 // Insert a set of rA with the full offset value before the ld, st, or add
1752 if (isInt<16>(Offset))
1753 BuildMI(MBB, II, dl, TII.get(is64Bit ? PPC::LI8 : PPC::LI), SReg)
1754 .addImm(Offset);
1755 else if (isInt<32>(Offset)) {
// 32-bit offset: LIS materializes the high 16 bits into SRegHi, then ORI
// merges in the low 16 bits to form the full value in SReg.
1756 BuildMI(MBB, II, dl, TII.get(is64Bit ? PPC::LIS8 : PPC::LIS), SRegHi)
1757 .addImm(Offset >> 16);
1758 BuildMI(MBB, II, dl, TII.get(is64Bit ? PPC::ORI8 : PPC::ORI), SReg)
1759 .addReg(SRegHi, RegState::Kill)
1760 .addImm(Offset);
1761 } else {
1762 assert(is64Bit && "Huge stack is only supported on PPC64");
1763 TII.materializeImmPostRA(MBB, II, dl, SReg, Offset);
1764 }
1765
1766 // Convert into indexed form of the instruction:
1767 //
1768 // sth 0:rA, 1:imm 2:(rB) ==> sthx 0:rA, 2:rB, 1:r0
1769 // addi 0:rA 1:rB, 2, imm ==> add 0:rA, 1:rB, 2:r0
1770 unsigned OperandBase;
1771
// Inline asm keeps its original operand layout, so the base/index pair
// starts at the offset operand itself; any other instruction is rewritten
// to its X-form opcode from ImmToIdxMap with operands starting at index 1.
1772 if (noImmForm)
1773 OperandBase = 1;
1774 else if (OpC != TargetOpcode::INLINEASM &&
1775 OpC != TargetOpcode::INLINEASM_BR) {
1776 assert(ImmToIdxMap.count(OpC) &&
1777 "No indexed form of load or store available!");
1778 NewOpcode = ImmToIdxMap.find(OpC)->second;
1779 MI.setDesc(TII.get(NewOpcode));
1780 OperandBase = 1;
1781 } else {
1782 OperandBase = OffsetOperandNo;
1783 }
1784
1785 Register StackReg = MI.getOperand(FIOperandNum).getReg();
1786 MI.getOperand(OperandBase).ChangeToRegister(StackReg, false);
1787 MI.getOperand(OperandBase + 1).ChangeToRegister(SReg, false, false, true);
1788
1789 // If we stashed a value from a GPR into a VSR, we need to get it back after
1790 // spilling the register.
// Note the ++II: the copy back out of the VSR is inserted *after* MI.
1791 if (ScavengingFailed && Subtarget.hasDirectMove())
1792 BuildMI(MBB, ++II, dl, TII.get(is64Bit ? PPC::MFVSRD : PPC::MFVSRWZ), SReg)
1793 .addReg(VSReg);
1794
1795 // Since these are not real X-Form instructions, we must
1796 // add the registers and access 0(NewReg) rather than
1797 // emitting the X-Form pseudo.
1798 if (NewOpcode == PPC::LQX_PSEUDO || NewOpcode == PPC::STQX_PSEUDO) {
1799 assert(is64Bit && "Quadword loads/stores only supported in 64-bit mode");
1800 Register NewReg = MF.getRegInfo().createVirtualRegister(&PPC::G8RCRegClass);
1801 BuildMI(MBB, II, dl, TII.get(PPC::ADD8), NewReg)
1802 .addReg(SReg, RegState::Kill)
1803 .addReg(StackReg);
1804 MI.setDesc(TII.get(NewOpcode == PPC::LQX_PSEUDO ? PPC::LQ : PPC::STQ));
1805 MI.getOperand(OperandBase + 1).ChangeToRegister(NewReg, false);
1806 MI.getOperand(OperandBase).ChangeToImmediate(0);
1807 }
1808 return false;
1809}
1810
// getFrameRegister - Return the register used to address stack objects:
// the dedicated frame pointer (R31/X31) when hasFP(), otherwise the stack
// pointer (R1/X1), in the width matching the target.
// NOTE(review): the signature line (src-1811) was dropped by the extraction.
1812 const PPCFrameLowering *TFI = getFrameLowering(MF);
1813
1814 if (!TM.isPPC64())
1815 return TFI->hasFP(MF) ? PPC::R31 : PPC::R1;
1816 else
1817 return TFI->hasFP(MF) ? PPC::X31 : PPC::X1;
1818}
1819
// getBaseRegister - Return the base-pointer register for this function,
// falling back to the frame register when no base pointer is in use.
// NOTE(review): the signature line (src-1820) was dropped by the extraction.
1821 const PPCSubtarget &Subtarget = MF.getSubtarget<PPCSubtarget>();
1822 if (!hasBasePointer(MF))
1823 return getFrameRegister(MF);
1824
// 64-bit targets always use X30 as the base pointer.
1825 if (TM.isPPC64())
1826 return PPC::X30;
1827
// 32-bit SVR4 PIC: R29 is used instead (R30 presumably serves as the PIC
// base in that configuration -- confirm against the full source).
1828 if (Subtarget.isSVR4ABI() && TM.isPositionIndependent())
1829 return PPC::R29;
1830
1831 return PPC::R30;
1832}
1833
// hasBasePointer - Return true if this function needs a separate base
// pointer register in addition to FP/SP (see getBaseRegister).
// NOTE(review): the signature line (src-1834) was dropped by the extraction.
1835 if (!EnableBasePointer)
1836 return false;
// NOTE(review): the numbering jumps 1836 -> 1838 here; the missing line
// 1837 is presumably the AlwaysBasePointer check guarding this early
// "return true" (the option is declared at the top of the file) -- confirm
// against the full source.
1838 return true;
1839
1840 // If we need to realign the stack, then the stack pointer can no longer
1841 // serve as an offset into the caller's stack space. As a result, we need a
1842 // base pointer.
1843 return hasStackRealignment(MF);
1844}
1845
1846/// Returns true if the instruction's frame index
1847/// reference would be better served by a base register other than FP
1848/// or SP. Used by LocalStackFrameAllocation to determine which frame index
1849/// references it should create new base registers for.
// NOTE(review): the "bool PPCRegisterInfo::" signature prefix (src-1850)
// was dropped by the extraction.
1851needsFrameBaseReg(MachineInstr *MI, int64_t Offset) const {
1852 assert(Offset < 0 && "Local offset must be negative");
1853
1854 // It's the load/store FI references that cause issues, as it can be difficult
1855 // to materialize the offset if it won't fit in the literal field. Estimate
1856 // based on the size of the local frame and some conservative assumptions
1857 // about the rest of the stack frame (note, this is pre-regalloc, so
1858 // we don't know everything for certain yet) whether this offset is likely
1859 // to be out of range of the immediate. Return true if so.
1860
1861 // We only generate virtual base registers for loads and stores that have
1862 // an r+i form. Return false for everything else.
1863 unsigned OpC = MI->getOpcode();
1864 if (!ImmToIdxMap.count(OpC))
1865 return false;
1866
1867 // Don't generate a new virtual base register just to add zero to it.
1868 if ((OpC == PPC::ADDI || OpC == PPC::ADDI8) &&
1869 MI->getOperand(2).getImm() == 0)
1870 return false;
1871
1872 MachineBasicBlock &MBB = *MI->getParent();
1873 MachineFunction &MF = *MBB.getParent();
1874 const PPCFrameLowering *TFI = getFrameLowering(MF);
// UseEstimate=true: this runs pre-regalloc, so only an estimated frame
// layout is available.
1875 unsigned StackEst = TFI->determineFrameLayout(MF, true);
1876
1877 // If we likely don't need a stack frame, then we probably don't need a
1878 // virtual base register either.
1879 if (!StackEst)
1880 return false;
1881
1882 // Estimate an offset from the stack pointer.
1883 // The incoming offset is relating to the SP at the start of the function,
1884 // but when we access the local it'll be relative to the SP after local
1885 // allocation, so adjust our SP-relative offset by that allocation size.
1886 Offset += StackEst;
1887
1888 // The frame pointer will point to the end of the stack, so estimate the
1889 // offset as the difference between the object offset and the FP location.
// NOTE(review): the final return statement (src-1890, presumably a range
// check on the adjusted Offset) was dropped by the extraction -- confirm
// against the full source.
1891}
1892
1893/// Insert defining instruction(s) for BaseReg to
1894/// be a pointer to FrameIdx at the beginning of the basic block.
// NOTE(review): the extraction dropped the function-name signature line
// (src-1895) and the lines computing the insertion point (src-1899/1900);
// only the trailing parameters below are visible.
1896 int FrameIdx,
1897 int64_t Offset) const {
// ADDI/ADDI8 computes BaseReg = FrameIdx + Offset in the pointer width.
1898 unsigned ADDriOpc = TM.isPPC64() ? PPC::ADDI8 : PPC::ADDI;
1901 DebugLoc DL; // Defaults to "unknown"
1902 if (Ins != MBB->end())
1903 DL = Ins->getDebugLoc();
1904
1905 const MachineFunction &MF = *MBB->getParent();
1906 const PPCSubtarget &Subtarget = MF.getSubtarget<PPCSubtarget>();
1907 const TargetInstrInfo &TII = *Subtarget.getInstrInfo();
1908 const MCInstrDesc &MCID = TII.get(ADDriOpc);
// NOTE(review): src-1909/1910 (presumably "MachineRegisterInfo &MRI = ..."
// and the register-class lookup feeding RC) were dropped by the extraction.
1911 Register BaseReg = MRI.createVirtualRegister(RC);
// Constrain the new vreg to the class required by ADDI's def operand.
1912 MRI.constrainRegClass(BaseReg, TII.getRegClass(MCID, 0, this, MF));
1913
1914 BuildMI(*MBB, Ins, DL, MCID, BaseReg)
1915 .addFrameIndex(FrameIdx).addImm(Offset);
1916
1917 return BaseReg;
1918}
1919
// resolveFrameIndex - Rewrite MI's frame-index operand to use BaseReg plus
// the accumulated immediate Offset.
// NOTE(review): the extraction dropped the opening signature line
// (src-1920); only the trailing parameter below is visible.
1921 int64_t Offset) const {
// Locate the frame-index operand; the assert fires if MI has none.
1922 unsigned FIOperandNum = 0;
1923 while (!MI.getOperand(FIOperandNum).isFI()) {
1924 ++FIOperandNum;
1925 assert(FIOperandNum < MI.getNumOperands() &&
1926 "Instr doesn't have FrameIndex operand!");
1927 }
1928
// Replace the FI operand with the base register, and fold the existing
// immediate displacement into Offset.
1929 MI.getOperand(FIOperandNum).ChangeToRegister(BaseReg, false);
1930 unsigned OffsetOperandNo = getOffsetONFromFION(MI, FIOperandNum);
1931 Offset += MI.getOperand(OffsetOperandNo).getImm();
1932 MI.getOperand(OffsetOperandNo).ChangeToImmediate(Offset);
1933
1934 MachineBasicBlock &MBB = *MI.getParent();
1935 MachineFunction &MF = *MBB.getParent();
1936 const PPCSubtarget &Subtarget = MF.getSubtarget<PPCSubtarget>();
1937 const TargetInstrInfo &TII = *Subtarget.getInstrInfo();
1938 const MCInstrDesc &MCID = MI.getDesc();
// NOTE(review): src-1939 (presumably "MachineRegisterInfo &MRI = ...") was
// dropped by the extraction.
1940 MRI.constrainRegClass(BaseReg,
1941 TII.getRegClass(MCID, FIOperandNum, this, MF));
1942}
1943
// isFrameOffsetLegal - Return true if the given offset can be encoded
// directly in MI's immediate field (no extra base register needed).
// NOTE(review): the extraction dropped the opening signature line
// (src-1944); only the trailing parameters below are visible.
1945 Register BaseReg,
1946 int64_t Offset) const {
// Locate the frame-index operand; the assert fires if MI has none.
1947 unsigned FIOperandNum = 0;
1948 while (!MI->getOperand(FIOperandNum).isFI()) {
1949 ++FIOperandNum;
1950 assert(FIOperandNum < MI->getNumOperands() &&
1951 "Instr doesn't have FrameIndex operand!");
1952 }
1953
// Fold the instruction's existing displacement into the candidate offset.
1954 unsigned OffsetOperandNo = getOffsetONFromFION(*MI, FIOperandNum);
1955 Offset += MI->getOperand(OffsetOperandNo).getImm();
1956
// DBG_VALUE/STACKMAP/PATCHPOINT always accept reg+imm; everything else
// needs a signed 16-bit offset with the opcode's required alignment.
1957 return MI->getOpcode() == PPC::DBG_VALUE || // DBG_VALUE is always Reg+Imm
1958 MI->getOpcode() == TargetOpcode::STACKMAP ||
1959 MI->getOpcode() == TargetOpcode::PATCHPOINT ||
1960 (isInt<16>(Offset) && (Offset % offsetMinAlign(*MI)) == 0);
1961}
unsigned const MachineRegisterInfo * MRI
static const TargetRegisterClass * getRegClass(const MachineInstr &MI, Register Reg)
MachineBasicBlock & MBB
MachineBasicBlock MachineBasicBlock::iterator DebugLoc DL
This file implements the BitVector class.
Analysis containing CSE Info
Definition: CSEInfo.cpp:27
#define LLVM_DEBUG(...)
Definition: Debug.h:106
const HexagonInstrInfo * TII
IRTranslator LLVM IR MI
Live Register Matrix
static cl::opt< bool > EnableBasePointer("m68k-use-base-pointer", cl::Hidden, cl::init(true), cl::desc("Enable use of a base pointer for complex stack frames"))
unsigned const TargetRegisterInfo * TRI
uint64_t IntrinsicInst * II
cl::opt< bool > DisableAutoPairedVecSt("disable-auto-paired-vec-st", cl::desc("disable automatically generated 32byte paired vector stores"), cl::init(true), cl::Hidden)
static cl::opt< unsigned > MaxCRBitSpillDist("ppc-max-crbit-spill-dist", cl::desc("Maximum search distance for definition of CR bit " "spill on ppc"), cl::Hidden, cl::init(100))
static cl::opt< bool > EnableBasePointer("ppc-use-base-pointer", cl::Hidden, cl::init(true), cl::desc("Enable use of a base pointer for complex stack frames"))
static cl::opt< bool > EnableGPRToVecSpills("ppc-enable-gpr-to-vsr-spills", cl::Hidden, cl::init(false), cl::desc("Enable spills from gpr to vsr rather than stack"))
static cl::opt< bool > ReportAccMoves("ppc-report-acc-moves", cl::desc("Emit information about accumulator register spills " "and copies"), cl::Hidden, cl::init(false))
static void emitWAccSpillRestoreInfo(MachineBasicBlock &MBB, bool IsRestore)
static unsigned getOffsetONFromFION(const MachineInstr &MI, unsigned FIOperandNum)
static unsigned offsetMinAlignForOpcode(unsigned OpC)
static void emitAccSpillRestoreInfo(MachineBasicBlock &MBB, bool IsPrimed, bool IsRestore)
static unsigned offsetMinAlign(const MachineInstr &MI)
static cl::opt< bool > StackPtrConst("ppc-stack-ptr-caller-preserved", cl::desc("Consider R1 caller preserved so stack saves of " "caller preserved registers can be LICM candidates"), cl::init(true), cl::Hidden)
static void spillRegPairs(MachineBasicBlock &MBB, MachineBasicBlock::iterator II, DebugLoc DL, const TargetInstrInfo &TII, Register SrcReg, unsigned FrameIndex, bool IsLittleEndian, bool IsKilled, bool TwoPairs)
static cl::opt< bool > AlwaysBasePointer("ppc-always-use-base-pointer", cl::Hidden, cl::init(false), cl::desc("Force the use of a base pointer in every function"))
cl::opt< bool > DisableAutoPairedVecSt
This file declares the machine register scavenger class.
assert(ImpDefSCC.getReg()==AMDGPU::SCC &&ImpDefSCC.isDef())
This file defines the 'Statistic' class, which is designed to be an easy way to expose various metric...
#define STATISTIC(VARNAME, DESC)
Definition: Statistic.h:166
static const TargetRegisterClass * getMinimalPhysRegClass(const TargetRegisterInfo *TRI, MCRegister Reg, TypeT Ty)
static bool is64Bit(const char *name)
ArrayRef - Represent a constant reference to an array (0 or more elements consecutively in memory),...
Definition: ArrayRef.h:41
bool test(unsigned Idx) const
Definition: BitVector.h:461
bool any() const
any - Returns true if any bit is set.
Definition: BitVector.h:170
bool none() const
none - Returns true if none of the bits are set.
Definition: BitVector.h:188
The CalleeSavedInfo class tracks the information need to locate where a callee saved register is in t...
A debug info location.
Definition: DebugLoc.h:33
iterator find(const_arg_type_t< KeyT > Val)
Definition: DenseMap.h:156
size_type count(const_arg_type_t< KeyT > Val) const
Return 1 if the specified key is in the map, 0 otherwise.
Definition: DenseMap.h:152
CallingConv::ID getCallingConv() const
getCallingConv()/setCallingConv(CC) - These method get and set the calling convention of this functio...
Definition: Function.h:277
bool hasFnAttribute(Attribute::AttrKind Kind) const
Return true if the function has the attribute.
Definition: Function.cpp:731
Describe properties that are true of each instruction in the target description file.
Definition: MCInstrDesc.h:198
MCRegAliasIterator enumerates all registers aliasing Reg.
Wrapper class representing physical registers. Should be passed by value.
Definition: MCRegister.h:33
reverse_iterator rend()
const MachineFunction * getParent() const
Return the MachineFunction containing this basic block.
instr_iterator erase(instr_iterator I)
Remove an instruction from the instruction list and delete it.
The MachineFrameInfo class represents an abstract stack frame until prolog/epilog code is inserted.
uint64_t getStackSize() const
Return the number of bytes that must be allocated to hold all of the fixed size frame objects.
Align getMaxAlign() const
Return the alignment in bytes that this function must be aligned to, which is greater than the defaul...
uint64_t getMaxCallFrameSize() const
Return the maximum size of a call frame that must be allocated for an outgoing function call.
bool isCalleeSavedInfoValid() const
Has the callee saved info been calculated yet?
const std::vector< CalleeSavedInfo > & getCalleeSavedInfo() const
Returns a reference to call saved info vector for the current function.
int64_t getObjectOffset(int ObjectIdx) const
Return the assigned stack offset of the specified object from the incoming stack pointer.
bool isFixedObjectIndex(int ObjectIdx) const
Returns true if the specified index corresponds to a fixed stack object.
const TargetSubtargetInfo & getSubtarget() const
getSubtarget - Return the subtarget for which this machine code is being compiled.
StringRef getName() const
getName - Return the name of the corresponding LLVM function.
bool hasInlineAsm() const
Returns true if the function contains any inline assembly.
MachineFrameInfo & getFrameInfo()
getFrameInfo - Return the frame info object for the current function.
MachineRegisterInfo & getRegInfo()
getRegInfo - Return information about the registers currently in use.
Function & getFunction()
Return the LLVM function that this machine code represents.
Ty * getInfo()
getInfo - Keep track of various per-function pieces of information for backends that would like to do...
const MachineInstrBuilder & addImm(int64_t Val) const
Add a new immediate operand.
const MachineInstrBuilder & addFrameIndex(int Idx) const
const MachineInstrBuilder & addReg(Register RegNo, unsigned flags=0, unsigned SubReg=0) const
Add a new virtual register operand.
const MachineInstrBuilder & addDef(Register RegNo, unsigned Flags=0, unsigned SubReg=0) const
Add a virtual register definition operand.
Representation of each machine instruction.
Definition: MachineInstr.h:69
MachineOperand class - Representation of each machine instruction operand.
unsigned getSubReg() const
Register getReg() const
getReg - Returns the register number.
MachineRegisterInfo - Keep track of information for virtual and physical registers,...
Register createVirtualRegister(const TargetRegisterClass *RegClass, StringRef Name="")
createVirtualRegister - Create and return a new virtual register in the function with the specified r...
bool isAllocatable(MCRegister PhysReg) const
isAllocatable - Returns true when PhysReg belongs to an allocatable register class and it hasn't been...
uint64_t determineFrameLayout(const MachineFunction &MF, bool UseEstimate=false, unsigned *NewMaxCallFrameSize=nullptr) const
Determine the frame layout but do not update the machine function.
PPCFunctionInfo - This class is derived from MachineFunction private PowerPC target-specific informat...
void resolveFrameIndex(MachineInstr &MI, Register BaseReg, int64_t Offset) const override
bool getRegAllocationHints(Register VirtReg, ArrayRef< MCPhysReg > Order, SmallVectorImpl< MCPhysReg > &Hints, const MachineFunction &MF, const VirtRegMap *VRM, const LiveRegMatrix *Matrix) const override
Register getFrameRegister(const MachineFunction &MF) const override
bool hasBasePointer(const MachineFunction &MF) const
Register getBaseRegister(const MachineFunction &MF) const
void prepareDynamicAlloca(MachineBasicBlock::iterator II, Register &NegSizeReg, bool &KillNegSizeReg, Register &FramePointer) const
To accomplish dynamic stack allocation, we have to calculate exact size subtracted from the stack poi...
void lowerCRBitSpilling(MachineBasicBlock::iterator II, unsigned FrameIndex) const
void lowerACCSpilling(MachineBasicBlock::iterator II, unsigned FrameIndex) const
lowerACCSpilling - Generate the code for spilling the accumulator register.
bool requiresFrameIndexScavenging(const MachineFunction &MF) const override
void lowerCRSpilling(MachineBasicBlock::iterator II, unsigned FrameIndex) const
lowerCRSpilling - Generate the code for spilling a CR register.
void lowerDynamicAreaOffset(MachineBasicBlock::iterator II) const
void lowerDynamicAlloc(MachineBasicBlock::iterator II) const
lowerDynamicAlloc - Generate the code for allocating an object in the current frame.
const uint32_t * getCallPreservedMask(const MachineFunction &MF, CallingConv::ID CC) const override
void adjustStackMapLiveOutMask(uint32_t *Mask) const override
bool hasReservedSpillSlot(const MachineFunction &MF, Register Reg, int &FrameIdx) const override
const TargetRegisterClass * getPointerRegClass(const MachineFunction &MF, unsigned Kind=0) const override
getPointerRegClass - Return the register class to use to hold pointers.
bool isCallerPreservedPhysReg(MCRegister PhysReg, const MachineFunction &MF) const override
bool needsFrameBaseReg(MachineInstr *MI, int64_t Offset) const override
Returns true if the instruction's frame index reference would be better served by a base register oth...
const uint32_t * getNoPreservedMask() const override
void lowerCRRestore(MachineBasicBlock::iterator II, unsigned FrameIndex) const
bool eliminateFrameIndex(MachineBasicBlock::iterator II, int SPAdj, unsigned FIOperandNum, RegScavenger *RS=nullptr) const override
void lowerQuadwordRestore(MachineBasicBlock::iterator II, unsigned FrameIndex) const
lowerQuadwordRestore - Generate code to restore paired general register.
static void emitAccCopyInfo(MachineBasicBlock &MBB, MCRegister DestReg, MCRegister SrcReg)
bool requiresVirtualBaseRegisters(const MachineFunction &MF) const override
void lowerCRBitRestore(MachineBasicBlock::iterator II, unsigned FrameIndex) const
const MCPhysReg * getCalleeSavedRegs(const MachineFunction *MF) const override
Code Generation virtual methods...
Register materializeFrameBaseRegister(MachineBasicBlock *MBB, int FrameIdx, int64_t Offset) const override
Insert defining instruction(s) for BaseReg to be a pointer to FrameIdx at the beginning of the basic ...
unsigned getRegPressureLimit(const TargetRegisterClass *RC, MachineFunction &MF) const override
void lowerWACCRestore(MachineBasicBlock::iterator II, unsigned FrameIndex) const
lowerWACCRestore - Generate the code to restore the wide accumulator register.
void lowerPrepareProbedAlloca(MachineBasicBlock::iterator II) const
void lowerQuadwordSpilling(MachineBasicBlock::iterator II, unsigned FrameIndex) const
lowerQuadwordSpilling - Generate code to spill paired general register.
PPCRegisterInfo(const PPCTargetMachine &TM)
bool isFrameOffsetLegal(const MachineInstr *MI, Register BaseReg, int64_t Offset) const override
void lowerWACCSpilling(MachineBasicBlock::iterator II, unsigned FrameIndex) const
lowerWACCSpilling - Generate the code for spilling the wide accumulator register.
void lowerOctWordSpilling(MachineBasicBlock::iterator II, unsigned FrameIndex) const
Remove any STXVP[X] instructions and split them out into a pair of STXV[X] instructions if --disable-auto-paired-vec-st is given.
bool isAsmClobberable(const MachineFunction &MF, MCRegister PhysReg) const override
BitVector getReservedRegs(const MachineFunction &MF) const override
const TargetRegisterClass * getLargestLegalSuperClass(const TargetRegisterClass *RC, const MachineFunction &MF) const override
void lowerACCRestore(MachineBasicBlock::iterator II, unsigned FrameIndex) const
lowerACCRestore - Generate the code to restore the accumulator register.
bool is32BitELFABI() const
Definition: PPCSubtarget.h:224
bool isAIXABI() const
Definition: PPCSubtarget.h:219
bool isUsingPCRelativeCalls() const
const PPCInstrInfo * getInstrInfo() const override
Definition: PPCSubtarget.h:150
bool isSVR4ABI() const
Definition: PPCSubtarget.h:220
bool isLittleEndian() const
Definition: PPCSubtarget.h:186
MCRegister getTOCPointerRegister() const
Definition: PPCSubtarget.h:284
MCRegister getStackPointerRegister() const
Definition: PPCSubtarget.h:296
bool is64BitELFABI() const
Definition: PPCSubtarget.h:223
const PPCRegisterInfo * getRegisterInfo() const override
Definition: PPCSubtarget.h:157
Common code between 32-bit and 64-bit PowerPC targets.
BitVector getRegsAvailable(const TargetRegisterClass *RC)
Return all available registers in the register class in Mask.
Wrapper class representing virtual and physical registers.
Definition: Register.h:19
constexpr bool isVirtual() const
Return true if the specified register number is in the virtual register namespace.
Definition: Register.h:91
static constexpr bool isPhysicalRegister(unsigned Reg)
Return true if the specified register number is in the physical register namespace.
Definition: Register.h:65
This class consists of common code factored out of the SmallVector class to reduce code duplication b...
Definition: SmallVector.h:573
void push_back(const T &Elt)
Definition: SmallVector.h:413
bool hasFP(const MachineFunction &MF) const
hasFP - Return true if the specified function should have a dedicated frame pointer register.
Align getStackAlign() const
getStackAlignment - This method returns the number of bytes to which the stack pointer must be aligne...
TargetInstrInfo - Interface to description of machine instruction set.
bool isPositionIndependent() const
bool getAIXExtendedAltivecABI() const
unsigned getID() const
Return the register class ID number.
bool contains(Register Reg) const
Return true if the specified register is included in this register class.
ArrayRef< unsigned > superclasses() const
Returns a list of super-classes.
TargetRegisterInfo base class - We assume that the target defines a static array of TargetRegisterDes...
virtual const TargetRegisterClass * getLargestLegalSuperClass(const TargetRegisterClass *RC, const MachineFunction &) const
Returns the largest super class of RC that is legal to use in the current sub-target and has the same...
virtual bool getRegAllocationHints(Register VirtReg, ArrayRef< MCPhysReg > Order, SmallVectorImpl< MCPhysReg > &Hints, const MachineFunction &MF, const VirtRegMap *VRM=nullptr, const LiveRegMatrix *Matrix=nullptr) const
Get a list of 'hint' registers that the register allocator should try first when allocating a physica...
A Use represents the edge between a Value definition and its users.
Definition: Use.h:43
MCRegister getPhys(Register virtReg) const
returns the physical register mapped to the specified virtual register
Definition: VirtRegMap.h:90
bool hasPhys(Register virtReg) const
returns true if the specified virtual register is mapped to a physical register
Definition: VirtRegMap.h:86
@ AnyReg
OBSOLETED - Used for stack based JavaScript calls.
Definition: CallingConv.h:60
@ Cold
Attempts to make code in the caller as efficient as possible under the assumption that the call is no...
Definition: CallingConv.h:47
@ Implicit
Not emitted register (e.g. carry, or temporary result).
@ Kill
The last use of a register.
@ Undef
Value of the register doesn't matter.
initializer< Ty > init(const Ty &Val)
Definition: CommandLine.h:443
This is an optimization pass for GlobalISel generic memory operations.
Definition: AddressRanges.h:18
@ Offset
Definition: DWP.cpp:480
MachineInstrBuilder BuildMI(MachineFunction &MF, const MIMetadata &MIMD, const MCInstrDesc &MCID)
Builder interface. Specify how to create the initial instruction itself.
bool isAligned(Align Lhs, uint64_t SizeInBytes)
Checks that SizeInBytes is a multiple of the alignment.
Definition: Alignment.h:145
static const MachineInstrBuilder & addFrameReference(const MachineInstrBuilder &MIB, int FI, int Offset=0, bool mem=true)
addFrameReference - This function is used to add a reference to the base of an abstract object on the...
static unsigned getCRFromCRBit(unsigned SrcReg)
raw_ostream & dbgs()
dbgs() - This returns a reference to a raw_ostream for debugging messages.
Definition: Debug.cpp:163
void report_fatal_error(Error Err, bool gen_crash_diag=true)
Report a serious error, calling any installed error handler.
Definition: Error.cpp:167
unsigned getKillRegState(bool B)
Printable printReg(Register Reg, const TargetRegisterInfo *TRI=nullptr, unsigned SubIdx=0, const MachineRegisterInfo *MRI=nullptr)
Prints virtual and physical registers with or without a TRI instance.
This struct is a compact representation of a valid (non-zero power of two) alignment.
Definition: Alignment.h:39
uint64_t value() const
This is a hole in the type system and should not be abused.
Definition: Alignment.h:85