LLVM 22.0.0git
ARMBaseRegisterInfo.cpp
//===-- ARMBaseRegisterInfo.cpp - ARM Register Information ----------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file contains the base ARM implementation of the TargetRegisterInfo
// class.
//
//===----------------------------------------------------------------------===//

#include "ARMBaseRegisterInfo.h"
#include "ARM.h"
#include "ARMBaseInstrInfo.h"
#include "ARMFrameLowering.h"
#include "ARMMachineFunctionInfo.h"
#include "ARMSubtarget.h"
#include "MCTargetDesc/ARMAddressingModes.h"
#include "llvm/ADT/BitVector.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/CodeGen/MachineBasicBlock.h"
#include "llvm/CodeGen/MachineConstantPool.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineInstr.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineOperand.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/RegisterScavenging.h"
#include "llvm/CodeGen/VirtRegMap.h"
#include "llvm/IR/Attributes.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DebugLoc.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/Type.h"
#include "llvm/MC/MCInstrDesc.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Target/TargetMachine.h"
#include "llvm/Target/TargetOptions.h"
#include <cassert>
#include <utility>

#define DEBUG_TYPE "arm-register-info"

#define GET_REGINFO_TARGET_DESC
#include "ARMGenRegisterInfo.inc"

using namespace llvm;

const MCPhysReg*
ARMBaseRegisterInfo::getCalleeSavedRegs(const MachineFunction *MF) const {
  const ARMSubtarget &STI = MF->getSubtarget<ARMSubtarget>();
  ARMSubtarget::PushPopSplitVariation PushPopSplit =
      STI.getPushPopSplitVariation(*MF);
  const Function &F = MF->getFunction();

  if (F.getCallingConv() == CallingConv::GHC) {
    // The GHC set of callee-saved regs is empty, as all those regs are
    // used for passing STG regs around.
    return CSR_NoRegs_SaveList;
  } else if (PushPopSplit == ARMSubtarget::SplitR11WindowsSEH) {
    return CSR_Win_SplitFP_SaveList;
  } else if (F.getCallingConv() == CallingConv::CFGuard_Check) {
    return CSR_Win_AAPCS_CFGuard_Check_SaveList;
  } else if (F.getCallingConv() == CallingConv::SwiftTail) {
    return STI.isTargetDarwin() ? CSR_iOS_SwiftTail_SaveList
                                : (PushPopSplit == ARMSubtarget::SplitR7
                                       ? CSR_ATPCS_SplitPush_SwiftTail_SaveList
                                       : CSR_AAPCS_SwiftTail_SaveList);
  } else if (F.hasFnAttribute("interrupt")) {

    // Don't save the floating-point registers if the target does not have
    // floating-point registers.
    if (STI.hasFPRegs() && F.hasFnAttribute("save-fp")) {
      bool HasNEON = STI.hasNEON();

      if (STI.isMClass()) {
        assert(!HasNEON && "NEON is only for Cortex-R/A");
        return PushPopSplit == ARMSubtarget::SplitR7
                   ? CSR_ATPCS_SplitPush_FP_SaveList
                   : CSR_AAPCS_FP_SaveList;
      }
      if (F.getFnAttribute("interrupt").getValueAsString() == "FIQ") {
        return HasNEON ? CSR_FIQ_FP_NEON_SaveList : CSR_FIQ_FP_SaveList;
      }
      return HasNEON ? CSR_GenericInt_FP_NEON_SaveList
                     : CSR_GenericInt_FP_SaveList;
    }

    if (STI.isMClass()) {
      // M-class CPUs have hardware which saves the registers needed to allow a
      // function conforming to the AAPCS to function as a handler.
      return PushPopSplit == ARMSubtarget::SplitR7
                 ? CSR_ATPCS_SplitPush_SaveList
                 : CSR_AAPCS_SaveList;
    } else if (F.getFnAttribute("interrupt").getValueAsString() == "FIQ") {
      // Fast interrupt mode gives the handler a private copy of R8-R14, so
      // fewer registers need to be saved to restore user-mode state.
      return CSR_FIQ_SaveList;
    } else {
      // Generally only R13-R14 (i.e. SP, LR) are automatically preserved by
      // exception handling.
      return CSR_GenericInt_SaveList;
    }
  }

  if (STI.getTargetLowering()->supportSwiftError() &&
      F.getAttributes().hasAttrSomewhere(Attribute::SwiftError)) {
    if (STI.isTargetDarwin())
      return CSR_iOS_SwiftError_SaveList;

    return PushPopSplit == ARMSubtarget::SplitR7
               ? CSR_ATPCS_SplitPush_SwiftError_SaveList
               : CSR_AAPCS_SwiftError_SaveList;
  }

  if (STI.isTargetDarwin() && F.getCallingConv() == CallingConv::CXX_FAST_TLS)
    return MF->getInfo<ARMFunctionInfo>()->isSplitCSR()
               ? CSR_iOS_CXX_TLS_PE_SaveList
               : CSR_iOS_CXX_TLS_SaveList;

  if (STI.isTargetDarwin())
    return CSR_iOS_SaveList;

  if (PushPopSplit == ARMSubtarget::SplitR7)
    return STI.createAAPCSFrameChain() ? CSR_AAPCS_SplitPush_R7_SaveList
                                       : CSR_ATPCS_SplitPush_SaveList;

  if (PushPopSplit == ARMSubtarget::SplitR11AAPCSSignRA)
    return CSR_AAPCS_SplitPush_R11_SaveList;

  return CSR_AAPCS_SaveList;
}

const MCPhysReg *ARMBaseRegisterInfo::getCalleeSavedRegsViaCopy(
    const MachineFunction *MF) const {
  assert(MF && "Invalid MachineFunction pointer.");
  if (MF->getFunction().getCallingConv() == CallingConv::CXX_FAST_TLS &&
      MF->getInfo<ARMFunctionInfo>()->isSplitCSR())
    return CSR_iOS_CXX_TLS_ViaCopy_SaveList;
  return nullptr;
}
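
// Explanatory note (not part of the original source): the CSR_*_SaveList
// arrays returned above tell PrologEpilogInserter which registers this
// function must spill and restore in its own prologue/epilogue, while the
// CSR_*_RegMask masks returned below are attached to call instructions so the
// register allocator knows which registers survive a call. Both tables are
// generated by TableGen from the CalleeSavedRegs definitions in
// ARMCallingConv.td.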

const uint32_t *
ARMBaseRegisterInfo::getCallPreservedMask(const MachineFunction &MF,
                                          CallingConv::ID CC) const {
  const ARMSubtarget &STI = MF.getSubtarget<ARMSubtarget>();
  if (CC == CallingConv::GHC)
    // This is academic because all GHC calls are (supposed to be) tail calls
    return CSR_NoRegs_RegMask;
  if (CC == CallingConv::CFGuard_Check)
    return CSR_Win_AAPCS_CFGuard_Check_RegMask;
  if (CC == CallingConv::SwiftTail) {
    return STI.isTargetDarwin() ? CSR_iOS_SwiftTail_RegMask
                                : CSR_AAPCS_SwiftTail_RegMask;
  }
  if (STI.getTargetLowering()->supportSwiftError() &&
      MF.getFunction().getAttributes().hasAttrSomewhere(Attribute::SwiftError))
    return STI.isTargetDarwin() ? CSR_iOS_SwiftError_RegMask
                                : CSR_AAPCS_SwiftError_RegMask;

  if (STI.isTargetDarwin() && CC == CallingConv::CXX_FAST_TLS)
    return CSR_iOS_CXX_TLS_RegMask;
  return STI.isTargetDarwin() ? CSR_iOS_RegMask : CSR_AAPCS_RegMask;
}

const uint32_t*
ARMBaseRegisterInfo::getNoPreservedMask() const {
  return CSR_NoRegs_RegMask;
}

const uint32_t *
ARMBaseRegisterInfo::getTLSCallPreservedMask(const MachineFunction &MF) const {
  assert(MF.getSubtarget<ARMSubtarget>().isTargetDarwin() &&
         "only know about special TLS call on Darwin");
  return CSR_iOS_TLSCall_RegMask;
}

const uint32_t *
ARMBaseRegisterInfo::getSjLjDispatchPreservedMask(
    const MachineFunction &MF) const {
  const ARMSubtarget &STI = MF.getSubtarget<ARMSubtarget>();
  if (!STI.useSoftFloat() && STI.hasVFP2Base() && !STI.isThumb1Only())
    return CSR_NoRegs_RegMask;
  else
    return CSR_FPRegs_RegMask;
}

const uint32_t *
ARMBaseRegisterInfo::getThisReturnPreservedMask(const MachineFunction &MF,
                                                CallingConv::ID CC) const {
  const ARMSubtarget &STI = MF.getSubtarget<ARMSubtarget>();
  // This should return a register mask that is the same as that returned by
  // getCallPreservedMask but that additionally preserves the register used for
  // the first i32 argument (which must also be the register used to return a
  // single i32 return value).
  //
  // In case that the calling convention does not use the same register for
  // both or otherwise does not want to enable this optimization, the function
  // should return nullptr.
  if (CC == CallingConv::GHC)
    // This is academic because all GHC calls are (supposed to be) tail calls
    return nullptr;
  return STI.isTargetDarwin() ? CSR_iOS_ThisReturn_RegMask
                              : CSR_AAPCS_ThisReturn_RegMask;
}

ArrayRef<MCPhysReg> ARMBaseRegisterInfo::getIntraCallClobberedRegs(
    const MachineFunction *MF) const {
  static const MCPhysReg IntraCallClobberedRegs[] = {ARM::R12};
  return ArrayRef<MCPhysReg>(IntraCallClobberedRegs);
}

BitVector ARMBaseRegisterInfo::
getReservedRegs(const MachineFunction &MF) const {
  const ARMSubtarget &STI = MF.getSubtarget<ARMSubtarget>();
  const ARMFrameLowering *TFI = getFrameLowering(MF);

  // FIXME: avoid re-calculating this every time.
  BitVector Reserved(getNumRegs());
  markSuperRegs(Reserved, ARM::SP);
  markSuperRegs(Reserved, ARM::PC);
  markSuperRegs(Reserved, ARM::FPSCR);
  markSuperRegs(Reserved, ARM::APSR_NZCV);
  if (TFI->isFPReserved(MF))
    markSuperRegs(Reserved, STI.getFramePointerReg());
  if (hasBasePointer(MF))
    markSuperRegs(Reserved, BasePtr);
  // Some targets reserve R9.
  if (STI.isR9Reserved())
    markSuperRegs(Reserved, ARM::R9);
  // Reserve D16-D31 if the subtarget doesn't support them.
  if (!STI.hasD32()) {
    static_assert(ARM::D31 == ARM::D16 + 15, "Register list not consecutive!");
    for (unsigned R = 0; R < 16; ++R)
      markSuperRegs(Reserved, ARM::D16 + R);
  }
  // Reserve each GPR pair whose halves include a reserved register.
  const TargetRegisterClass &RC = ARM::GPRPairRegClass;
  for (unsigned Reg : RC)
    for (MCPhysReg S : subregs(Reg))
      if (Reserved.test(S))
        markSuperRegs(Reserved, Reg);
  // For v8.1m architecture
  markSuperRegs(Reserved, ARM::ZR);

  assert(checkAllSuperRegsMarked(Reserved));
  return Reserved;
}

bool ARMBaseRegisterInfo::
isAsmClobberable(const MachineFunction &MF, MCRegister PhysReg) const {
  return !getReservedRegs(MF).test(PhysReg);
}

bool ARMBaseRegisterInfo::isInlineAsmReadOnlyReg(const MachineFunction &MF,
                                                 MCRegister PhysReg) const {
  const ARMSubtarget &STI = MF.getSubtarget<ARMSubtarget>();
  const ARMFrameLowering *TFI = getFrameLowering(MF);

  BitVector Reserved(getNumRegs());
  markSuperRegs(Reserved, ARM::PC);
  if (TFI->isFPReserved(MF))
    markSuperRegs(Reserved, STI.getFramePointerReg());
  if (hasBasePointer(MF))
    markSuperRegs(Reserved, BasePtr);
  assert(checkAllSuperRegsMarked(Reserved));
  return Reserved.test(PhysReg.id());
}

const TargetRegisterClass *
ARMBaseRegisterInfo::getLargestLegalSuperClass(const TargetRegisterClass *RC,
                                               const MachineFunction &MF) const {
  unsigned SuperID = RC->getID();
  auto I = RC->superclasses().begin();
  auto E = RC->superclasses().end();
  do {
    switch (SuperID) {
    case ARM::GPRRegClassID:
    case ARM::SPRRegClassID:
    case ARM::DPRRegClassID:
    case ARM::GPRPairRegClassID:
      return getRegClass(SuperID);
    case ARM::QPRRegClassID:
    case ARM::QQPRRegClassID:
    case ARM::QQQQPRRegClassID:
      if (MF.getSubtarget<ARMSubtarget>().hasNEON())
        return getRegClass(SuperID);
      break;
    case ARM::MQPRRegClassID:
    case ARM::MQQPRRegClassID:
    case ARM::MQQQQPRRegClassID:
      if (MF.getSubtarget<ARMSubtarget>().hasMVEIntegerOps())
        return getRegClass(SuperID);
      break;
    }
    SuperID = (I != E) ? *I++ : ~0U;
  } while (SuperID != ~0U);
  return RC;
}

const TargetRegisterClass *
ARMBaseRegisterInfo::getPointerRegClass(unsigned Kind) const {
  return &ARM::GPRRegClass;
}

const TargetRegisterClass *
ARMBaseRegisterInfo::getCrossCopyRegClass(const TargetRegisterClass *RC) const {
  if (RC == &ARM::CCRRegClass)
    return &ARM::rGPRRegClass; // Can't copy CCR registers.
  if (RC == &ARM::cl_FPSCR_NZCVRegClass)
    return &ARM::rGPRRegClass;
  return RC;
}

unsigned
ARMBaseRegisterInfo::getRegPressureLimit(const TargetRegisterClass *RC,
                                         MachineFunction &MF) const {
  const ARMSubtarget &STI = MF.getSubtarget<ARMSubtarget>();
  const ARMFrameLowering *TFI = getFrameLowering(MF);

  switch (RC->getID()) {
  default:
    return 0;
  case ARM::tGPRRegClassID: {
    // hasFP ends up calling getMaxCallFrameComputed() which may not be
    // available when getPressureLimit() is called as part of
    // ScheduleDAGRRList.
    bool HasFP = MF.getFrameInfo().isMaxCallFrameSizeComputed()
                     ? TFI->hasFP(MF) : true;
    return 5 - HasFP;
  }
  case ARM::GPRRegClassID: {
    bool HasFP = MF.getFrameInfo().isMaxCallFrameSizeComputed()
                     ? TFI->hasFP(MF) : true;
    return 10 - HasFP - (STI.isR9Reserved() ? 1 : 0);
  }
  case ARM::SPRRegClassID: // Currently not used as 'rep' register class.
  case ARM::DPRRegClassID:
    return 32 - 10;
  }
}

// Get the other register in a GPRPair.
static MCRegister getPairedGPR(MCRegister Reg, bool Odd,
                               const MCRegisterInfo *RI) {
  for (MCPhysReg Super : RI->superregs(Reg))
    if (ARM::GPRPairRegClass.contains(Super))
      return RI->getSubReg(Super, Odd ? ARM::gsub_1 : ARM::gsub_0);
  return MCRegister();
}
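
// Illustrative example (not part of the original source): the GPRPair class
// models the even/odd pairs used by instructions such as LDRD/STRD. For
// instance, R0 and R1 are the gsub_0 and gsub_1 halves of the pair register
// R0_R1, so getPairedGPR(R0, /*Odd=*/true, this) finds the super-register
// R0_R1 and returns its gsub_1 half, i.e. R1.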

// Resolve the RegPairEven / RegPairOdd register allocator hints.
bool ARMBaseRegisterInfo::getRegAllocationHints(
    Register VirtReg, ArrayRef<MCPhysReg> Order,
    SmallVectorImpl<MCPhysReg> &Hints, const MachineFunction &MF,
    const VirtRegMap *VRM, const LiveRegMatrix *Matrix) const {
  const MachineRegisterInfo &MRI = MF.getRegInfo();
  std::pair<unsigned, Register> Hint = MRI.getRegAllocationHint(VirtReg);

  unsigned Odd;
  switch (Hint.first) {
  case ARMRI::RegPairEven:
    Odd = 0;
    break;
  case ARMRI::RegPairOdd:
    Odd = 1;
    break;
  case ARMRI::RegLR:
    TargetRegisterInfo::getRegAllocationHints(VirtReg, Order, Hints, MF, VRM);
    if (MRI.getRegClass(VirtReg)->contains(ARM::LR))
      Hints.push_back(ARM::LR);
    return false;
  default:
    return TargetRegisterInfo::getRegAllocationHints(VirtReg, Order, Hints, MF,
                                                     VRM);
  }

  // This register should preferably be even (Odd == 0) or odd (Odd == 1).
  // Check if the other part of the pair has already been assigned, and provide
  // the paired register as the first hint.
  Register Paired = Hint.second;
  if (!Paired)
    return false;

  Register PairedPhys;
  if (Paired.isPhysical()) {
    PairedPhys = Paired;
  } else if (VRM && VRM->hasPhys(Paired)) {
    PairedPhys = getPairedGPR(VRM->getPhys(Paired), Odd, this);
  }

  // First prefer the paired physreg.
  if (PairedPhys && is_contained(Order, PairedPhys))
    Hints.push_back(PairedPhys);

  // Then prefer even or odd registers.
  for (MCPhysReg Reg : Order) {
    if (Reg == PairedPhys || (getEncodingValue(Reg) & 1) != Odd)
      continue;
    // Don't provide hints that are paired to a reserved register.
    MCRegister PairedReg = getPairedGPR(Reg, !Odd, this);
    if (!PairedReg || MRI.isReserved(PairedReg))
      continue;
    Hints.push_back(Reg);
  }
  return false;
}

void ARMBaseRegisterInfo::updateRegAllocHint(Register Reg, Register NewReg,
                                             MachineFunction &MF) const {
  MachineRegisterInfo *MRI = &MF.getRegInfo();
  std::pair<unsigned, Register> Hint = MRI->getRegAllocationHint(Reg);
  if ((Hint.first == ARMRI::RegPairOdd || Hint.first == ARMRI::RegPairEven) &&
      Hint.second.isVirtual()) {
    // If 'Reg' is one of the even / odd register pair and it's now changed
    // (e.g. coalesced) into a different register, the other register of the
    // pair allocation hint must be updated to reflect the relationship change.
    Register OtherReg = Hint.second;
    Hint = MRI->getRegAllocationHint(OtherReg);
    // Make sure the pair has not already divorced.
    if (Hint.second == Reg) {
      MRI->setRegAllocationHint(OtherReg, Hint.first, NewReg);
      if (NewReg.isVirtual())
        MRI->setRegAllocationHint(NewReg,
                                  Hint.first == ARMRI::RegPairOdd
                                      ? ARMRI::RegPairEven
                                      : ARMRI::RegPairOdd,
                                  OtherReg);
    }
  }
}

bool ARMBaseRegisterInfo::hasBasePointer(const MachineFunction &MF) const {
  const MachineFrameInfo &MFI = MF.getFrameInfo();
  const ARMFunctionInfo *AFI = MF.getInfo<ARMFunctionInfo>();
  const ARMFrameLowering *TFI = getFrameLowering(MF);

  // If we have stack realignment and VLAs, we have no pointer to use to
  // access the stack. If we have stack realignment, and a large call frame,
  // we have no place to allocate the emergency spill slot.
  if (hasStackRealignment(MF) && !TFI->hasReservedCallFrame(MF))
    return true;

  // Thumb has trouble with negative offsets from the FP. Thumb2 has a limited
  // negative range for ldr/str (255), and Thumb1 is positive offsets only.
  //
  // It's going to be better to use the SP or Base Pointer instead. When there
  // are variable sized objects, we can't reference off of the SP, so we
  // reserve a Base Pointer.
  //
  // For Thumb2, estimate whether a negative offset from the frame pointer
  // will be sufficient to reach the whole stack frame. If a function has a
  // smallish frame, it's less likely to have lots of spills and callee saved
  // space, so it's all more likely to be within range of the frame pointer.
  // If it's wrong, the scavenger will still enable access to work, it just
  // won't be optimal. (We should always be able to reach the emergency
  // spill slot from the frame pointer.)
  if (AFI->isThumb2Function() && MFI.hasVarSizedObjects() &&
      MFI.getLocalFrameSize() >= 128)
    return true;
  // For Thumb1, if sp moves, nothing is in range, so force a base pointer.
  // This is necessary for correctness in cases where we need an emergency
  // spill slot. (In Thumb1, we can't use a negative offset from the frame
  // pointer.)
  if (AFI->isThumb1OnlyFunction() && !TFI->hasReservedCallFrame(MF))
    return true;
  return false;
}
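
// Note (not part of the original source): when hasBasePointer() returns true,
// the register named by the BasePtr member (R6 in the ARM backend) is reserved
// in getReservedRegs() and used to address fixed-size locals, leaving SP free
// to move for variable-sized objects and the FP free for stack realignment.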

bool ARMBaseRegisterInfo::canRealignStack(const MachineFunction &MF) const {
  const MachineRegisterInfo *MRI = &MF.getRegInfo();
  const ARMFrameLowering *TFI = getFrameLowering(MF);
  const ARMSubtarget &STI = MF.getSubtarget<ARMSubtarget>();
  // We can't realign the stack if:
  // 1. Dynamic stack realignment is explicitly disabled,
  // 2. There are VLAs in the function and the base pointer is disabled.
  if (!TargetRegisterInfo::canRealignStack(MF))
    return false;
  // Stack realignment requires a frame pointer. If we already started
  // register allocation with frame pointer elimination, it is too late now.
  if (!MRI->canReserveReg(STI.getFramePointerReg()))
    return false;
  // We may also need a base pointer if there are dynamic allocas or stack
  // pointer adjustments around calls.
  if (TFI->hasReservedCallFrame(MF))
    return true;
  // A base pointer is required and allowed. Check that it isn't too late to
  // reserve it.
  return MRI->canReserveReg(BasePtr);
}

bool ARMBaseRegisterInfo::
cannotEliminateFrame(const MachineFunction &MF) const {
  const MachineFrameInfo &MFI = MF.getFrameInfo();
  if (MF.getTarget().Options.DisableFramePointerElim(MF) && MFI.adjustsStack())
    return true;
  return MFI.hasVarSizedObjects() || MFI.isFrameAddressTaken() ||
         hasStackRealignment(MF);
}

Register
ARMBaseRegisterInfo::getFrameRegister(const MachineFunction &MF) const {
  const ARMSubtarget &STI = MF.getSubtarget<ARMSubtarget>();
  const ARMFrameLowering *TFI = getFrameLowering(MF);

  if (TFI->hasFP(MF))
    return STI.getFramePointerReg();
  return ARM::SP;
}

/// emitLoadConstPool - Emits a load from constpool to materialize the
/// specified immediate.
void ARMBaseRegisterInfo::emitLoadConstPool(
    MachineBasicBlock &MBB, MachineBasicBlock::iterator &MBBI,
    const DebugLoc &dl, Register DestReg, unsigned SubIdx, int Val,
    ARMCC::CondCodes Pred, Register PredReg, unsigned MIFlags) const {
  MachineFunction &MF = *MBB.getParent();
  const TargetInstrInfo &TII = *MF.getSubtarget().getInstrInfo();
  MachineConstantPool *ConstantPool = MF.getConstantPool();
  const Constant *C =
      ConstantInt::get(Type::getInt32Ty(MF.getFunction().getContext()), Val);
  unsigned Idx = ConstantPool->getConstantPoolIndex(C, Align(4));

  BuildMI(MBB, MBBI, dl, TII.get(ARM::LDRcp))
      .addReg(DestReg, getDefRegState(true), SubIdx)
      .addConstantPoolIndex(Idx)
      .addImm(0)
      .add(predOps(Pred, PredReg))
      .setMIFlags(MIFlags);
}
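
// Illustrative sketch (not part of the original source): for Val = 0x12345678
// this helper adds the constant to the function's constant pool and emits an
// ARM::LDRcp pseudo, which is later expanded to a PC-relative load along the
// lines of:
//   ldr  rDest, .LCPI0_0
//   ...
// .LCPI0_0:
//   .long 0x12345678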

bool ARMBaseRegisterInfo::
requiresRegisterScavenging(const MachineFunction &MF) const {
  return true;
}

bool ARMBaseRegisterInfo::
requiresFrameIndexScavenging(const MachineFunction &MF) const {
  return true;
}

bool ARMBaseRegisterInfo::
requiresVirtualBaseRegisters(const MachineFunction &MF) const {
  return true;
}

int64_t ARMBaseRegisterInfo::
getFrameIndexInstrOffset(const MachineInstr *MI, int Idx) const {
  const MCInstrDesc &Desc = MI->getDesc();
  unsigned AddrMode = (Desc.TSFlags & ARMII::AddrModeMask);
  int64_t InstrOffs = 0;
  int Scale = 1;
  unsigned ImmIdx = 0;
  switch (AddrMode) {
  case ARMII::AddrModeT2_i8:
  case ARMII::AddrModeT2_i8pos:
  case ARMII::AddrModeT2_i8neg:
  case ARMII::AddrModeT2_i12:
  case ARMII::AddrMode_i12:
    InstrOffs = MI->getOperand(Idx+1).getImm();
    Scale = 1;
    break;
  case ARMII::AddrMode5: {
    // VFP address mode.
    const MachineOperand &OffOp = MI->getOperand(Idx+1);
    InstrOffs = ARM_AM::getAM5Offset(OffOp.getImm());
    if (ARM_AM::getAM5Op(OffOp.getImm()) == ARM_AM::sub)
      InstrOffs = -InstrOffs;
    Scale = 4;
    break;
  }
  case ARMII::AddrMode2:
    ImmIdx = Idx+2;
    InstrOffs = ARM_AM::getAM2Offset(MI->getOperand(ImmIdx).getImm());
    if (ARM_AM::getAM2Op(MI->getOperand(ImmIdx).getImm()) == ARM_AM::sub)
      InstrOffs = -InstrOffs;
    break;
  case ARMII::AddrMode3:
    ImmIdx = Idx+2;
    InstrOffs = ARM_AM::getAM3Offset(MI->getOperand(ImmIdx).getImm());
    if (ARM_AM::getAM3Op(MI->getOperand(ImmIdx).getImm()) == ARM_AM::sub)
      InstrOffs = -InstrOffs;
    break;
  case ARMII::AddrModeT1_s:
    ImmIdx = Idx+1;
    InstrOffs = MI->getOperand(ImmIdx).getImm();
    Scale = 4;
    break;
  default:
    llvm_unreachable("Unsupported addressing mode!");
  }

  return InstrOffs * Scale;
}
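
// Worked example (not part of the original source): for a VFP load such as
//   VLDRS %s0, %stack.0, <AM5 imm>
// the AddrMode5 immediate packs an add/sub bit together with an 8-bit count of
// words, so an encoded offset of 3 with the 'sub' bit set decodes to
// InstrOffs = -3, and this function returns -3 * 4 = -12 bytes.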

/// needsFrameBaseReg - Returns true if the instruction's frame index
/// reference would be better served by a base register other than FP
/// or SP. Used by LocalStackFrameAllocation to determine which frame index
/// references it should create new base registers for.
bool ARMBaseRegisterInfo::
needsFrameBaseReg(MachineInstr *MI, int64_t Offset) const {
  for (unsigned i = 0; !MI->getOperand(i).isFI(); ++i) {
    assert(i < MI->getNumOperands() && "Instr doesn't have FrameIndex operand!");
  }

  // It's the load/store FI references that cause issues, as it can be
  // difficult to materialize the offset if it won't fit in the literal field.
  // Estimate based on the size of the local frame and some conservative
  // assumptions about the rest of the stack frame (note, this is pre-regalloc,
  // so we don't know everything for certain yet) whether this offset is likely
  // to be out of range of the immediate. Return true if so.

  // We only generate virtual base registers for loads and stores, so
  // return false for everything else.
  unsigned Opc = MI->getOpcode();
  switch (Opc) {
  case ARM::LDRi12: case ARM::LDRH: case ARM::LDRBi12:
  case ARM::STRi12: case ARM::STRH: case ARM::STRBi12:
  case ARM::t2LDRi12: case ARM::t2LDRi8:
  case ARM::t2STRi12: case ARM::t2STRi8:
  case ARM::VLDRS: case ARM::VLDRD:
  case ARM::VSTRS: case ARM::VSTRD:
  case ARM::tSTRspi: case ARM::tLDRspi:
    break;
  default:
    return false;
  }

  // Without a virtual base register, if the function has variable sized
  // objects, all fixed-size local references will be via the frame pointer.
  // Approximate the offset and see if it's legal for the instruction.
  // Note that the incoming offset is based on the SP value at function entry,
  // so it'll be negative.
  MachineFunction &MF = *MI->getParent()->getParent();
  const ARMFrameLowering *TFI = getFrameLowering(MF);
  MachineFrameInfo &MFI = MF.getFrameInfo();
  ARMFunctionInfo *AFI = MF.getInfo<ARMFunctionInfo>();

  // Estimate an offset from the frame pointer.
  // Conservatively assume all callee-saved registers get pushed. R4-R6
  // will be earlier than the FP, so we ignore those.
  // R7, LR
  int64_t FPOffset = Offset - 8;
  // ARM and Thumb2 functions also need to consider R8-R11 and D8-D15
  if (!AFI->isThumbFunction() || !AFI->isThumb1OnlyFunction())
    FPOffset -= 80;
  // Estimate an offset from the stack pointer.
  // The incoming offset is relative to the SP at the start of the function,
  // but when we access the local it'll be relative to the SP after local
  // allocation, so adjust our SP-relative offset by that allocation size.
  Offset += MFI.getLocalFrameSize();
  // Assume that we'll have at least some spill slots allocated.
  // FIXME: This is a total SWAG number. We should run some statistics
  // and pick a real one.
  Offset += 128; // 128 bytes of spill slots

  // If there's a frame pointer and the addressing mode allows it, try using
  // it. The FP is only available if there is no dynamic realignment. We
  // don't know for sure yet whether we'll need that, so we guess based
  // on whether there are any local variables that would trigger it.
  if (TFI->hasFP(MF) &&
      !((MFI.getLocalFrameMaxAlign() > TFI->getStackAlign()) &&
        canRealignStack(MF))) {
    if (isFrameOffsetLegal(MI, getFrameRegister(MF), FPOffset))
      return false;
  }
  // If we can reference via the stack pointer, try that.
  // FIXME: This (and the code that resolves the references) can be improved
  //        to only disallow SP relative references in the live range of
  //        the VLA(s). In practice, it's unclear how much difference that
  //        would make, but it may be worth doing.
  if (!MFI.hasVarSizedObjects() && isFrameOffsetLegal(MI, ARM::SP, Offset))
    return false;

  // The offset likely isn't legal; we want to allocate a virtual base
  // register.
  return true;
}

/// materializeFrameBaseRegister - Insert defining instruction(s) for BaseReg to
/// be a pointer to FrameIdx at the beginning of the basic block.
Register
ARMBaseRegisterInfo::materializeFrameBaseRegister(MachineBasicBlock *MBB,
                                                  int FrameIdx,
                                                  int64_t Offset) const {
  ARMFunctionInfo *AFI = MBB->getParent()->getInfo<ARMFunctionInfo>();
  unsigned ADDriOpc = !AFI->isThumbFunction() ? ARM::ADDri :
    (AFI->isThumb1OnlyFunction() ? ARM::tADDframe : ARM::t2ADDri);

  MachineBasicBlock::iterator Ins = MBB->begin();
  DebugLoc DL; // Defaults to "unknown"
  if (Ins != MBB->end())
    DL = Ins->getDebugLoc();

  const MachineFunction &MF = *MBB->getParent();
  MachineRegisterInfo &MRI = MBB->getParent()->getRegInfo();
  const ARMBaseInstrInfo &TII =
      *static_cast<const ARMBaseInstrInfo *>(MF.getSubtarget().getInstrInfo());
  const MCInstrDesc &MCID = TII.get(ADDriOpc);
  Register BaseReg = MRI.createVirtualRegister(&ARM::GPRRegClass);
  MRI.constrainRegClass(BaseReg, TII.getRegClass(MCID, 0, this));

  MachineInstrBuilder MIB = BuildMI(*MBB, Ins, DL, MCID, BaseReg)
    .addFrameIndex(FrameIdx).addImm(Offset);

  if (!AFI->isThumb1OnlyFunction())
    MIB.add(predOps(ARMCC::AL)).add(condCodeOp());

  return BaseReg;
}
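
// Note (not part of the original source): the instruction built above looks
// roughly like
//   %base = ADDri %stack.N, <Offset>, 14 /* AL */, $noreg, $noreg
// (or its t2ADDri / tADDframe counterpart); the frame-index operand is later
// rewritten into an FP- or SP-relative form during frame-index elimination.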

void ARMBaseRegisterInfo::resolveFrameIndex(MachineInstr &MI, Register BaseReg,
                                            int64_t Offset) const {
  MachineBasicBlock &MBB = *MI.getParent();
  MachineFunction &MF = *MBB.getParent();
  const ARMBaseInstrInfo &TII =
      *static_cast<const ARMBaseInstrInfo *>(MF.getSubtarget().getInstrInfo());
  ARMFunctionInfo *AFI = MF.getInfo<ARMFunctionInfo>();
  int Off = Offset; // ARM doesn't need the general 64-bit offsets
  unsigned i = 0;

  assert(!AFI->isThumb1OnlyFunction() &&
         "This resolveFrameIndex does not support Thumb1!");

  while (!MI.getOperand(i).isFI()) {
    ++i;
    assert(i < MI.getNumOperands() && "Instr doesn't have FrameIndex operand!");
  }
  bool Done = false;
  if (!AFI->isThumbFunction())
    Done = rewriteARMFrameIndex(MI, i, BaseReg, Off, TII);
  else {
    assert(AFI->isThumb2Function());
    Done = rewriteT2FrameIndex(MI, i, BaseReg, Off, TII, this);
  }
  assert(Done && "Unable to resolve frame index!");
  (void)Done;
}

bool ARMBaseRegisterInfo::isFrameOffsetLegal(const MachineInstr *MI,
                                             Register BaseReg,
                                             int64_t Offset) const {
  const MCInstrDesc &Desc = MI->getDesc();
  unsigned AddrMode = (Desc.TSFlags & ARMII::AddrModeMask);
  unsigned i = 0;
  for (; !MI->getOperand(i).isFI(); ++i)
    assert(i+1 < MI->getNumOperands() &&
           "Instr doesn't have FrameIndex operand!");

  // AddrMode4 and AddrMode6 cannot handle any offset.
  if (AddrMode == ARMII::AddrMode4 || AddrMode == ARMII::AddrMode6)
    return Offset == 0;

  unsigned NumBits = 0;
  unsigned Scale = 1;
  bool isSigned = true;
  switch (AddrMode) {
  case ARMII::AddrModeT2_i8:
  case ARMII::AddrModeT2_i8pos:
  case ARMII::AddrModeT2_i8neg:
  case ARMII::AddrModeT2_i12:
    // i8 supports only negative, and i12 supports only positive, so
    // based on Offset sign, consider the appropriate instruction
    Scale = 1;
    if (Offset < 0) {
      NumBits = 8;
      Offset = -Offset;
    } else {
      NumBits = 12;
    }
    break;
  case ARMII::AddrMode5:
    // VFP address mode.
    NumBits = 8;
    Scale = 4;
    break;
  case ARMII::AddrMode_i12:
  case ARMII::AddrMode2:
    NumBits = 12;
    break;
  case ARMII::AddrMode3:
    NumBits = 8;
    break;
  case ARMII::AddrModeT1_s:
    NumBits = (BaseReg == ARM::SP ? 8 : 5);
    Scale = 4;
    isSigned = false;
    break;
  default:
    llvm_unreachable("Unsupported addressing mode!");
  }

  Offset += getFrameIndexInstrOffset(MI, i);
  // Make sure the offset is encodable for instructions that scale the
  // immediate.
  if ((Offset & (Scale-1)) != 0)
    return false;

  if (isSigned && Offset < 0)
    Offset = -Offset;

  unsigned Mask = (1 << NumBits) - 1;
  if ((unsigned)Offset <= Mask * Scale)
    return true;

  return false;
}
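
// Worked example (not part of the original source): for a tLDRspi
// (AddrModeT1_s) with BaseReg == SP, NumBits is 8 and Scale is 4, so the
// offset must be a multiple of 4 and at most 255 * 4 = 1020 bytes; with a
// non-SP base the field shrinks to 5 bits, capping the offset at 124 bytes.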

bool
ARMBaseRegisterInfo::eliminateFrameIndex(MachineBasicBlock::iterator II,
                                         int SPAdj, unsigned FIOperandNum,
                                         RegScavenger *RS) const {
  MachineInstr &MI = *II;
  MachineBasicBlock &MBB = *MI.getParent();
  MachineFunction &MF = *MBB.getParent();
  const ARMBaseInstrInfo &TII =
      *static_cast<const ARMBaseInstrInfo *>(MF.getSubtarget().getInstrInfo());
  const ARMFrameLowering *TFI = getFrameLowering(MF);
  ARMFunctionInfo *AFI = MF.getInfo<ARMFunctionInfo>();
  assert(!AFI->isThumb1OnlyFunction() &&
         "This eliminateFrameIndex does not support Thumb1!");
  int FrameIndex = MI.getOperand(FIOperandNum).getIndex();
  Register FrameReg;

  int Offset = TFI->ResolveFrameIndexReference(MF, FrameIndex, FrameReg, SPAdj);

  // PEI::scavengeFrameVirtualRegs() cannot accurately track SPAdj because the
  // call frame setup/destroy instructions have already been eliminated. That
  // means the stack pointer cannot be used to access the emergency spill slot
  // when !hasReservedCallFrame().
#ifndef NDEBUG
  if (RS && FrameReg == ARM::SP && RS->isScavengingFrameIndex(FrameIndex)) {
    assert(TFI->hasReservedCallFrame(MF) &&
           "Cannot use SP to access the emergency spill slot in "
           "functions without a reserved call frame");
    assert(!MF.getFrameInfo().hasVarSizedObjects() &&
           "Cannot use SP to access the emergency spill slot in "
           "functions with variable sized frame objects");
  }
#endif // NDEBUG

  assert(!MI.isDebugValue() &&
         "DBG_VALUEs should be handled in target-independent code");

  // Modify MI as necessary to handle as much of 'Offset' as possible
  bool Done = false;
  if (!AFI->isThumbFunction())
    Done = rewriteARMFrameIndex(MI, FIOperandNum, FrameReg, Offset, TII);
  else {
    assert(AFI->isThumb2Function());
    Done = rewriteT2FrameIndex(MI, FIOperandNum, FrameReg, Offset, TII, this);
  }
  if (Done)
    return false;

  // If we get here, the immediate doesn't fit into the instruction. We folded
  // as much as possible above, handle the rest, providing a register that is
  // SP+LargeImm.
  assert(
      (Offset ||
       (MI.getDesc().TSFlags & ARMII::AddrModeMask) == ARMII::AddrMode4 ||
       (MI.getDesc().TSFlags & ARMII::AddrModeMask) == ARMII::AddrMode6 ||
       (MI.getDesc().TSFlags & ARMII::AddrModeMask) == ARMII::AddrModeT2_i7 ||
       (MI.getDesc().TSFlags & ARMII::AddrModeMask) == ARMII::AddrModeT2_i7s2 ||
       (MI.getDesc().TSFlags & ARMII::AddrModeMask) ==
           ARMII::AddrModeT2_i7s4) &&
      "This code isn't needed if offset already handled!");

  unsigned ScratchReg = 0;
  int PIdx = MI.findFirstPredOperandIdx();
  ARMCC::CondCodes Pred = (PIdx == -1)
    ? ARMCC::AL : (ARMCC::CondCodes)MI.getOperand(PIdx).getImm();
  Register PredReg = (PIdx == -1) ? Register() : MI.getOperand(PIdx+1).getReg();

  const MCInstrDesc &MCID = MI.getDesc();
  const TargetRegisterClass *RegClass =
      TII.getRegClass(MCID, FIOperandNum, this);

  if (Offset == 0 && (FrameReg.isVirtual() || RegClass->contains(FrameReg)))
    // Must be addrmode4/6.
    MI.getOperand(FIOperandNum).ChangeToRegister(FrameReg, false, false, false);
  else {
    ScratchReg = MF.getRegInfo().createVirtualRegister(RegClass);
    if (!AFI->isThumbFunction())
      emitARMRegPlusImmediate(MBB, II, MI.getDebugLoc(), ScratchReg, FrameReg,
                              Offset, Pred, PredReg, TII);
    else {
      assert(AFI->isThumb2Function());
      emitT2RegPlusImmediate(MBB, II, MI.getDebugLoc(), ScratchReg, FrameReg,
                             Offset, Pred, PredReg, TII);
    }
    // Update the original instruction to use the scratch register.
    MI.getOperand(FIOperandNum).ChangeToRegister(ScratchReg, false, false,
                                                 true);
  }
  return false;
}

bool ARMBaseRegisterInfo::shouldCoalesce(MachineInstr *MI,
                                         const TargetRegisterClass *SrcRC,
                                         unsigned SubReg,
                                         const TargetRegisterClass *DstRC,
                                         unsigned DstSubReg,
                                         const TargetRegisterClass *NewRC,
                                         LiveIntervals &LIS) const {
  auto MBB = MI->getParent();
  auto MF = MBB->getParent();
  const MachineRegisterInfo &MRI = MF->getRegInfo();
  // If not copying into a sub-register this should be ok because we shouldn't
  // need to split the reg.
  if (!DstSubReg)
    return true;
  // Small registers don't frequently cause a problem, so we can coalesce them.
  if (getRegSizeInBits(*NewRC) < 256 && getRegSizeInBits(*DstRC) < 256 &&
      getRegSizeInBits(*SrcRC) < 256)
    return true;

  auto NewRCWeight =
      MRI.getTargetRegisterInfo()->getRegClassWeight(NewRC);
  auto SrcRCWeight =
      MRI.getTargetRegisterInfo()->getRegClassWeight(SrcRC);
  auto DstRCWeight =
      MRI.getTargetRegisterInfo()->getRegClassWeight(DstRC);
  // If the source register class is more expensive than the destination, the
  // coalescing is probably profitable.
  if (SrcRCWeight.RegWeight > NewRCWeight.RegWeight)
    return true;
  if (DstRCWeight.RegWeight > NewRCWeight.RegWeight)
    return true;

  // If the register allocator isn't constrained, we can always allow
  // coalescing; unfortunately we don't know yet if we will be constrained.
  // The goal of this heuristic is to restrict how many expensive registers
  // we allow to coalesce in a given basic block.
  auto AFI = MF->getInfo<ARMFunctionInfo>();
  auto It = AFI->getCoalescedWeight(MBB);

  LLVM_DEBUG(dbgs() << "\tARM::shouldCoalesce - Coalesced Weight: "
                    << It->second << "\n");
  LLVM_DEBUG(dbgs() << "\tARM::shouldCoalesce - Reg Weight: "
                    << NewRCWeight.RegWeight << "\n");

  // This number is the largest round number that meets the criteria:
  //  (1) addresses PR18825
  //  (2) generates better code in some test cases (like vldm-shed-a9.ll)
  //  (3) doesn't regress any test cases (in-tree, test-suite, and SPEC)
  // In practice the SizeMultiplier will only factor in for straight line code
  // that uses a lot of NEON vectors, which isn't terribly common.
  unsigned SizeMultiplier = MBB->size()/100;
  SizeMultiplier = SizeMultiplier ? SizeMultiplier : 1;
  if (It->second < NewRCWeight.WeightLimit * SizeMultiplier) {
    It->second += NewRCWeight.RegWeight;
    return true;
  }
  return false;
}

bool ARMBaseRegisterInfo::shouldRewriteCopySrc(const TargetRegisterClass *DefRC,
                                               unsigned DefSubReg,
                                               const TargetRegisterClass *SrcRC,
                                               unsigned SrcSubReg) const {
  // We can't extract an SPR from an arbitrary DPR (as opposed to a DPR_VFP2).
  if (DefRC == &ARM::SPRRegClass && DefSubReg == 0 &&
      SrcRC == &ARM::DPRRegClass &&
      (SrcSubReg == ARM::ssub_0 || SrcSubReg == ARM::ssub_1))
    return false;

  return TargetRegisterInfo::shouldRewriteCopySrc(DefRC, DefSubReg,
                                                  SrcRC, SrcSubReg);
}