//===-- ARMBaseRegisterInfo.cpp - ARM Register Information ----------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file contains the base ARM implementation of TargetRegisterInfo class.
//
//===----------------------------------------------------------------------===//

#include "ARMBaseRegisterInfo.h"
#include "ARM.h"
#include "ARMBaseInstrInfo.h"
#include "ARMFrameLowering.h"
#include "ARMMachineFunctionInfo.h"
#include "ARMSubtarget.h"
#include "MCTargetDesc/ARMAddressingModes.h"
#include "llvm/ADT/BitVector.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/CodeGen/MachineBasicBlock.h"
#include "llvm/CodeGen/MachineConstantPool.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineInstr.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineOperand.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/RegisterScavenging.h"
#include "llvm/CodeGen/TargetInstrInfo.h"
#include "llvm/CodeGen/VirtRegMap.h"
#include "llvm/IR/Attributes.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DebugLoc.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/Type.h"
#include "llvm/MC/MCInstrDesc.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Target/TargetMachine.h"
#include <cassert>
#include <utility>

#define DEBUG_TYPE "arm-register-info"

#define GET_REGINFO_TARGET_DESC
#include "ARMGenRegisterInfo.inc"

using namespace llvm;

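/// Return the tablegen-generated list of callee-saved registers for this
/// function, selected by calling convention, interrupt/swifterror attributes,
/// push/pop split variation, and whether the target is Darwin or AAPCS-based.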
const MCPhysReg*
ARMBaseRegisterInfo::getCalleeSavedRegs(const MachineFunction *MF) const {
  const ARMSubtarget &STI = MF->getSubtarget<ARMSubtarget>();
  ARMSubtarget::PushPopSplitVariation PushPopSplit =
      STI.getPushPopSplitVariation(*MF);
  const Function &F = MF->getFunction();

  if (F.getCallingConv() == CallingConv::GHC) {
    // GHC set of callee saved regs is empty as all those regs are
    // used for passing STG regs around
    return CSR_NoRegs_SaveList;
  } else if (PushPopSplit == ARMSubtarget::SplitR11WindowsSEH) {
    return CSR_Win_SplitFP_SaveList;
  } else if (F.getCallingConv() == CallingConv::CFGuard_Check) {
    return CSR_Win_AAPCS_CFGuard_Check_SaveList;
  } else if (F.getCallingConv() == CallingConv::SwiftTail) {
    return STI.isTargetDarwin() ? CSR_iOS_SwiftTail_SaveList
                                : (PushPopSplit == ARMSubtarget::SplitR7
                                       ? CSR_ATPCS_SplitPush_SwiftTail_SaveList
                                       : CSR_AAPCS_SwiftTail_SaveList);
  } else if (F.hasFnAttribute("interrupt")) {

    // Don't save the floating point registers if the target does not have
    // floating point registers.
    if (STI.hasFPRegs() && F.hasFnAttribute("save-fp")) {
      bool HasNEON = STI.hasNEON();

      if (STI.isMClass()) {
        assert(!HasNEON && "NEON is only for Cortex-R/A");
        return PushPopSplit == ARMSubtarget::SplitR7
                   ? CSR_ATPCS_SplitPush_FP_SaveList
                   : CSR_AAPCS_FP_SaveList;
      }
      if (F.getFnAttribute("interrupt").getValueAsString() == "FIQ") {
        return HasNEON ? CSR_FIQ_FP_NEON_SaveList : CSR_FIQ_FP_SaveList;
      }
      return HasNEON ? CSR_GenericInt_FP_NEON_SaveList
                     : CSR_GenericInt_FP_SaveList;
    }

    if (STI.isMClass()) {
      // M-class CPUs have hardware which saves the registers needed to allow a
      // function conforming to the AAPCS to function as a handler.
      return PushPopSplit == ARMSubtarget::SplitR7
                 ? CSR_ATPCS_SplitPush_SaveList
                 : CSR_AAPCS_SaveList;
    } else if (F.getFnAttribute("interrupt").getValueAsString() == "FIQ") {
      // Fast interrupt mode gives the handler a private copy of R8-R14, so
      // fewer registers need to be saved to restore user-mode state.
      return CSR_FIQ_SaveList;
    } else {
      // Generally only R13-R14 (i.e. SP, LR) are automatically preserved by
      // exception handling.
      return CSR_GenericInt_SaveList;
    }
  }

  if (STI.getTargetLowering()->supportSwiftError() &&
      F.getAttributes().hasAttrSomewhere(Attribute::SwiftError)) {
    if (STI.isTargetDarwin())
      return CSR_iOS_SwiftError_SaveList;

    return PushPopSplit == ARMSubtarget::SplitR7
               ? CSR_ATPCS_SplitPush_SwiftError_SaveList
               : CSR_AAPCS_SwiftError_SaveList;
  }

  if (STI.isTargetDarwin() && F.getCallingConv() == CallingConv::CXX_FAST_TLS)
    return MF->getInfo<ARMFunctionInfo>()->isSplitCSR()
               ? CSR_iOS_CXX_TLS_PE_SaveList
               : CSR_iOS_CXX_TLS_SaveList;

  if (STI.isTargetDarwin())
    return CSR_iOS_SaveList;

  if (PushPopSplit == ARMSubtarget::SplitR7)
    return STI.createAAPCSFrameChain() ? CSR_AAPCS_SplitPush_R7_SaveList
                                       : CSR_ATPCS_SplitPush_SaveList;

  if (PushPopSplit == ARMSubtarget::SplitR11AAPCSSignRA)
    return CSR_AAPCS_SplitPush_R11_SaveList;

  return CSR_AAPCS_SaveList;
}

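/// Return the callee-saved registers that are handled via copies for
/// split-CSR functions; returns nullptr when split CSR saving is not in use.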
const MCPhysReg *ARMBaseRegisterInfo::getCalleeSavedRegsViaCopy(
    const MachineFunction *MF) const {
  assert(MF && "Invalid MachineFunction pointer.");
  if (MF->getFunction().getCallingConv() == CallingConv::CXX_FAST_TLS &&
      MF->getInfo<ARMFunctionInfo>()->isSplitCSR())
    return CSR_iOS_CXX_TLS_ViaCopy_SaveList;
  return nullptr;
}

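/// Return the register mask describing the registers preserved across a call
/// with the given calling convention from this function.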
const uint32_t *
ARMBaseRegisterInfo::getCallPreservedMask(const MachineFunction &MF,
                                          CallingConv::ID CC) const {
  const ARMSubtarget &STI = MF.getSubtarget<ARMSubtarget>();
  if (CC == CallingConv::GHC)
    // This is academic because all GHC calls are (supposed to be) tail calls
    return CSR_NoRegs_RegMask;
  if (CC == CallingConv::CFGuard_Check)
    return CSR_Win_AAPCS_CFGuard_Check_RegMask;
  if (CC == CallingConv::SwiftTail) {
    return STI.isTargetDarwin() ? CSR_iOS_SwiftTail_RegMask
                                : CSR_AAPCS_SwiftTail_RegMask;
  }
  if (STI.getTargetLowering()->supportSwiftError() &&
      MF.getFunction().getAttributes().hasAttrSomewhere(Attribute::SwiftError))
    return STI.isTargetDarwin() ? CSR_iOS_SwiftError_RegMask
                                : CSR_AAPCS_SwiftError_RegMask;

  if (STI.isTargetDarwin() && CC == CallingConv::CXX_FAST_TLS)
    return CSR_iOS_CXX_TLS_RegMask;
  return STI.isTargetDarwin() ? CSR_iOS_RegMask : CSR_AAPCS_RegMask;
}

const uint32_t*
ARMBaseRegisterInfo::getNoPreservedMask() const {
  return CSR_NoRegs_RegMask;
}

const uint32_t *
ARMBaseRegisterInfo::getTLSCallPreservedMask(const MachineFunction &MF) const {
  assert(MF.getSubtarget<ARMSubtarget>().isTargetDarwin() &&
         "only know about special TLS call on Darwin");
  return CSR_iOS_TLSCall_RegMask;
}

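/// Return the mask of registers preserved across the SjLj dispatch block:
/// no registers when a usable VFP2 base is available (hard-float, not
/// Thumb1-only), otherwise the floating-point registers.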
const uint32_t *
ARMBaseRegisterInfo::getSjLjDispatchPreservedMask(const MachineFunction &MF) const {
  const ARMSubtarget &STI = MF.getSubtarget<ARMSubtarget>();
  if (!STI.useSoftFloat() && STI.hasVFP2Base() && !STI.isThumb1Only())
    return CSR_NoRegs_RegMask;
  else
    return CSR_FPRegs_RegMask;
}

const uint32_t *
ARMBaseRegisterInfo::getThisReturnPreservedMask(const MachineFunction &MF,
                                                CallingConv::ID CC) const {
  const ARMSubtarget &STI = MF.getSubtarget<ARMSubtarget>();
  // This should return a register mask that is the same as that returned by
  // getCallPreservedMask but that additionally preserves the register used for
  // the first i32 argument (which must also be the register used to return a
  // single i32 return value).
  //
  // In case that the calling convention does not use the same register for
  // both or otherwise does not want to enable this optimization, the function
  // should return NULL.
  if (CC == CallingConv::GHC)
    // This is academic because all GHC calls are (supposed to be) tail calls
    return nullptr;
  return STI.isTargetDarwin() ? CSR_iOS_ThisReturn_RegMask
                              : CSR_AAPCS_ThisReturn_RegMask;
}

ArrayRef<MCPhysReg> ARMBaseRegisterInfo::getIntraCallClobberedRegs(
    const MachineFunction *MF) const {
  static const MCPhysReg IntraCallClobberedRegs[] = {ARM::R12};
  return ArrayRef<MCPhysReg>(IntraCallClobberedRegs);
}

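/// Compute the set of registers that can never be allocated: SP, PC, the
/// status registers, the frame/base pointers when reserved, R9 on targets
/// that reserve it, and D16-D31 when the subtarget does not have them.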
BitVector ARMBaseRegisterInfo::
getReservedRegs(const MachineFunction &MF) const {
  const ARMSubtarget &STI = MF.getSubtarget<ARMSubtarget>();
  const ARMFrameLowering *TFI = getFrameLowering(MF);

  // FIXME: avoid re-calculating this every time.
  BitVector Reserved(getNumRegs());
  markSuperRegs(Reserved, ARM::SP);
  markSuperRegs(Reserved, ARM::PC);
  markSuperRegs(Reserved, ARM::FPSCR);
  markSuperRegs(Reserved, ARM::APSR_NZCV);
  if (TFI->isFPReserved(MF))
    markSuperRegs(Reserved, STI.getFramePointerReg());
  if (hasBasePointer(MF))
    markSuperRegs(Reserved, BasePtr);
  // Some targets reserve R9.
  if (STI.isR9Reserved())
    markSuperRegs(Reserved, ARM::R9);
  // Reserve D16-D31 if the subtarget doesn't support them.
  if (!STI.hasD32()) {
    static_assert(ARM::D31 == ARM::D16 + 15, "Register list not consecutive!");
    for (unsigned R = 0; R < 16; ++R)
      markSuperRegs(Reserved, ARM::D16 + R);
  }
  const TargetRegisterClass &RC = ARM::GPRPairRegClass;
  for (unsigned Reg : RC)
    for (MCPhysReg S : subregs(Reg))
      if (Reserved.test(S))
        markSuperRegs(Reserved, Reg);
  // For v8.1m architecture
  markSuperRegs(Reserved, ARM::ZR);

  assert(checkAllSuperRegsMarked(Reserved));
  return Reserved;
}

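/// A physical register may be clobbered by inline assembly only if it is not
/// one of the registers reserved by getReservedRegs().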
bool ARMBaseRegisterInfo::
isAsmClobberable(const MachineFunction &MF, MCRegister PhysReg) const {
  return !getReservedRegs(MF).test(PhysReg);
}

bool ARMBaseRegisterInfo::isInlineAsmReadOnlyReg(const MachineFunction &MF,
                                                 MCRegister PhysReg) const {
  const ARMSubtarget &STI = MF.getSubtarget<ARMSubtarget>();
  const ARMFrameLowering *TFI = getFrameLowering(MF);

  BitVector Reserved(getNumRegs());
  markSuperRegs(Reserved, ARM::PC);
  if (TFI->isFPReserved(MF))
    markSuperRegs(Reserved, STI.getFramePointerReg());
  if (hasBasePointer(MF))
    markSuperRegs(Reserved, BasePtr);
  assert(checkAllSuperRegsMarked(Reserved));
  return Reserved.test(PhysReg.id());
}

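/// Walk the super-class chain of RC and return the largest legal super-class,
/// gating the NEON (QPR/QQPR/QQQQPR) and MVE (MQPR/MQQPR/MQQQQPR) classes on
/// the corresponding subtarget features.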
const TargetRegisterClass *
ARMBaseRegisterInfo::getLargestLegalSuperClass(const TargetRegisterClass *RC,
                                               const MachineFunction &MF) const {
  unsigned SuperID = RC->getID();
  auto I = RC->superclasses().begin();
  auto E = RC->superclasses().end();
  do {
    switch (SuperID) {
    case ARM::GPRRegClassID:
    case ARM::SPRRegClassID:
    case ARM::DPRRegClassID:
    case ARM::GPRPairRegClassID:
      return getRegClass(SuperID);
    case ARM::QPRRegClassID:
    case ARM::QQPRRegClassID:
    case ARM::QQQQPRRegClassID:
      if (MF.getSubtarget<ARMSubtarget>().hasNEON())
        return getRegClass(SuperID);
      break;
    case ARM::MQPRRegClassID:
    case ARM::MQQPRRegClassID:
    case ARM::MQQQQPRRegClassID:
      if (MF.getSubtarget<ARMSubtarget>().hasMVEIntegerOps())
        return getRegClass(SuperID);
      break;
    }
    SuperID = (I != E) ? *I++ : ~0U;
  } while (SuperID != ~0U);
  return RC;
}

const TargetRegisterClass *
ARMBaseRegisterInfo::getPointerRegClass(const MachineFunction &MF,
                                        unsigned Kind) const {
  return &ARM::GPRRegClass;
}

const TargetRegisterClass *
ARMBaseRegisterInfo::getCrossCopyRegClass(const TargetRegisterClass *RC) const {
  if (RC == &ARM::CCRRegClass)
    return &ARM::rGPRRegClass; // Can't copy CCR registers.
  if (RC == &ARM::cl_FPSCR_NZCVRegClass)
    return &ARM::rGPRRegClass;
  return RC;
}

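/// Return a rough register-pressure limit for RC, conservatively assuming a
/// frame pointer is needed whenever the maximum call frame size has not yet
/// been computed.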
unsigned
ARMBaseRegisterInfo::getRegPressureLimit(const TargetRegisterClass *RC,
                                         MachineFunction &MF) const {
  const ARMSubtarget &STI = MF.getSubtarget<ARMSubtarget>();
  const ARMFrameLowering *TFI = getFrameLowering(MF);

  switch (RC->getID()) {
  default:
    return 0;
  case ARM::tGPRRegClassID: {
    // hasFP ends up calling getMaxCallFrameComputed() which may not be
    // available when getPressureLimit() is called as part of
    // ScheduleDAGRRList.
    bool HasFP = MF.getFrameInfo().isMaxCallFrameSizeComputed()
                     ? TFI->hasFP(MF) : true;
    return 5 - HasFP;
  }
  case ARM::GPRRegClassID: {
    bool HasFP = MF.getFrameInfo().isMaxCallFrameSizeComputed()
                     ? TFI->hasFP(MF) : true;
    return 10 - HasFP - (STI.isR9Reserved() ? 1 : 0);
  }
  case ARM::SPRRegClassID: // Currently not used as 'rep' register class.
  case ARM::DPRRegClassID:
    return 32 - 10;
  }
}

// Get the other register in a GPRPair.
static MCRegister getPairedGPR(MCRegister Reg, bool Odd,
                               const MCRegisterInfo *RI) {
  for (MCPhysReg Super : RI->superregs(Reg))
    if (ARM::GPRPairRegClass.contains(Super))
      return RI->getSubReg(Super, Odd ? ARM::gsub_1 : ARM::gsub_0);
  return MCRegister();
}

// Resolve the RegPairEven / RegPairOdd register allocator hints.
bool ARMBaseRegisterInfo::getRegAllocationHints(
    Register VirtReg, ArrayRef<MCPhysReg> Order,
    SmallVectorImpl<MCPhysReg> &Hints, const MachineFunction &MF,
    const VirtRegMap *VRM, const LiveRegMatrix *Matrix) const {
  const MachineRegisterInfo &MRI = MF.getRegInfo();
  std::pair<unsigned, Register> Hint = MRI.getRegAllocationHint(VirtReg);

  unsigned Odd;
  switch (Hint.first) {
  case ARMRI::RegPairEven:
    Odd = 0;
    break;
  case ARMRI::RegPairOdd:
    Odd = 1;
    break;
  case ARMRI::RegLR:
    TargetRegisterInfo::getRegAllocationHints(VirtReg, Order, Hints, MF, VRM);
    if (MRI.getRegClass(VirtReg)->contains(ARM::LR))
      Hints.push_back(ARM::LR);
    return false;
  default:
    return TargetRegisterInfo::getRegAllocationHints(VirtReg, Order, Hints, MF,
                                                     VRM);
  }

  // This register should preferably be even (Odd == 0) or odd (Odd == 1).
  // Check if the other part of the pair has already been assigned, and provide
  // the paired register as the first hint.
  Register Paired = Hint.second;
  if (!Paired)
    return false;

  Register PairedPhys;
  if (Paired.isPhysical()) {
    PairedPhys = Paired;
  } else if (VRM && VRM->hasPhys(Paired)) {
    PairedPhys = getPairedGPR(VRM->getPhys(Paired), Odd, this);
  }

  // First prefer the paired physreg.
  if (PairedPhys && is_contained(Order, PairedPhys))
    Hints.push_back(PairedPhys);

  // Then prefer even or odd registers.
  for (MCPhysReg Reg : Order) {
    if (Reg == PairedPhys || (getEncodingValue(Reg) & 1) != Odd)
      continue;
    // Don't provide hints that are paired to a reserved register.
    MCRegister Paired = getPairedGPR(Reg, !Odd, this);
    if (!Paired || MRI.isReserved(Paired))
      continue;
    Hints.push_back(Reg);
  }
  return false;
}

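/// Keep the even/odd pair hints consistent when 'Reg' is replaced (e.g. by
/// coalescing) with 'NewReg', so both halves of the pair keep pointing at
/// each other.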
void ARMBaseRegisterInfo::updateRegAllocHint(Register Reg, Register NewReg,
                                             MachineFunction &MF) const {
  MachineRegisterInfo *MRI = &MF.getRegInfo();
  std::pair<unsigned, Register> Hint = MRI->getRegAllocationHint(Reg);
  if ((Hint.first == ARMRI::RegPairOdd || Hint.first == ARMRI::RegPairEven) &&
      Hint.second.isVirtual()) {
    // If 'Reg' is one of the even / odd register pair and it's now changed
    // (e.g. coalesced) into a different register, the other register of the
    // pair allocation hint must be updated to reflect the relationship
    // change.
    Register OtherReg = Hint.second;
    Hint = MRI->getRegAllocationHint(OtherReg);
    // Make sure the pair has not already divorced.
    if (Hint.second == Reg) {
      MRI->setRegAllocationHint(OtherReg, Hint.first, NewReg);
      if (NewReg.isVirtual())
        MRI->setRegAllocationHint(NewReg,
                                  Hint.first == ARMRI::RegPairOdd
                                      ? ARMRI::RegPairEven
                                      : ARMRI::RegPairOdd,
                                  OtherReg);
    }
  }
}

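/// Decide whether this function needs a dedicated base pointer: required when
/// the stack is realigned without a reserved call frame, or when Thumb offset
/// ranges cannot reliably reach locals once SP moves.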
bool ARMBaseRegisterInfo::hasBasePointer(const MachineFunction &MF) const {
  const MachineFrameInfo &MFI = MF.getFrameInfo();
  const ARMFunctionInfo *AFI = MF.getInfo<ARMFunctionInfo>();
  const ARMFrameLowering *TFI = getFrameLowering(MF);

  // If we have stack realignment and VLAs, we have no pointer to use to
  // access the stack. If we have stack realignment, and a large call frame,
  // we have no place to allocate the emergency spill slot.
  if (hasStackRealignment(MF) && !TFI->hasReservedCallFrame(MF))
    return true;

  // Thumb has trouble with negative offsets from the FP. Thumb2 has a limited
  // negative range for ldr/str (255), and Thumb1 is positive offsets only.
  //
  // It's going to be better to use the SP or Base Pointer instead. When there
  // are variable sized objects, we can't reference off of the SP, so we
  // reserve a Base Pointer.
  //
  // For Thumb2, estimate whether a negative offset from the frame pointer
  // will be sufficient to reach the whole stack frame. If a function has a
  // smallish frame, it's less likely to have lots of spills and callee saved
  // space, so it's all more likely to be within range of the frame pointer.
  // If it's wrong, the scavenger will still enable access to work, it just
  // won't be optimal. (We should always be able to reach the emergency
  // spill slot from the frame pointer.)
  if (AFI->isThumb2Function() && MFI.hasVarSizedObjects() &&
      MFI.getLocalFrameSize() >= 128)
    return true;
  // For Thumb1, if sp moves, nothing is in range, so force a base pointer.
  // This is necessary for correctness in cases where we need an emergency
  // spill slot. (In Thumb1, we can't use a negative offset from the frame
  // pointer.)
  if (AFI->isThumb1OnlyFunction() && !TFI->hasReservedCallFrame(MF))
    return true;
  return false;
}

bool ARMBaseRegisterInfo::canRealignStack(const MachineFunction &MF) const {
  const MachineRegisterInfo *MRI = &MF.getRegInfo();
  const ARMFrameLowering *TFI = getFrameLowering(MF);
  const ARMSubtarget &STI = MF.getSubtarget<ARMSubtarget>();
  // We can't realign the stack if:
  // 1. Dynamic stack realignment is explicitly disabled,
  // 2. There are VLAs in the function and the base pointer is disabled.
  if (!TargetRegisterInfo::canRealignStack(MF))
    return false;
  // Stack realignment requires a frame pointer. If we already started
  // register allocation with frame pointer elimination, it is too late now.
  if (!MRI->canReserveReg(STI.getFramePointerReg()))
    return false;
  // We may also need a base pointer if there are dynamic allocas or stack
  // pointer adjustments around calls.
  if (TFI->hasReservedCallFrame(MF))
    return true;
  // A base pointer is required and allowed. Check that it isn't too late to
  // reserve it.
  return MRI->canReserveReg(BasePtr);
}

bool ARMBaseRegisterInfo::
cannotEliminateFrame(const MachineFunction &MF) const {
  const MachineFrameInfo &MFI = MF.getFrameInfo();
  if (MF.getTarget().Options.DisableFramePointerElim(MF) && MFI.adjustsStack())
    return true;
  return MFI.hasVarSizedObjects() || MFI.isFrameAddressTaken() ||
         hasStackRealignment(MF);
}

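/// Return the register used to address frame indices: the frame pointer when
/// one is required, otherwise SP.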
Register
ARMBaseRegisterInfo::getFrameRegister(const MachineFunction &MF) const {
  const ARMSubtarget &STI = MF.getSubtarget<ARMSubtarget>();
  const ARMFrameLowering *TFI = getFrameLowering(MF);

  if (TFI->hasFP(MF))
    return STI.getFramePointerReg();
  return ARM::SP;
}

/// emitLoadConstPool - Emits a load from constpool to materialize the
/// specified immediate.
void ARMBaseRegisterInfo::emitLoadConstPool(
    MachineBasicBlock &MBB, MachineBasicBlock::iterator &MBBI,
    const DebugLoc &dl, Register DestReg, unsigned SubIdx, int Val,
    ARMCC::CondCodes Pred, Register PredReg, unsigned MIFlags) const {
  MachineFunction &MF = *MBB.getParent();
  const TargetInstrInfo &TII = *MF.getSubtarget().getInstrInfo();
  MachineConstantPool *ConstantPool = MF.getConstantPool();
  const Constant *C =
      ConstantInt::get(Type::getInt32Ty(MF.getFunction().getContext()), Val);
  unsigned Idx = ConstantPool->getConstantPoolIndex(C, Align(4));

  BuildMI(MBB, MBBI, dl, TII.get(ARM::LDRcp))
      .addReg(DestReg, getDefRegState(true), SubIdx)
      .addConstantPoolIndex(Idx)
      .addImm(0)
      .add(predOps(Pred, PredReg))
      .setMIFlags(MIFlags);
}

bool ARMBaseRegisterInfo::
requiresRegisterScavenging(const MachineFunction &MF) const {
  return true;
}

bool ARMBaseRegisterInfo::
requiresFrameIndexScavenging(const MachineFunction &MF) const {
  return true;
}

bool ARMBaseRegisterInfo::
requiresVirtualBaseRegisters(const MachineFunction &MF) const {
  return true;
}

int64_t ARMBaseRegisterInfo::
getFrameIndexInstrOffset(const MachineInstr *MI, int Idx) const {
  const MCInstrDesc &Desc = MI->getDesc();
  unsigned AddrMode = (Desc.TSFlags & ARMII::AddrModeMask);
  int64_t InstrOffs = 0;
  int Scale = 1;
  unsigned ImmIdx = 0;
  switch (AddrMode) {
  case ARMII::AddrModeT2_i8:
  case ARMII::AddrModeT2_i8pos:
  case ARMII::AddrModeT2_i8neg:
  case ARMII::AddrModeT2_i12:
  case ARMII::AddrMode_i12:
    InstrOffs = MI->getOperand(Idx+1).getImm();
    Scale = 1;
    break;
  case ARMII::AddrMode5: {
    // VFP address mode.
    const MachineOperand &OffOp = MI->getOperand(Idx+1);
    InstrOffs = ARM_AM::getAM5Offset(OffOp.getImm());
    if (ARM_AM::getAM5Op(OffOp.getImm()) == ARM_AM::sub)
      InstrOffs = -InstrOffs;
    Scale = 4;
    break;
  }
  case ARMII::AddrMode2:
    ImmIdx = Idx+2;
    InstrOffs = ARM_AM::getAM2Offset(MI->getOperand(ImmIdx).getImm());
    if (ARM_AM::getAM2Op(MI->getOperand(ImmIdx).getImm()) == ARM_AM::sub)
      InstrOffs = -InstrOffs;
    break;
  case ARMII::AddrMode3:
    ImmIdx = Idx+2;
    InstrOffs = ARM_AM::getAM3Offset(MI->getOperand(ImmIdx).getImm());
    if (ARM_AM::getAM3Op(MI->getOperand(ImmIdx).getImm()) == ARM_AM::sub)
      InstrOffs = -InstrOffs;
    break;
  case ARMII::AddrModeT1_s:
    ImmIdx = Idx+1;
    InstrOffs = MI->getOperand(ImmIdx).getImm();
    Scale = 4;
    break;
  default:
    llvm_unreachable("Unsupported addressing mode!");
  }

  return InstrOffs * Scale;
}

/// needsFrameBaseReg - Returns true if the instruction's frame index
/// reference would be better served by a base register other than FP
/// or SP. Used by LocalStackFrameAllocation to determine which frame index
/// references it should create new base registers for.
bool ARMBaseRegisterInfo::
needsFrameBaseReg(MachineInstr *MI, int64_t Offset) const {
  for (unsigned i = 0; !MI->getOperand(i).isFI(); ++i) {
    assert(i < MI->getNumOperands() && "Instr doesn't have FrameIndex operand!");
  }

  // It's the load/store FI references that cause issues, as it can be difficult
  // to materialize the offset if it won't fit in the literal field. Estimate
  // based on the size of the local frame and some conservative assumptions
  // about the rest of the stack frame (note, this is pre-regalloc, so
  // we don't know everything for certain yet) whether this offset is likely
  // to be out of range of the immediate. Return true if so.

  // We only generate virtual base registers for loads and stores, so
  // return false for everything else.
  unsigned Opc = MI->getOpcode();
  switch (Opc) {
  case ARM::LDRi12: case ARM::LDRH: case ARM::LDRBi12:
  case ARM::STRi12: case ARM::STRH: case ARM::STRBi12:
  case ARM::t2LDRi12: case ARM::t2LDRi8:
  case ARM::t2STRi12: case ARM::t2STRi8:
  case ARM::VLDRS: case ARM::VLDRD:
  case ARM::VSTRS: case ARM::VSTRD:
  case ARM::tSTRspi: case ARM::tLDRspi:
    break;
  default:
    return false;
  }

  // Without a virtual base register, if the function has variable sized
  // objects, all fixed-size local references will be via the frame pointer.
  // Approximate the offset and see if it's legal for the instruction.
  // Note that the incoming offset is based on the SP value at function entry,
  // so it'll be negative.
  MachineFunction &MF = *MI->getParent()->getParent();
  const ARMFrameLowering *TFI = getFrameLowering(MF);
  MachineFrameInfo &MFI = MF.getFrameInfo();
  const ARMFunctionInfo *AFI = MF.getInfo<ARMFunctionInfo>();

  // Estimate an offset from the frame pointer.
  // Conservatively assume all callee-saved registers get pushed. R4-R6
  // will be earlier than the FP, so we ignore those.
  // R7, LR
  int64_t FPOffset = Offset - 8;
  // ARM and Thumb2 functions also need to consider R8-R11 and D8-D15
  if (!AFI->isThumbFunction() || !AFI->isThumb1OnlyFunction())
    FPOffset -= 80;
  // Estimate an offset from the stack pointer.
  // The incoming offset is relating to the SP at the start of the function,
  // but when we access the local it'll be relative to the SP after local
  // allocation, so adjust our SP-relative offset by that allocation size.
  Offset += MFI.getLocalFrameSize();
  // Assume that we'll have at least some spill slots allocated.
  // FIXME: This is a total SWAG number. We should run some statistics
  // and pick a real one.
  Offset += 128; // 128 bytes of spill slots

  // If there's a frame pointer and the addressing mode allows it, try using it.
  // The FP is only available if there is no dynamic realignment. We
  // don't know for sure yet whether we'll need that, so we guess based
  // on whether there are any local variables that would trigger it.
  if (TFI->hasFP(MF) &&
      !((MFI.getLocalFrameMaxAlign() > TFI->getStackAlign()) &&
        canRealignStack(MF))) {
    if (isFrameOffsetLegal(MI, getFrameRegister(MF), FPOffset))
      return false;
  }
  // If we can reference via the stack pointer, try that.
  // FIXME: This (and the code that resolves the references) can be improved
  //        to only disallow SP relative references in the live range of
  //        the VLA(s). In practice, it's unclear how much difference that
  //        would make, but it may be worth doing.
  if (!MFI.hasVarSizedObjects() && isFrameOffsetLegal(MI, ARM::SP, Offset))
    return false;

  // The offset likely isn't legal; we want to allocate a virtual base register.
  return true;
}

/// materializeFrameBaseRegister - Insert defining instruction(s) for BaseReg to
/// be a pointer to FrameIdx at the beginning of the basic block.
Register
ARMBaseRegisterInfo::materializeFrameBaseRegister(MachineBasicBlock *MBB,
                                                  int FrameIdx,
                                                  int64_t Offset) const {
  ARMFunctionInfo *AFI = MBB->getParent()->getInfo<ARMFunctionInfo>();
  unsigned ADDriOpc = !AFI->isThumbFunction() ? ARM::ADDri :
    (AFI->isThumb1OnlyFunction() ? ARM::tADDframe : ARM::t2ADDri);

  MachineBasicBlock::iterator Ins = MBB->begin();
  DebugLoc DL; // Defaults to "unknown"
  if (Ins != MBB->end())
    DL = Ins->getDebugLoc();

  const MachineFunction &MF = *MBB->getParent();
  MachineRegisterInfo &MRI = MBB->getParent()->getRegInfo();
  const TargetInstrInfo &TII = *MF.getSubtarget().getInstrInfo();
  const MCInstrDesc &MCID = TII.get(ADDriOpc);
  Register BaseReg = MRI.createVirtualRegister(&ARM::GPRRegClass);
  MRI.constrainRegClass(BaseReg, TII.getRegClass(MCID, 0, this, MF));

  MachineInstrBuilder MIB = BuildMI(*MBB, Ins, DL, MCID, BaseReg)
    .addFrameIndex(FrameIdx).addImm(Offset);

  if (!AFI->isThumb1OnlyFunction())
    MIB.add(predOps(ARMCC::AL)).add(condCodeOp());

  return BaseReg;
}

void ARMBaseRegisterInfo::resolveFrameIndex(MachineInstr &MI, Register BaseReg,
                                            int64_t Offset) const {
  MachineBasicBlock &MBB = *MI.getParent();
  MachineFunction &MF = *MBB.getParent();
  const ARMBaseInstrInfo &TII =
      *static_cast<const ARMBaseInstrInfo *>(MF.getSubtarget().getInstrInfo());
  ARMFunctionInfo *AFI = MF.getInfo<ARMFunctionInfo>();
  int Off = Offset; // ARM doesn't need the general 64-bit offsets
  unsigned i = 0;

  assert(!AFI->isThumb1OnlyFunction() &&
         "This resolveFrameIndex does not support Thumb1!");

  while (!MI.getOperand(i).isFI()) {
    ++i;
    assert(i < MI.getNumOperands() && "Instr doesn't have FrameIndex operand!");
  }
  bool Done = false;
  if (!AFI->isThumbFunction())
    Done = rewriteARMFrameIndex(MI, i, BaseReg, Off, TII);
  else {
    assert(AFI->isThumb2Function());
    Done = rewriteT2FrameIndex(MI, i, BaseReg, Off, TII, this);
  }
  assert(Done && "Unable to resolve frame index!");
  (void)Done;
}

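/// Return true if the given offset from BaseReg can be encoded directly in
/// MI's addressing mode, taking the mode's scale and range into account.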
bool ARMBaseRegisterInfo::isFrameOffsetLegal(const MachineInstr *MI,
                                             Register BaseReg,
                                             int64_t Offset) const {
  const MCInstrDesc &Desc = MI->getDesc();
  unsigned AddrMode = (Desc.TSFlags & ARMII::AddrModeMask);
  unsigned i = 0;
  for (; !MI->getOperand(i).isFI(); ++i)
    assert(i+1 < MI->getNumOperands() && "Instr doesn't have FrameIndex operand!");

  // AddrMode4 and AddrMode6 cannot handle any offset.
  if (AddrMode == ARMII::AddrMode4 || AddrMode == ARMII::AddrMode6)
    return Offset == 0;

  unsigned NumBits = 0;
  unsigned Scale = 1;
  bool isSigned = true;
  switch (AddrMode) {
  case ARMII::AddrModeT2_i8:
  case ARMII::AddrModeT2_i8pos:
  case ARMII::AddrModeT2_i8neg:
  case ARMII::AddrModeT2_i12:
    // i8 supports only negative, and i12 supports only positive, so
    // based on Offset sign, consider the appropriate instruction
    Scale = 1;
    if (Offset < 0) {
      NumBits = 8;
      Offset = -Offset;
    } else {
      NumBits = 12;
    }
    break;
  case ARMII::AddrMode5:
    // VFP address mode.
    NumBits = 8;
    Scale = 4;
    break;
  case ARMII::AddrMode_i12:
  case ARMII::AddrMode2:
    NumBits = 12;
    break;
  case ARMII::AddrMode3:
    NumBits = 8;
    break;
  case ARMII::AddrModeT1_s:
    NumBits = (BaseReg == ARM::SP ? 8 : 5);
    Scale = 4;
    isSigned = false;
    break;
  default:
    llvm_unreachable("Unsupported addressing mode!");
  }

  Offset += getFrameIndexInstrOffset(MI, i);
  // Make sure the offset is encodable for instructions that scale the
  // immediate.
  if ((Offset & (Scale-1)) != 0)
    return false;

  if (isSigned && Offset < 0)
    Offset = -Offset;

  unsigned Mask = (1 << NumBits) - 1;
  if ((unsigned)Offset <= Mask * Scale)
    return true;

  return false;
}

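/// Rewrite the frame-index operand of MI into a FrameReg +/- Offset reference,
/// materializing any remainder that does not fit the addressing mode into a
/// scratch register.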
bool
ARMBaseRegisterInfo::eliminateFrameIndex(MachineBasicBlock::iterator II,
                                         int SPAdj, unsigned FIOperandNum,
                                         RegScavenger *RS) const {
  MachineInstr &MI = *II;
  MachineBasicBlock &MBB = *MI.getParent();
  MachineFunction &MF = *MBB.getParent();
  const ARMBaseInstrInfo &TII =
      *static_cast<const ARMBaseInstrInfo *>(MF.getSubtarget().getInstrInfo());
  const ARMFrameLowering *TFI = getFrameLowering(MF);
  ARMFunctionInfo *AFI = MF.getInfo<ARMFunctionInfo>();
  assert(!AFI->isThumb1OnlyFunction() &&
         "This eliminateFrameIndex does not support Thumb1!");
  int FrameIndex = MI.getOperand(FIOperandNum).getIndex();
  Register FrameReg;

  int Offset = TFI->ResolveFrameIndexReference(MF, FrameIndex, FrameReg, SPAdj);

  // PEI::scavengeFrameVirtualRegs() cannot accurately track SPAdj because the
  // call frame setup/destroy instructions have already been eliminated. That
  // means the stack pointer cannot be used to access the emergency spill slot
  // when !hasReservedCallFrame().
#ifndef NDEBUG
  if (RS && FrameReg == ARM::SP && RS->isScavengingFrameIndex(FrameIndex)) {
    assert(TFI->hasReservedCallFrame(MF) &&
           "Cannot use SP to access the emergency spill slot in "
           "functions without a reserved call frame");
    assert(!MF.getFrameInfo().hasVarSizedObjects() &&
           "Cannot use SP to access the emergency spill slot in "
           "functions with variable sized frame objects");
  }
#endif // NDEBUG

  assert(!MI.isDebugValue() && "DBG_VALUEs should be handled in target-independent code");

  // Modify MI as necessary to handle as much of 'Offset' as possible
  bool Done = false;
  if (!AFI->isThumbFunction())
    Done = rewriteARMFrameIndex(MI, FIOperandNum, FrameReg, Offset, TII);
  else {
    assert(AFI->isThumb2Function());
    Done = rewriteT2FrameIndex(MI, FIOperandNum, FrameReg, Offset, TII, this);
  }
  if (Done)
    return false;

  // If we get here, the immediate doesn't fit into the instruction. We folded
  // as much as possible above, handle the rest, providing a register that is
  // SP+LargeImm.
  assert(
      (Offset ||
       (MI.getDesc().TSFlags & ARMII::AddrModeMask) == ARMII::AddrMode4 ||
       (MI.getDesc().TSFlags & ARMII::AddrModeMask) == ARMII::AddrMode6 ||
       (MI.getDesc().TSFlags & ARMII::AddrModeMask) == ARMII::AddrModeT2_i7 ||
       (MI.getDesc().TSFlags & ARMII::AddrModeMask) == ARMII::AddrModeT2_i7s2 ||
       (MI.getDesc().TSFlags & ARMII::AddrModeMask) ==
           ARMII::AddrModeT2_i7s4) &&
      "This code isn't needed if offset already handled!");

  unsigned ScratchReg = 0;
  int PIdx = MI.findFirstPredOperandIdx();
  ARMCC::CondCodes Pred = (PIdx == -1)
    ? ARMCC::AL : (ARMCC::CondCodes)MI.getOperand(PIdx).getImm();
  Register PredReg = (PIdx == -1) ? Register() : MI.getOperand(PIdx+1).getReg();

  const MCInstrDesc &MCID = MI.getDesc();
  const TargetRegisterClass *RegClass =
      TII.getRegClass(MCID, FIOperandNum, this, *MI.getParent()->getParent());

  if (Offset == 0 && (FrameReg.isVirtual() || RegClass->contains(FrameReg)))
    // Must be addrmode4/6.
    MI.getOperand(FIOperandNum).ChangeToRegister(FrameReg, false, false, false);
  else {
    ScratchReg = MF.getRegInfo().createVirtualRegister(RegClass);
    if (!AFI->isThumbFunction())
      emitARMRegPlusImmediate(MBB, II, MI.getDebugLoc(), ScratchReg, FrameReg,
                              Offset, Pred, PredReg, TII);
    else {
      assert(AFI->isThumb2Function());
      emitT2RegPlusImmediate(MBB, II, MI.getDebugLoc(), ScratchReg, FrameReg,
                             Offset, Pred, PredReg, TII);
    }
    // Update the original instruction to use the scratch register.
    MI.getOperand(FIOperandNum).ChangeToRegister(ScratchReg, false, false, true);
  }
  return false;
}

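/// Limit coalescing into the large (>= 256-bit) vector register classes on a
/// per-basic-block weight budget so that straight-line NEON-heavy code does
/// not create excessive register pressure (see PR18825).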
bool ARMBaseRegisterInfo::shouldCoalesce(MachineInstr *MI,
                                         const TargetRegisterClass *SrcRC,
                                         unsigned SubReg,
                                         const TargetRegisterClass *DstRC,
                                         unsigned DstSubReg,
                                         const TargetRegisterClass *NewRC,
                                         LiveIntervals &LIS) const {
  auto MBB = MI->getParent();
  auto MF = MBB->getParent();
  const MachineRegisterInfo &MRI = MF->getRegInfo();
  // If not copying into a sub-register this should be ok because we shouldn't
  // need to split the reg.
  if (!DstSubReg)
    return true;
  // Small registers don't frequently cause a problem, so we can coalesce them.
  if (getRegSizeInBits(*NewRC) < 256 && getRegSizeInBits(*DstRC) < 256 &&
      getRegSizeInBits(*SrcRC) < 256)
    return true;

  auto NewRCWeight =
      MRI.getTargetRegisterInfo()->getRegClassWeight(NewRC);
  auto SrcRCWeight =
      MRI.getTargetRegisterInfo()->getRegClassWeight(SrcRC);
  auto DstRCWeight =
      MRI.getTargetRegisterInfo()->getRegClassWeight(DstRC);
  // If the source register class is more expensive than the destination, the
  // coalescing is probably profitable.
  if (SrcRCWeight.RegWeight > NewRCWeight.RegWeight)
    return true;
  if (DstRCWeight.RegWeight > NewRCWeight.RegWeight)
    return true;

  // If the register allocator isn't constrained, we can always allow coalescing;
  // unfortunately we don't know yet if we will be constrained.
  // The goal of this heuristic is to restrict how many expensive registers
  // we allow to coalesce in a given basic block.
  auto AFI = MF->getInfo<ARMFunctionInfo>();
  auto It = AFI->getCoalescedWeight(MBB);

  LLVM_DEBUG(dbgs() << "\tARM::shouldCoalesce - Coalesced Weight: "
                    << It->second << "\n");
  LLVM_DEBUG(dbgs() << "\tARM::shouldCoalesce - Reg Weight: "
                    << NewRCWeight.RegWeight << "\n");

  // This number is the largest round number that meets the criteria:
  //  (1) addresses PR18825
  //  (2) generates better code in some test cases (like vldm-shed-a9.ll)
  //  (3) Doesn't regress any test cases (in-tree, test-suite, and SPEC)
  // In practice the SizeMultiplier will only factor in for straight line code
  // that uses a lot of NEON vectors, which isn't terribly common.
  unsigned SizeMultiplier = MBB->size()/100;
  SizeMultiplier = SizeMultiplier ? SizeMultiplier : 1;
  if (It->second < NewRCWeight.WeightLimit * SizeMultiplier) {
    It->second += NewRCWeight.RegWeight;
    return true;
  }
  return false;
}

bool ARMBaseRegisterInfo::shouldRewriteCopySrc(const TargetRegisterClass *DefRC,
                                               unsigned DefSubReg,
                                               const TargetRegisterClass *SrcRC,
                                               unsigned SrcSubReg) const {
  // We can't extract an SPR from an arbitrary DPR (as opposed to a DPR_VFP2).
  if (DefRC == &ARM::SPRRegClass && DefSubReg == 0 &&
      SrcRC == &ARM::DPRRegClass &&
      (SrcSubReg == ARM::ssub_0 || SrcSubReg == ARM::ssub_1))
    return false;

  return TargetRegisterInfo::shouldRewriteCopySrc(DefRC, DefSubReg,
                                                  SrcRC, SrcSubReg);
}