//===-- ARMBaseRegisterInfo.cpp - ARM Register Information ----------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file contains the base ARM implementation of the TargetRegisterInfo
// class.
//
//===----------------------------------------------------------------------===//

#include "ARMBaseRegisterInfo.h"
#include "ARM.h"
#include "ARMBaseInstrInfo.h"
#include "ARMFrameLowering.h"
#include "ARMMachineFunctionInfo.h"
#include "ARMSubtarget.h"
#include "MCTargetDesc/ARMAddressingModes.h"
#include "MCTargetDesc/ARMBaseInfo.h"
#include "llvm/ADT/BitVector.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/CodeGen/MachineBasicBlock.h"
#include "llvm/CodeGen/MachineConstantPool.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineInstr.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineOperand.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/RegisterScavenging.h"
#include "llvm/CodeGen/TargetInstrInfo.h"
#include "llvm/CodeGen/TargetRegisterInfo.h"
#include "llvm/CodeGen/VirtRegMap.h"
#include "llvm/IR/Attributes.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DebugLoc.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/Type.h"
#include "llvm/MC/MCInstrDesc.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Target/TargetMachine.h"
#include "llvm/Target/TargetOptions.h"
#include <cassert>
#include <utility>

#define DEBUG_TYPE "arm-register-info"

#define GET_REGINFO_TARGET_DESC
#include "ARMGenRegisterInfo.inc"

using namespace llvm;

ARMBaseRegisterInfo::ARMBaseRegisterInfo()
    : ARMGenRegisterInfo(ARM::LR, 0, 0, ARM::PC) {
  ARM_MC::initLLVMToCVRegMapping(this);
}

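// The callee-saved register list is chosen from the tablegen-generated CSR_*
// save lists based on the calling convention, the target (Darwin vs. AAPCS),
// interrupt attributes, swifterror use, and the push/pop split variation.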
const MCPhysReg*
ARMBaseRegisterInfo::getCalleeSavedRegs(const MachineFunction *MF) const {
  const ARMSubtarget &STI = MF->getSubtarget<ARMSubtarget>();
  ARMSubtarget::PushPopSplitVariation PushPopSplit =
      STI.getPushPopSplitVariation(*MF);
  const Function &F = MF->getFunction();

  if (F.getCallingConv() == CallingConv::GHC) {
    // The GHC set of callee-saved registers is empty, as all those registers
    // are used for passing STG regs around.
    return CSR_NoRegs_SaveList;
  } else if (PushPopSplit == ARMSubtarget::SplitR11WindowsSEH) {
    return CSR_Win_SplitFP_SaveList;
  } else if (F.getCallingConv() == CallingConv::CFGuard_Check) {
    return CSR_Win_AAPCS_CFGuard_Check_SaveList;
  } else if (F.getCallingConv() == CallingConv::SwiftTail) {
    return STI.isTargetDarwin() ? CSR_iOS_SwiftTail_SaveList
                                : (PushPopSplit == ARMSubtarget::SplitR7
                                       ? CSR_ATPCS_SplitPush_SwiftTail_SaveList
                                       : CSR_AAPCS_SwiftTail_SaveList);
  } else if (F.hasFnAttribute("interrupt")) {
    if (STI.isMClass()) {
      // M-class CPUs have hardware which saves the registers needed to allow a
      // function conforming to the AAPCS to function as a handler.
      return PushPopSplit == ARMSubtarget::SplitR7
                 ? CSR_ATPCS_SplitPush_SaveList
                 : CSR_AAPCS_SaveList;
    } else if (F.getFnAttribute("interrupt").getValueAsString() == "FIQ") {
      // Fast interrupt mode gives the handler a private copy of R8-R14, so
      // fewer registers need to be saved to restore user-mode state.
      return CSR_FIQ_SaveList;
    } else {
      // Generally only R13-R14 (i.e. SP, LR) are automatically preserved by
      // exception handling.
      return CSR_GenericInt_SaveList;
    }
  }

  if (STI.getTargetLowering()->supportSwiftError() &&
      F.getAttributes().hasAttrSomewhere(Attribute::SwiftError)) {
    if (STI.isTargetDarwin())
      return CSR_iOS_SwiftError_SaveList;

    return PushPopSplit == ARMSubtarget::SplitR7
               ? CSR_ATPCS_SplitPush_SwiftError_SaveList
               : CSR_AAPCS_SwiftError_SaveList;
  }

  if (STI.isTargetDarwin() && F.getCallingConv() == CallingConv::CXX_FAST_TLS)
    return MF->getInfo<ARMFunctionInfo>()->isSplitCSR()
               ? CSR_iOS_CXX_TLS_PE_SaveList
               : CSR_iOS_CXX_TLS_SaveList;

  if (STI.isTargetDarwin())
    return CSR_iOS_SaveList;

  if (PushPopSplit == ARMSubtarget::SplitR7)
    return STI.createAAPCSFrameChain() ? CSR_AAPCS_SplitPush_R7_SaveList
                                       : CSR_ATPCS_SplitPush_SaveList;

  if (PushPopSplit == ARMSubtarget::SplitR11AAPCSSignRA)
    return CSR_AAPCS_SplitPush_R11_SaveList;

  return CSR_AAPCS_SaveList;
}

const MCPhysReg *ARMBaseRegisterInfo::getCalleeSavedRegsViaCopy(
    const MachineFunction *MF) const {
  assert(MF && "Invalid MachineFunction pointer.");
  if (MF->getFunction().getCallingConv() == CallingConv::CXX_FAST_TLS &&
      MF->getInfo<ARMFunctionInfo>()->isSplitCSR())
    return CSR_iOS_CXX_TLS_ViaCopy_SaveList;
  return nullptr;
}

const uint32_t *
ARMBaseRegisterInfo::getCallPreservedMask(const MachineFunction &MF,
                                          CallingConv::ID CC) const {
  const ARMSubtarget &STI = MF.getSubtarget<ARMSubtarget>();
  if (CC == CallingConv::GHC)
    // This is academic because all GHC calls are (supposed to be) tail calls
    return CSR_NoRegs_RegMask;
  if (CC == CallingConv::CFGuard_Check)
    return CSR_Win_AAPCS_CFGuard_Check_RegMask;
  if (CC == CallingConv::SwiftTail) {
    return STI.isTargetDarwin() ? CSR_iOS_SwiftTail_RegMask
                                : CSR_AAPCS_SwiftTail_RegMask;
  }
  if (STI.getTargetLowering()->supportSwiftError() &&
      MF.getFunction().getAttributes().hasAttrSomewhere(Attribute::SwiftError))
    return STI.isTargetDarwin() ? CSR_iOS_SwiftError_RegMask
                                : CSR_AAPCS_SwiftError_RegMask;

  if (STI.isTargetDarwin() && CC == CallingConv::CXX_FAST_TLS)
    return CSR_iOS_CXX_TLS_RegMask;
  return STI.isTargetDarwin() ? CSR_iOS_RegMask : CSR_AAPCS_RegMask;
}

const uint32_t*
ARMBaseRegisterInfo::getNoPreservedMask() const {
  return CSR_NoRegs_RegMask;
}

const uint32_t *
ARMBaseRegisterInfo::getTLSCallPreservedMask(const MachineFunction &MF) const {
  assert(MF.getSubtarget<ARMSubtarget>().isTargetDarwin() &&
         "only know about special TLS call on Darwin");
  return CSR_iOS_TLSCall_RegMask;
}

const uint32_t *
ARMBaseRegisterInfo::getSjLjDispatchPreservedMask(const MachineFunction &MF) const {
  const ARMSubtarget &STI = MF.getSubtarget<ARMSubtarget>();
  if (!STI.useSoftFloat() && STI.hasVFP2Base() && !STI.isThumb1Only())
    return CSR_NoRegs_RegMask;
  else
    return CSR_FPRegs_RegMask;
}

const uint32_t *
ARMBaseRegisterInfo::getThisReturnPreservedMask(const MachineFunction &MF,
                                                CallingConv::ID CC) const {
  const ARMSubtarget &STI = MF.getSubtarget<ARMSubtarget>();
  // This should return a register mask that is the same as that returned by
  // getCallPreservedMask but that additionally preserves the register used for
  // the first i32 argument (which must also be the register used to return a
  // single i32 return value).
  //
  // In case the calling convention does not use the same register for both,
  // or otherwise does not want to enable this optimization, the function
  // should return nullptr.
  if (CC == CallingConv::GHC)
    // This is academic because all GHC calls are (supposed to be) tail calls
    return nullptr;
  return STI.isTargetDarwin() ? CSR_iOS_ThisReturn_RegMask
                              : CSR_AAPCS_ThisReturn_RegMask;
}

ArrayRef<MCPhysReg> ARMBaseRegisterInfo::getIntraCallClobberedRegs(
    const MachineFunction *MF) const {
  static const MCPhysReg IntraCallClobberedRegs[] = {ARM::R12};
  return ArrayRef<MCPhysReg>(IntraCallClobberedRegs);
}

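// Registers that may never be allocated: SP, PC and the status registers are
// always reserved; the frame pointer, the base pointer, R9 and D16-D31 are
// reserved depending on the subtarget and the current function.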
BitVector ARMBaseRegisterInfo::
getReservedRegs(const MachineFunction &MF) const {
  const ARMSubtarget &STI = MF.getSubtarget<ARMSubtarget>();
  const ARMFrameLowering *TFI = getFrameLowering(MF);

  // FIXME: avoid re-calculating this every time.
  BitVector Reserved(getNumRegs());
  markSuperRegs(Reserved, ARM::SP);
  markSuperRegs(Reserved, ARM::PC);
  markSuperRegs(Reserved, ARM::FPSCR);
  markSuperRegs(Reserved, ARM::APSR_NZCV);
  if (TFI->isFPReserved(MF))
    markSuperRegs(Reserved, STI.getFramePointerReg());
  if (hasBasePointer(MF))
    markSuperRegs(Reserved, BasePtr);
  // Some targets reserve R9.
  if (STI.isR9Reserved())
    markSuperRegs(Reserved, ARM::R9);
  // Reserve D16-D31 if the subtarget doesn't support them.
  if (!STI.hasD32()) {
    static_assert(ARM::D31 == ARM::D16 + 15, "Register list not consecutive!");
    for (unsigned R = 0; R < 16; ++R)
      markSuperRegs(Reserved, ARM::D16 + R);
  }
  // Reserve any GPR pair whose halves are themselves reserved.
  const TargetRegisterClass &RC = ARM::GPRPairRegClass;
  for (unsigned Reg : RC)
    for (MCPhysReg S : subregs(Reg))
      if (Reserved.test(S))
        markSuperRegs(Reserved, Reg);
  // For the v8.1-M architecture: ZR is the zero register and is always
  // reserved.
  markSuperRegs(Reserved, ARM::ZR);

  assert(checkAllSuperRegsMarked(Reserved));
  return Reserved;
}

bool ARMBaseRegisterInfo::
isAsmClobberable(const MachineFunction &MF, MCRegister PhysReg) const {
  return !getReservedRegs(MF).test(PhysReg);
}

bool ARMBaseRegisterInfo::isInlineAsmReadOnlyReg(const MachineFunction &MF,
                                                 unsigned PhysReg) const {
  const ARMSubtarget &STI = MF.getSubtarget<ARMSubtarget>();
  const ARMFrameLowering *TFI = getFrameLowering(MF);

  BitVector Reserved(getNumRegs());
  markSuperRegs(Reserved, ARM::PC);
  if (TFI->isFPReserved(MF))
    markSuperRegs(Reserved, STI.getFramePointerReg());
  if (hasBasePointer(MF))
    markSuperRegs(Reserved, BasePtr);
  assert(checkAllSuperRegsMarked(Reserved));
  return Reserved.test(PhysReg);
}

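// Walk the super-class chain looking for the largest legal super-class: the
// plain GPR/SPR/DPR/GPRPair classes are always usable, the Q/QQ/QQQQ classes
// require NEON, and the MQ/MQQ/MQQQQ classes require MVE.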
const TargetRegisterClass *
ARMBaseRegisterInfo::getLargestLegalSuperClass(const TargetRegisterClass *RC,
                                               const MachineFunction &MF) const {
  unsigned SuperID = RC->getID();
  auto I = RC->superclasses().begin();
  auto E = RC->superclasses().end();
  do {
    switch (SuperID) {
    case ARM::GPRRegClassID:
    case ARM::SPRRegClassID:
    case ARM::DPRRegClassID:
    case ARM::GPRPairRegClassID:
      return getRegClass(SuperID);
    case ARM::QPRRegClassID:
    case ARM::QQPRRegClassID:
    case ARM::QQQQPRRegClassID:
      if (MF.getSubtarget<ARMSubtarget>().hasNEON())
        return getRegClass(SuperID);
      break;
    case ARM::MQPRRegClassID:
    case ARM::MQQPRRegClassID:
    case ARM::MQQQQPRRegClassID:
      if (MF.getSubtarget<ARMSubtarget>().hasMVEIntegerOps())
        return getRegClass(SuperID);
      break;
    }
    SuperID = (I != E) ? *I++ : ~0U;
  } while (SuperID != ~0U);
  return RC;
}

const TargetRegisterClass *
ARMBaseRegisterInfo::getPointerRegClass(const MachineFunction &MF,
                                        unsigned Kind) const {
  return &ARM::GPRRegClass;
}

const TargetRegisterClass *
ARMBaseRegisterInfo::getCrossCopyRegClass(const TargetRegisterClass *RC) const {
  if (RC == &ARM::CCRRegClass)
    return &ARM::rGPRRegClass; // Can't copy CCR registers.
  if (RC == &ARM::cl_FPSCR_NZCVRegClass)
    return &ARM::rGPRRegClass;
  return RC;
}

unsigned
ARMBaseRegisterInfo::getRegPressureLimit(const TargetRegisterClass *RC,
                                         MachineFunction &MF) const {
  const ARMSubtarget &STI = MF.getSubtarget<ARMSubtarget>();
  const ARMFrameLowering *TFI = getFrameLowering(MF);

  switch (RC->getID()) {
  default:
    return 0;
  case ARM::tGPRRegClassID: {
    // hasFP ends up querying the max call frame size, which may not yet be
    // computed when getPressureLimit() is called as part of
    // ScheduleDAGRRList.
    bool HasFP = MF.getFrameInfo().isMaxCallFrameSizeComputed()
                     ? TFI->hasFP(MF) : true;
    return 5 - HasFP;
  }
  case ARM::GPRRegClassID: {
    bool HasFP = MF.getFrameInfo().isMaxCallFrameSizeComputed()
                     ? TFI->hasFP(MF) : true;
    return 10 - HasFP - (STI.isR9Reserved() ? 1 : 0);
  }
  case ARM::SPRRegClassID: // Currently not used as 'rep' register class.
  case ARM::DPRRegClassID:
    return 32 - 10;
  }
}

// Get the other register in a GPRPair.
static MCPhysReg getPairedGPR(MCPhysReg Reg, bool Odd,
                              const MCRegisterInfo *RI) {
  for (MCPhysReg Super : RI->superregs(Reg))
    if (ARM::GPRPairRegClass.contains(Super))
      return RI->getSubReg(Super, Odd ? ARM::gsub_1 : ARM::gsub_0);
  return 0;
}

// Resolve the RegPairEven / RegPairOdd register allocator hints.
bool ARMBaseRegisterInfo::getRegAllocationHints(
    Register VirtReg, ArrayRef<MCPhysReg> Order,
    SmallVectorImpl<MCPhysReg> &Hints, const MachineFunction &MF,
    const VirtRegMap *VRM, const LiveRegMatrix *Matrix) const {
  const MachineRegisterInfo &MRI = MF.getRegInfo();
  std::pair<unsigned, Register> Hint = MRI.getRegAllocationHint(VirtReg);

  unsigned Odd;
  switch (Hint.first) {
  case ARMRI::RegPairEven:
    Odd = 0;
    break;
  case ARMRI::RegPairOdd:
    Odd = 1;
    break;
  case ARMRI::RegLR:
    TargetRegisterInfo::getRegAllocationHints(VirtReg, Order, Hints, MF, VRM);
    if (MRI.getRegClass(VirtReg)->contains(ARM::LR))
      Hints.push_back(ARM::LR);
    return false;
  default:
    return TargetRegisterInfo::getRegAllocationHints(VirtReg, Order, Hints, MF,
                                                     VRM);
  }

  // This register should preferably be even (Odd == 0) or odd (Odd == 1).
  // Check if the other part of the pair has already been assigned, and provide
  // the paired register as the first hint.
  Register Paired = Hint.second;
  if (!Paired)
    return false;

  Register PairedPhys;
  if (Paired.isPhysical()) {
    PairedPhys = Paired;
  } else if (VRM && VRM->hasPhys(Paired)) {
    PairedPhys = getPairedGPR(VRM->getPhys(Paired), Odd, this);
  }

  // First prefer the paired physreg.
  if (PairedPhys && is_contained(Order, PairedPhys))
    Hints.push_back(PairedPhys);

  // Then prefer even or odd registers.
  for (MCPhysReg Reg : Order) {
    if (Reg == PairedPhys || (getEncodingValue(Reg) & 1) != Odd)
      continue;
    // Don't provide hints that are paired to a reserved register.
    MCPhysReg Paired = getPairedGPR(Reg, !Odd, this);
    if (!Paired || MRI.isReserved(Paired))
      continue;
    Hints.push_back(Reg);
  }
  return false;
}

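// When a virtual register carrying an even/odd pairing hint is replaced (for
// example by the coalescer), keep the hint on its partner register pointing
// at the replacement so the pairing is preserved.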
void ARMBaseRegisterInfo::updateRegAllocHint(Register Reg, Register NewReg,
                                             MachineFunction &MF) const {
  MachineRegisterInfo *MRI = &MF.getRegInfo();
  std::pair<unsigned, Register> Hint = MRI->getRegAllocationHint(Reg);
  if ((Hint.first == ARMRI::RegPairOdd || Hint.first == ARMRI::RegPairEven) &&
      Hint.second.isVirtual()) {
    // If 'Reg' is one of an even / odd register pair and it has been changed
    // (e.g. coalesced) into a different register, the allocation hint on the
    // other register of the pair must be updated to reflect the relationship
    // change.
    Register OtherReg = Hint.second;
    Hint = MRI->getRegAllocationHint(OtherReg);
    // Make sure the pair has not already divorced.
    if (Hint.second == Reg) {
      MRI->setRegAllocationHint(OtherReg, Hint.first, NewReg);
      if (NewReg.isVirtual())
        MRI->setRegAllocationHint(NewReg,
                                  Hint.first == ARMRI::RegPairOdd
                                      ? ARMRI::RegPairEven
                                      : ARMRI::RegPairOdd,
                                  OtherReg);
    }
  }
}

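// Decide whether the function needs a dedicated base pointer register in
// addition to SP/FP, which happens when the stack must be realigned or when
// SP/FP-relative offsets cannot reliably reach all local objects.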
bool ARMBaseRegisterInfo::hasBasePointer(const MachineFunction &MF) const {
  const MachineFrameInfo &MFI = MF.getFrameInfo();
  const ARMFunctionInfo *AFI = MF.getInfo<ARMFunctionInfo>();
  const ARMFrameLowering *TFI = getFrameLowering(MF);

  // If we have stack realignment and VLAs, we have no pointer to use to
  // access the stack. If we have stack realignment, and a large call frame,
  // we have no place to allocate the emergency spill slot.
  if (hasStackRealignment(MF) && !TFI->hasReservedCallFrame(MF))
    return true;

  // Thumb has trouble with negative offsets from the FP. Thumb2 has a limited
  // negative range for ldr/str (255), and Thumb1 is positive offsets only.
  //
  // It's going to be better to use the SP or Base Pointer instead. When there
  // are variable sized objects, we can't reference off of the SP, so we
  // reserve a Base Pointer.
  //
  // For Thumb2, estimate whether a negative offset from the frame pointer
  // will be sufficient to reach the whole stack frame. If a function has a
  // smallish frame, it's less likely to have lots of spills and callee saved
  // space, so it's all more likely to be within range of the frame pointer.
  // If it's wrong, the scavenger will still enable access to work, it just
  // won't be optimal. (We should always be able to reach the emergency
  // spill slot from the frame pointer.)
  if (AFI->isThumb2Function() && MFI.hasVarSizedObjects() &&
      MFI.getLocalFrameSize() >= 128)
    return true;
  // For Thumb1, if sp moves, nothing is in range, so force a base pointer.
  // This is necessary for correctness in cases where we need an emergency
  // spill slot. (In Thumb1, we can't use a negative offset from the frame
  // pointer.)
  if (AFI->isThumb1OnlyFunction() && !TFI->hasReservedCallFrame(MF))
    return true;
  return false;
}

bool ARMBaseRegisterInfo::canRealignStack(const MachineFunction &MF) const {
  const MachineRegisterInfo *MRI = &MF.getRegInfo();
  const ARMFrameLowering *TFI = getFrameLowering(MF);
  const ARMSubtarget &STI = MF.getSubtarget<ARMSubtarget>();
  // We can't realign the stack if:
  // 1. Dynamic stack realignment is explicitly disabled,
  // 2. There are VLAs in the function and the base pointer is disabled.
  if (!TargetRegisterInfo::canRealignStack(MF))
    return false;
  // Stack realignment requires a frame pointer. If we already started
  // register allocation with frame pointer elimination, it is too late now.
  if (!MRI->canReserveReg(STI.getFramePointerReg()))
    return false;
  // We may also need a base pointer if there are dynamic allocas or stack
  // pointer adjustments around calls.
  if (TFI->hasReservedCallFrame(MF))
    return true;
  // A base pointer is required and allowed. Check that it isn't too late to
  // reserve it.
  return MRI->canReserveReg(BasePtr);
}

bool ARMBaseRegisterInfo::
cannotEliminateFrame(const MachineFunction &MF) const {
  const MachineFrameInfo &MFI = MF.getFrameInfo();
  if (MF.getTarget().Options.DisableFramePointerElim(MF) && MFI.adjustsStack())
    return true;
  return MFI.hasVarSizedObjects() || MFI.isFrameAddressTaken() ||
         hasStackRealignment(MF);
}

Register
ARMBaseRegisterInfo::getFrameRegister(const MachineFunction &MF) const {
  const ARMSubtarget &STI = MF.getSubtarget<ARMSubtarget>();
  const ARMFrameLowering *TFI = getFrameLowering(MF);

  if (TFI->hasFP(MF))
    return STI.getFramePointerReg();
  return ARM::SP;
}

/// emitLoadConstPool - Emits a load from constpool to materialize the
/// specified immediate.
void ARMBaseRegisterInfo::emitLoadConstPool(
    MachineBasicBlock &MBB, MachineBasicBlock::iterator &MBBI,
    const DebugLoc &dl, Register DestReg, unsigned SubIdx, int Val,
    ARMCC::CondCodes Pred, Register PredReg, unsigned MIFlags) const {
  MachineFunction &MF = *MBB.getParent();
  const TargetInstrInfo &TII = *MF.getSubtarget().getInstrInfo();
  MachineConstantPool *ConstantPool = MF.getConstantPool();
  const Constant *C =
      ConstantInt::get(Type::getInt32Ty(MF.getFunction().getContext()), Val);
  unsigned Idx = ConstantPool->getConstantPoolIndex(C, Align(4));

  BuildMI(MBB, MBBI, dl, TII.get(ARM::LDRcp))
      .addReg(DestReg, getDefRegState(true), SubIdx)
      .addConstantPoolIndex(Idx)
      .addImm(0)
      .add(predOps(Pred, PredReg))
      .setMIFlags(MIFlags);
}

bool ARMBaseRegisterInfo::
requiresRegisterScavenging(const MachineFunction &MF) const {
  return true;
}

bool ARMBaseRegisterInfo::
requiresFrameIndexScavenging(const MachineFunction &MF) const {
  return true;
}

bool ARMBaseRegisterInfo::
requiresVirtualBaseRegisters(const MachineFunction &MF) const {
  return true;
}

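// Read the immediate offset already encoded in a frame-index instruction,
// decoding and scaling it according to the addressing mode in the
// instruction's TSFlags.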
int64_t ARMBaseRegisterInfo::
getFrameIndexInstrOffset(const MachineInstr *MI, int Idx) const {
  const MCInstrDesc &Desc = MI->getDesc();
  unsigned AddrMode = (Desc.TSFlags & ARMII::AddrModeMask);
  int64_t InstrOffs = 0;
  int Scale = 1;
  unsigned ImmIdx = 0;
  switch (AddrMode) {
  case ARMII::AddrModeT2_i8:
  case ARMII::AddrModeT2_i8pos:
  case ARMII::AddrModeT2_i8neg:
  case ARMII::AddrModeT2_i12:
  case ARMII::AddrMode_i12:
    InstrOffs = MI->getOperand(Idx+1).getImm();
    Scale = 1;
    break;
  case ARMII::AddrMode5: {
    // VFP address mode.
    const MachineOperand &OffOp = MI->getOperand(Idx+1);
    InstrOffs = ARM_AM::getAM5Offset(OffOp.getImm());
    if (ARM_AM::getAM5Op(OffOp.getImm()) == ARM_AM::sub)
      InstrOffs = -InstrOffs;
    Scale = 4;
    break;
  }
  case ARMII::AddrMode2:
    ImmIdx = Idx+2;
    InstrOffs = ARM_AM::getAM2Offset(MI->getOperand(ImmIdx).getImm());
    if (ARM_AM::getAM2Op(MI->getOperand(ImmIdx).getImm()) == ARM_AM::sub)
      InstrOffs = -InstrOffs;
    break;
  case ARMII::AddrMode3:
    ImmIdx = Idx+2;
    InstrOffs = ARM_AM::getAM3Offset(MI->getOperand(ImmIdx).getImm());
    if (ARM_AM::getAM3Op(MI->getOperand(ImmIdx).getImm()) == ARM_AM::sub)
      InstrOffs = -InstrOffs;
    break;
  case ARMII::AddrModeT1_s:
    ImmIdx = Idx+1;
    InstrOffs = MI->getOperand(ImmIdx).getImm();
    Scale = 4;
    break;
  default:
    llvm_unreachable("Unsupported addressing mode!");
  }

  return InstrOffs * Scale;
}

/// needsFrameBaseReg - Returns true if the instruction's frame index
/// reference would be better served by a base register other than FP
/// or SP. Used by LocalStackFrameAllocation to determine which frame index
/// references it should create new base registers for.
bool ARMBaseRegisterInfo::
needsFrameBaseReg(MachineInstr *MI, int64_t Offset) const {
  for (unsigned i = 0; !MI->getOperand(i).isFI(); ++i) {
    assert(i < MI->getNumOperands() && "Instr doesn't have FrameIndex operand!");
  }

  // It's the load/store FI references that cause issues, as it can be
  // difficult to materialize the offset if it won't fit in the literal field.
  // Estimate, based on the size of the local frame and some conservative
  // assumptions about the rest of the stack frame (note, this is pre-regalloc,
  // so we don't know everything for certain yet), whether this offset is
  // likely to be out of range of the immediate. Return true if so.

  // We only generate virtual base registers for loads and stores, so
  // return false for everything else.
  unsigned Opc = MI->getOpcode();
  switch (Opc) {
  case ARM::LDRi12: case ARM::LDRH: case ARM::LDRBi12:
  case ARM::STRi12: case ARM::STRH: case ARM::STRBi12:
  case ARM::t2LDRi12: case ARM::t2LDRi8:
  case ARM::t2STRi12: case ARM::t2STRi8:
  case ARM::VLDRS: case ARM::VLDRD:
  case ARM::VSTRS: case ARM::VSTRD:
  case ARM::tSTRspi: case ARM::tLDRspi:
    break;
  default:
    return false;
  }

  // Without a virtual base register, if the function has variable sized
  // objects, all fixed-size local references will be via the frame pointer.
  // Approximate the offset and see if it's legal for the instruction.
  // Note that the incoming offset is based on the SP value at function entry,
  // so it'll be negative.
  MachineFunction &MF = *MI->getParent()->getParent();
  const ARMFrameLowering *TFI = getFrameLowering(MF);
  MachineFrameInfo &MFI = MF.getFrameInfo();
  const ARMFunctionInfo *AFI = MF.getInfo<ARMFunctionInfo>();

  // Estimate an offset from the frame pointer.
  // Conservatively assume all callee-saved registers get pushed. R4-R6
  // will be earlier than the FP, so we ignore those.
  // R7, LR
  int64_t FPOffset = Offset - 8;
  // ARM and Thumb2 functions also need to consider R8-R11 and D8-D15
  if (!AFI->isThumbFunction() || !AFI->isThumb1OnlyFunction())
    FPOffset -= 80;
  // Estimate an offset from the stack pointer.
  // The incoming offset is relative to the SP at the start of the function,
  // but when we access the local it'll be relative to the SP after local
  // allocation, so adjust our SP-relative offset by that allocation size.
  Offset += MFI.getLocalFrameSize();
  // Assume that we'll have at least some spill slots allocated.
  // FIXME: This is a total SWAG number. We should run some statistics
  // and pick a real one.
  Offset += 128; // 128 bytes of spill slots

  // If there's a frame pointer and the addressing mode allows it, try using it.
  // The FP is only available if there is no dynamic realignment. We
  // don't know for sure yet whether we'll need that, so we guess based
  // on whether there are any local variables that would trigger it.
  if (TFI->hasFP(MF) &&
      !((MFI.getLocalFrameMaxAlign() > TFI->getStackAlign()) &&
        canRealignStack(MF))) {
    if (isFrameOffsetLegal(MI, getFrameRegister(MF), FPOffset))
      return false;
  }
  // If we can reference via the stack pointer, try that.
  // FIXME: This (and the code that resolves the references) can be improved
  //        to only disallow SP relative references in the live range of
  //        the VLA(s). In practice, it's unclear how much difference that
  //        would make, but it may be worth doing.
  if (!MFI.hasVarSizedObjects() && isFrameOffsetLegal(MI, ARM::SP, Offset))
    return false;

  // The offset likely isn't legal; we want to allocate a virtual base register.
  return true;
}

/// materializeFrameBaseRegister - Insert defining instruction(s) for BaseReg to
/// be a pointer to FrameIdx at the beginning of the basic block.
Register
ARMBaseRegisterInfo::materializeFrameBaseRegister(MachineBasicBlock *MBB,
                                                  int FrameIdx,
                                                  int64_t Offset) const {
  ARMFunctionInfo *AFI = MBB->getParent()->getInfo<ARMFunctionInfo>();
  unsigned ADDriOpc = !AFI->isThumbFunction() ? ARM::ADDri :
    (AFI->isThumb1OnlyFunction() ? ARM::tADDframe : ARM::t2ADDri);

  MachineBasicBlock::iterator Ins = MBB->begin();
  DebugLoc DL; // Defaults to "unknown"
  if (Ins != MBB->end())
    DL = Ins->getDebugLoc();

  const MachineFunction &MF = *MBB->getParent();
  MachineRegisterInfo &MRI = MBB->getParent()->getRegInfo();
  const TargetInstrInfo &TII = *MF.getSubtarget().getInstrInfo();
  const MCInstrDesc &MCID = TII.get(ADDriOpc);
  Register BaseReg = MRI.createVirtualRegister(&ARM::GPRRegClass);
  MRI.constrainRegClass(BaseReg, TII.getRegClass(MCID, 0, this, MF));

  MachineInstrBuilder MIB = BuildMI(*MBB, Ins, DL, MCID, BaseReg)
    .addFrameIndex(FrameIdx).addImm(Offset);

  if (!AFI->isThumb1OnlyFunction())
    MIB.add(predOps(ARMCC::AL)).add(condCodeOp());

  return BaseReg;
}

void ARMBaseRegisterInfo::resolveFrameIndex(MachineInstr &MI, Register BaseReg,
                                            int64_t Offset) const {
  MachineBasicBlock &MBB = *MI.getParent();
  MachineFunction &MF = *MBB.getParent();
  const ARMBaseInstrInfo &TII =
      *static_cast<const ARMBaseInstrInfo *>(MF.getSubtarget().getInstrInfo());
  ARMFunctionInfo *AFI = MF.getInfo<ARMFunctionInfo>();
  int Off = Offset; // ARM doesn't need the general 64-bit offsets
  unsigned i = 0;

  assert(!AFI->isThumb1OnlyFunction() &&
         "This resolveFrameIndex does not support Thumb1!");

  while (!MI.getOperand(i).isFI()) {
    ++i;
    assert(i < MI.getNumOperands() && "Instr doesn't have FrameIndex operand!");
  }
  bool Done = false;
  if (!AFI->isThumbFunction())
    Done = rewriteARMFrameIndex(MI, i, BaseReg, Off, TII);
  else {
    assert(AFI->isThumb2Function());
    Done = rewriteT2FrameIndex(MI, i, BaseReg, Off, TII, this);
  }
  assert(Done && "Unable to resolve frame index!");
  (void)Done;
}

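// Check whether 'Offset' is encodable in the addressing mode used by 'MI'
// when 'BaseReg' is the base, accounting for the mode's bit width, scale and
// sign handling.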
bool ARMBaseRegisterInfo::isFrameOffsetLegal(const MachineInstr *MI,
                                             Register BaseReg,
                                             int64_t Offset) const {
  const MCInstrDesc &Desc = MI->getDesc();
  unsigned AddrMode = (Desc.TSFlags & ARMII::AddrModeMask);
  unsigned i = 0;
  for (; !MI->getOperand(i).isFI(); ++i)
    assert(i+1 < MI->getNumOperands() &&
           "Instr doesn't have FrameIndex operand!");

  // AddrMode4 and AddrMode6 cannot handle any offset.
  if (AddrMode == ARMII::AddrMode4 || AddrMode == ARMII::AddrMode6)
    return Offset == 0;

  unsigned NumBits = 0;
  unsigned Scale = 1;
  bool isSigned = true;
  switch (AddrMode) {
  case ARMII::AddrModeT2_i8:
  case ARMII::AddrModeT2_i8pos:
  case ARMII::AddrModeT2_i8neg:
  case ARMII::AddrModeT2_i12:
    // i8 supports only negative, and i12 supports only positive, so
    // based on Offset sign, consider the appropriate instruction
    Scale = 1;
    if (Offset < 0) {
      NumBits = 8;
      Offset = -Offset;
    } else {
      NumBits = 12;
    }
    break;
  case ARMII::AddrMode5:
    // VFP address mode.
    NumBits = 8;
    Scale = 4;
    break;
  case ARMII::AddrMode_i12:
  case ARMII::AddrMode2:
    NumBits = 12;
    break;
  case ARMII::AddrMode3:
    NumBits = 8;
    break;
  case ARMII::AddrModeT1_s:
    NumBits = (BaseReg == ARM::SP ? 8 : 5);
    Scale = 4;
    isSigned = false;
    break;
  default:
    llvm_unreachable("Unsupported addressing mode!");
  }

  Offset += getFrameIndexInstrOffset(MI, i);
  // Make sure the offset is encodable for instructions that scale the
  // immediate.
  if ((Offset & (Scale-1)) != 0)
    return false;

  if (isSigned && Offset < 0)
    Offset = -Offset;

  unsigned Mask = (1 << NumBits) - 1;
  if ((unsigned)Offset <= Mask * Scale)
    return true;

  return false;
}

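// Rewrite the frame-index operand of MI into a base register plus immediate.
// If the immediate cannot be folded into the instruction, materialize the
// address into a scratch virtual register and use that instead.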
bool
ARMBaseRegisterInfo::eliminateFrameIndex(MachineBasicBlock::iterator II,
                                         int SPAdj, unsigned FIOperandNum,
                                         RegScavenger *RS) const {
  MachineInstr &MI = *II;
  MachineBasicBlock &MBB = *MI.getParent();
  MachineFunction &MF = *MBB.getParent();
  const ARMBaseInstrInfo &TII =
      *static_cast<const ARMBaseInstrInfo *>(MF.getSubtarget().getInstrInfo());
  const ARMFrameLowering *TFI = getFrameLowering(MF);
  ARMFunctionInfo *AFI = MF.getInfo<ARMFunctionInfo>();
  assert(!AFI->isThumb1OnlyFunction() &&
         "This eliminateFrameIndex does not support Thumb1!");
  int FrameIndex = MI.getOperand(FIOperandNum).getIndex();
  Register FrameReg;

  int Offset = TFI->ResolveFrameIndexReference(MF, FrameIndex, FrameReg, SPAdj);

  // PEI::scavengeFrameVirtualRegs() cannot accurately track SPAdj because the
  // call frame setup/destroy instructions have already been eliminated. That
  // means the stack pointer cannot be used to access the emergency spill slot
  // when !hasReservedCallFrame().
#ifndef NDEBUG
  if (RS && FrameReg == ARM::SP && RS->isScavengingFrameIndex(FrameIndex)){
    assert(TFI->hasReservedCallFrame(MF) &&
           "Cannot use SP to access the emergency spill slot in "
           "functions without a reserved call frame");
    assert(!MF.getFrameInfo().hasVarSizedObjects() &&
           "Cannot use SP to access the emergency spill slot in "
           "functions with variable sized frame objects");
  }
#endif // NDEBUG

  assert(!MI.isDebugValue() &&
         "DBG_VALUEs should be handled in target-independent code");

  // Modify MI as necessary to handle as much of 'Offset' as possible
  bool Done = false;
  if (!AFI->isThumbFunction())
    Done = rewriteARMFrameIndex(MI, FIOperandNum, FrameReg, Offset, TII);
  else {
    assert(AFI->isThumb2Function());
    Done = rewriteT2FrameIndex(MI, FIOperandNum, FrameReg, Offset, TII, this);
  }
  if (Done)
    return false;

  // If we get here, the immediate doesn't fit into the instruction. We folded
  // as much as possible above; handle the rest by providing a register that is
  // SP+LargeImm.
  assert(
      (Offset ||
       (MI.getDesc().TSFlags & ARMII::AddrModeMask) == ARMII::AddrMode4 ||
       (MI.getDesc().TSFlags & ARMII::AddrModeMask) == ARMII::AddrMode6 ||
       (MI.getDesc().TSFlags & ARMII::AddrModeMask) == ARMII::AddrModeT2_i7 ||
       (MI.getDesc().TSFlags & ARMII::AddrModeMask) == ARMII::AddrModeT2_i7s2 ||
       (MI.getDesc().TSFlags & ARMII::AddrModeMask) ==
           ARMII::AddrModeT2_i7s4) &&
      "This code isn't needed if offset already handled!");

  unsigned ScratchReg = 0;
  int PIdx = MI.findFirstPredOperandIdx();
  ARMCC::CondCodes Pred = (PIdx == -1)
    ? ARMCC::AL : (ARMCC::CondCodes)MI.getOperand(PIdx).getImm();
  Register PredReg = (PIdx == -1) ? Register() : MI.getOperand(PIdx+1).getReg();

  const MCInstrDesc &MCID = MI.getDesc();
  const TargetRegisterClass *RegClass =
      TII.getRegClass(MCID, FIOperandNum, this, *MI.getParent()->getParent());

  if (Offset == 0 && (FrameReg.isVirtual() || RegClass->contains(FrameReg)))
    // Must be addrmode4/6.
    MI.getOperand(FIOperandNum).ChangeToRegister(FrameReg, false, false, false);
  else {
    ScratchReg = MF.getRegInfo().createVirtualRegister(RegClass);
    if (!AFI->isThumbFunction())
      emitARMRegPlusImmediate(MBB, II, MI.getDebugLoc(), ScratchReg, FrameReg,
                              Offset, Pred, PredReg, TII);
    else {
      assert(AFI->isThumb2Function());
      emitT2RegPlusImmediate(MBB, II, MI.getDebugLoc(), ScratchReg, FrameReg,
                             Offset, Pred, PredReg, TII);
    }
    // Update the original instruction to use the scratch register.
    MI.getOperand(FIOperandNum).ChangeToRegister(ScratchReg, false, false, true);
  }
  return false;
}

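// Heuristic that limits how many large (>= 256-bit) vector registers are
// coalesced within a single basic block, so the register allocator is not
// over-constrained (see PR18825).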
bool ARMBaseRegisterInfo::shouldCoalesce(MachineInstr *MI,
                                         const TargetRegisterClass *SrcRC,
                                         unsigned SubReg,
                                         const TargetRegisterClass *DstRC,
                                         unsigned DstSubReg,
                                         const TargetRegisterClass *NewRC,
                                         LiveIntervals &LIS) const {
  auto MBB = MI->getParent();
  auto MF = MBB->getParent();
  const MachineRegisterInfo &MRI = MF->getRegInfo();
  // If not copying into a sub-register this should be ok because we shouldn't
  // need to split the reg.
  if (!DstSubReg)
    return true;
  // Small registers don't frequently cause a problem, so we can coalesce them.
  if (getRegSizeInBits(*NewRC) < 256 && getRegSizeInBits(*DstRC) < 256 &&
      getRegSizeInBits(*SrcRC) < 256)
    return true;

  auto NewRCWeight =
      MRI.getTargetRegisterInfo()->getRegClassWeight(NewRC);
  auto SrcRCWeight =
      MRI.getTargetRegisterInfo()->getRegClassWeight(SrcRC);
  auto DstRCWeight =
      MRI.getTargetRegisterInfo()->getRegClassWeight(DstRC);
  // If the source register class is more expensive than the destination, the
  // coalescing is probably profitable.
  if (SrcRCWeight.RegWeight > NewRCWeight.RegWeight)
    return true;
  if (DstRCWeight.RegWeight > NewRCWeight.RegWeight)
    return true;

  // If the register allocator isn't constrained, we can always allow
  // coalescing; unfortunately we don't know yet if we will be constrained.
  // The goal of this heuristic is to restrict how many expensive registers
  // we allow to coalesce in a given basic block.
  auto AFI = MF->getInfo<ARMFunctionInfo>();
  auto It = AFI->getCoalescedWeight(MBB);

  LLVM_DEBUG(dbgs() << "\tARM::shouldCoalesce - Coalesced Weight: "
                    << It->second << "\n");
  LLVM_DEBUG(dbgs() << "\tARM::shouldCoalesce - Reg Weight: "
                    << NewRCWeight.RegWeight << "\n");

  // This number is the largest round number that meets the criteria:
  //  (1) addresses PR18825
  //  (2) generates better code in some test cases (like vldm-shed-a9.ll)
  //  (3) doesn't regress any test cases (in-tree, test-suite, and SPEC)
  // In practice the SizeMultiplier will only factor in for straight line code
  // that uses a lot of NEON vectors, which isn't terribly common.
  unsigned SizeMultiplier = MBB->size()/100;
  SizeMultiplier = SizeMultiplier ? SizeMultiplier : 1;
  if (It->second < NewRCWeight.WeightLimit * SizeMultiplier) {
    It->second += NewRCWeight.RegWeight;
    return true;
  }
  return false;
}

bool ARMBaseRegisterInfo::shouldRewriteCopySrc(const TargetRegisterClass *DefRC,
                                               unsigned DefSubReg,
                                               const TargetRegisterClass *SrcRC,
                                               unsigned SrcSubReg) const {
  // We can't extract an SPR from an arbitrary DPR (as opposed to a DPR_VFP2).
  if (DefRC == &ARM::SPRRegClass && DefSubReg == 0 &&
      SrcRC == &ARM::DPRRegClass &&
      (SrcSubReg == ARM::ssub_0 || SrcSubReg == ARM::ssub_1))
    return false;

  return TargetRegisterInfo::shouldRewriteCopySrc(DefRC, DefSubReg,
                                                  SrcRC, SrcSubReg);
}