//===-- ARMBaseRegisterInfo.cpp - ARM Register Information ----------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file contains the base ARM implementation of TargetRegisterInfo class.
//
//===----------------------------------------------------------------------===//

#include "ARMBaseRegisterInfo.h"
#include "ARM.h"
#include "ARMBaseInstrInfo.h"
#include "ARMFrameLowering.h"
#include "ARMMachineFunctionInfo.h"
#include "ARMSubtarget.h"
#include "MCTargetDesc/ARMAddressingModes.h"
#include "MCTargetDesc/ARMBaseInfo.h"
#include "llvm/ADT/BitVector.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/CodeGen/MachineConstantPool.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineInstr.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/RegisterScavenging.h"
#include "llvm/CodeGen/TargetInstrInfo.h"
#include "llvm/CodeGen/VirtRegMap.h"
#include "llvm/IR/Attributes.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DebugLoc.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/Type.h"
#include "llvm/MC/MCInstrDesc.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Target/TargetMachine.h"
#include "llvm/Target/TargetOptions.h"
#include <cassert>
#include <utility>

#define DEBUG_TYPE "arm-register-info"

#define GET_REGINFO_TARGET_DESC
#include "ARMGenRegisterInfo.inc"

using namespace llvm;

ARMBaseRegisterInfo::ARMBaseRegisterInfo()
    : ARMGenRegisterInfo(ARM::LR, 0, 0, ARM::PC) {
  ARM_MC::initLLVMToCVRegMapping(this);
}

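// Select the callee-saved register list for the current function. The choice
// is driven by the calling convention (GHC, CFGuard_Check, SwiftTail,
// CXX_FAST_TLS), interrupt attributes, swifterror usage, and whether the
// subtarget splits the frame push into separate GPR and high-register parts.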
const MCPhysReg*
ARMBaseRegisterInfo::getCalleeSavedRegs(const MachineFunction *MF) const {
  const ARMSubtarget &STI = MF->getSubtarget<ARMSubtarget>();
  bool UseSplitPush = STI.splitFramePushPop(*MF);
  const Function &F = MF->getFunction();

  if (F.getCallingConv() == CallingConv::GHC) {
    // GHC set of callee saved regs is empty as all those regs are
    // used for passing STG regs around.
    return CSR_NoRegs_SaveList;
  } else if (STI.splitFramePointerPush(*MF)) {
    return CSR_Win_SplitFP_SaveList;
  } else if (F.getCallingConv() == CallingConv::CFGuard_Check) {
    return CSR_Win_AAPCS_CFGuard_Check_SaveList;
  } else if (F.getCallingConv() == CallingConv::SwiftTail) {
    return STI.isTargetDarwin()
               ? CSR_iOS_SwiftTail_SaveList
               : (UseSplitPush ? CSR_ATPCS_SplitPush_SwiftTail_SaveList
                               : CSR_AAPCS_SwiftTail_SaveList);
  } else if (F.hasFnAttribute("interrupt")) {
    if (STI.isMClass()) {
      // M-class CPUs have hardware which saves the registers needed to allow a
      // function conforming to the AAPCS to function as a handler.
      return UseSplitPush ? CSR_ATPCS_SplitPush_SaveList : CSR_AAPCS_SaveList;
    } else if (F.getFnAttribute("interrupt").getValueAsString() == "FIQ") {
      // Fast interrupt mode gives the handler a private copy of R8-R14, so
      // less needs to be saved to restore user-mode state.
      return CSR_FIQ_SaveList;
    } else {
      // Generally only R13-R14 (i.e. SP, LR) are automatically preserved by
      // exception handling.
      return CSR_GenericInt_SaveList;
    }
  }

  if (STI.getTargetLowering()->supportSwiftError() &&
      F.getAttributes().hasAttrSomewhere(Attribute::SwiftError)) {
    if (STI.isTargetDarwin())
      return CSR_iOS_SwiftError_SaveList;

    return UseSplitPush ? CSR_ATPCS_SplitPush_SwiftError_SaveList
                        : CSR_AAPCS_SwiftError_SaveList;
  }

  if (STI.isTargetDarwin() && F.getCallingConv() == CallingConv::CXX_FAST_TLS)
    return MF->getInfo<ARMFunctionInfo>()->isSplitCSR()
               ? CSR_iOS_CXX_TLS_PE_SaveList
               : CSR_iOS_CXX_TLS_SaveList;

  if (STI.isTargetDarwin())
    return CSR_iOS_SaveList;

  if (UseSplitPush)
    return STI.createAAPCSFrameChain() ? CSR_AAPCS_SplitPush_SaveList
                                       : CSR_ATPCS_SplitPush_SaveList;

  return CSR_AAPCS_SaveList;
}

const MCPhysReg *ARMBaseRegisterInfo::getCalleeSavedRegsViaCopy(
    const MachineFunction *MF) const {
  assert(MF && "Invalid MachineFunction pointer.");
  if (MF->getFunction().getCallingConv() == CallingConv::CXX_FAST_TLS &&
      MF->getInfo<ARMFunctionInfo>()->isSplitCSR())
    return CSR_iOS_CXX_TLS_ViaCopy_SaveList;
  return nullptr;
}

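// Call-preserved register masks mirror the save lists above: the mask returned
// here is what the register allocator assumes a callee with the given calling
// convention preserves across a call.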
const uint32_t *
ARMBaseRegisterInfo::getCallPreservedMask(const MachineFunction &MF,
                                          CallingConv::ID CC) const {
  const ARMSubtarget &STI = MF.getSubtarget<ARMSubtarget>();
  if (CC == CallingConv::GHC)
    // This is academic because all GHC calls are (supposed to be) tail calls
    return CSR_NoRegs_RegMask;
  if (CC == CallingConv::CFGuard_Check)
    return CSR_Win_AAPCS_CFGuard_Check_RegMask;
  if (CC == CallingConv::SwiftTail) {
    return STI.isTargetDarwin() ? CSR_iOS_SwiftTail_RegMask
                                : CSR_AAPCS_SwiftTail_RegMask;
  }
  if (STI.getTargetLowering()->supportSwiftError() &&
      MF.getFunction().getAttributes().hasAttrSomewhere(Attribute::SwiftError))
    return STI.isTargetDarwin() ? CSR_iOS_SwiftError_RegMask
                                : CSR_AAPCS_SwiftError_RegMask;

  if (STI.isTargetDarwin() && CC == CallingConv::CXX_FAST_TLS)
    return CSR_iOS_CXX_TLS_RegMask;
  return STI.isTargetDarwin() ? CSR_iOS_RegMask : CSR_AAPCS_RegMask;
}

const uint32_t*
ARMBaseRegisterInfo::getNoPreservedMask() const {
  return CSR_NoRegs_RegMask;
}

const uint32_t *
ARMBaseRegisterInfo::getTLSCallPreservedMask(const MachineFunction &MF) const {
  assert(MF.getSubtarget<ARMSubtarget>().isTargetDarwin() &&
         "only know about special TLS call on Darwin");
  return CSR_iOS_TLSCall_RegMask;
}

const uint32_t *
ARMBaseRegisterInfo::getSjLjDispatchPreservedMask(const MachineFunction &MF) const {
  const ARMSubtarget &STI = MF.getSubtarget<ARMSubtarget>();
  if (!STI.useSoftFloat() && STI.hasVFP2Base() && !STI.isThumb1Only())
    return CSR_NoRegs_RegMask;
  else
    return CSR_FPRegs_RegMask;
}

const uint32_t *
ARMBaseRegisterInfo::getThisReturnPreservedMask(const MachineFunction &MF,
                                                CallingConv::ID CC) const {
  const ARMSubtarget &STI = MF.getSubtarget<ARMSubtarget>();
  // This should return a register mask that is the same as that returned by
  // getCallPreservedMask but that additionally preserves the register used for
  // the first i32 argument (which must also be the register used to return a
  // single i32 return value).
  //
  // In case that the calling convention does not use the same register for
  // both or otherwise does not want to enable this optimization, the function
  // should return NULL.
  if (CC == CallingConv::GHC)
    // This is academic because all GHC calls are (supposed to be) tail calls
    return nullptr;
  return STI.isTargetDarwin() ? CSR_iOS_ThisReturn_RegMask
                              : CSR_AAPCS_ThisReturn_RegMask;
}

ArrayRef<MCPhysReg> ARMBaseRegisterInfo::getIntraCallClobberedRegs(
    const MachineFunction *MF) const {
  static const MCPhysReg IntraCallClobberedRegs[] = {ARM::R12};
  return ArrayRef<MCPhysReg>(IntraCallClobberedRegs);
}

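// Build the set of registers the allocator must never assign: SP, PC, status
// registers, the frame and base pointers when they are in use, R9 on targets
// that reserve it, and D16-D31 when the subtarget lacks them. A GPRPair is
// reserved whenever either of its halves is reserved.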
BitVector ARMBaseRegisterInfo::
getReservedRegs(const MachineFunction &MF) const {
  const ARMSubtarget &STI = MF.getSubtarget<ARMSubtarget>();
  const ARMFrameLowering *TFI = getFrameLowering(MF);

  // FIXME: avoid re-calculating this every time.
  BitVector Reserved(getNumRegs());
  markSuperRegs(Reserved, ARM::SP);
  markSuperRegs(Reserved, ARM::PC);
  markSuperRegs(Reserved, ARM::FPSCR);
  markSuperRegs(Reserved, ARM::APSR_NZCV);
  if (TFI->hasFP(MF))
    markSuperRegs(Reserved, STI.getFramePointerReg());
  if (hasBasePointer(MF))
    markSuperRegs(Reserved, BasePtr);
  // Some targets reserve R9.
  if (STI.isR9Reserved())
    markSuperRegs(Reserved, ARM::R9);
  // Reserve D16-D31 if the subtarget doesn't support them.
  if (!STI.hasD32()) {
    static_assert(ARM::D31 == ARM::D16 + 15, "Register list not consecutive!");
    for (unsigned R = 0; R < 16; ++R)
      markSuperRegs(Reserved, ARM::D16 + R);
  }
  const TargetRegisterClass &RC = ARM::GPRPairRegClass;
  for (unsigned Reg : RC)
    for (MCPhysReg S : subregs(Reg))
      if (Reserved.test(S))
        markSuperRegs(Reserved, Reg);
  // For v8.1m architecture
  markSuperRegs(Reserved, ARM::ZR);

  assert(checkAllSuperRegsMarked(Reserved));
  return Reserved;
}

bool ARMBaseRegisterInfo::
isAsmClobberable(const MachineFunction &MF, MCRegister PhysReg) const {
  return !getReservedRegs(MF).test(PhysReg);
}

bool ARMBaseRegisterInfo::isInlineAsmReadOnlyReg(const MachineFunction &MF,
                                                 unsigned PhysReg) const {
  const ARMSubtarget &STI = MF.getSubtarget<ARMSubtarget>();
  const ARMFrameLowering *TFI = getFrameLowering(MF);

  BitVector Reserved(getNumRegs());
  markSuperRegs(Reserved, ARM::PC);
  if (TFI->isFPReserved(MF))
    markSuperRegs(Reserved, STI.getFramePointerReg());
  if (hasBasePointer(MF))
    markSuperRegs(Reserved, BasePtr);
  assert(checkAllSuperRegsMarked(Reserved));
  return Reserved.test(PhysReg);
}

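// Walk the super-class chain of RC and return the largest legal super-class;
// NEON and MVE vector classes are only legal when the corresponding subtarget
// feature is present.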
const TargetRegisterClass *
ARMBaseRegisterInfo::getLargestLegalSuperClass(const TargetRegisterClass *RC,
                                               const MachineFunction &MF) const {
  const TargetRegisterClass *Super = RC;
  TargetRegisterInfo::regclass_iterator I = RC->getSuperClasses();
  do {
    switch (Super->getID()) {
    case ARM::GPRRegClassID:
    case ARM::SPRRegClassID:
    case ARM::DPRRegClassID:
    case ARM::GPRPairRegClassID:
      return Super;
    case ARM::QPRRegClassID:
    case ARM::QQPRRegClassID:
    case ARM::QQQQPRRegClassID:
      if (MF.getSubtarget<ARMSubtarget>().hasNEON())
        return Super;
      break;
    case ARM::MQPRRegClassID:
    case ARM::MQQPRRegClassID:
    case ARM::MQQQQPRRegClassID:
      if (MF.getSubtarget<ARMSubtarget>().hasMVEIntegerOps())
        return Super;
      break;
    }
    Super = *I++;
  } while (Super);
  return RC;
}

const TargetRegisterClass *
ARMBaseRegisterInfo::getPointerRegClass(const MachineFunction &MF,
                                        unsigned Kind) const {
  return &ARM::GPRRegClass;
}

const TargetRegisterClass *
ARMBaseRegisterInfo::getCrossCopyRegClass(const TargetRegisterClass *RC) const {
  if (RC == &ARM::CCRRegClass)
    return &ARM::rGPRRegClass;  // Can't copy CCR registers.
  return RC;
}

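// Rough register-pressure limits per class, used by the schedulers. The GPR
// limits are reduced when a frame pointer is (or may be) required and when R9
// is reserved.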
unsigned
ARMBaseRegisterInfo::getRegPressureLimit(const TargetRegisterClass *RC,
                                         MachineFunction &MF) const {
  const ARMSubtarget &STI = MF.getSubtarget<ARMSubtarget>();
  const ARMFrameLowering *TFI = getFrameLowering(MF);

  switch (RC->getID()) {
  default:
    return 0;
  case ARM::tGPRRegClassID: {
    // hasFP ends up calling getMaxCallFrameComputed() which may not be
    // available when getPressureLimit() is called as part of
    // ScheduleDAGRRList.
    bool HasFP = MF.getFrameInfo().isMaxCallFrameSizeComputed()
                     ? TFI->hasFP(MF) : true;
    return 5 - HasFP;
  }
  case ARM::GPRRegClassID: {
    bool HasFP = MF.getFrameInfo().isMaxCallFrameSizeComputed()
                     ? TFI->hasFP(MF) : true;
    return 10 - HasFP - (STI.isR9Reserved() ? 1 : 0);
  }
  case ARM::SPRRegClassID:  // Currently not used as 'rep' register class.
  case ARM::DPRRegClassID:
    return 32 - 10;
  }
}

// Get the other register in a GPRPair.
static MCPhysReg getPairedGPR(MCPhysReg Reg, bool Odd,
                              const MCRegisterInfo *RI) {
  for (MCPhysReg Super : RI->superregs(Reg))
    if (ARM::GPRPairRegClass.contains(Super))
      return RI->getSubReg(Super, Odd ? ARM::gsub_1 : ARM::gsub_0);
  return 0;
}

// Resolve the RegPairEven / RegPairOdd register allocator hints.
bool ARMBaseRegisterInfo::getRegAllocationHints(
    Register VirtReg, ArrayRef<MCPhysReg> Order,
    SmallVectorImpl<MCPhysReg> &Hints, const MachineFunction &MF,
    const VirtRegMap *VRM, const LiveRegMatrix *Matrix) const {
  const MachineRegisterInfo &MRI = MF.getRegInfo();
  std::pair<unsigned, Register> Hint = MRI.getRegAllocationHint(VirtReg);

  unsigned Odd;
  switch (Hint.first) {
  case ARMRI::RegPairEven:
    Odd = 0;
    break;
  case ARMRI::RegPairOdd:
    Odd = 1;
    break;
  case ARMRI::RegLR:
    TargetRegisterInfo::getRegAllocationHints(VirtReg, Order, Hints, MF, VRM);
    if (MRI.getRegClass(VirtReg)->contains(ARM::LR))
      Hints.push_back(ARM::LR);
    return false;
  default:
    return TargetRegisterInfo::getRegAllocationHints(VirtReg, Order, Hints, MF, VRM);
  }

  // This register should preferably be even (Odd == 0) or odd (Odd == 1).
  // Check if the other part of the pair has already been assigned, and provide
  // the paired register as the first hint.
  Register Paired = Hint.second;
  if (!Paired)
    return false;

  Register PairedPhys;
  if (Paired.isPhysical()) {
    PairedPhys = Paired;
  } else if (VRM && VRM->hasPhys(Paired)) {
    PairedPhys = getPairedGPR(VRM->getPhys(Paired), Odd, this);
  }

  // First prefer the paired physreg.
  if (PairedPhys && is_contained(Order, PairedPhys))
    Hints.push_back(PairedPhys);

  // Then prefer even or odd registers.
  for (MCPhysReg Reg : Order) {
    if (Reg == PairedPhys || (getEncodingValue(Reg) & 1) != Odd)
      continue;
    // Don't provide hints that are paired to a reserved register.
    MCPhysReg Paired = getPairedGPR(Reg, !Odd, this);
    if (!Paired || MRI.isReserved(Paired))
      continue;
    Hints.push_back(Reg);
  }
  return false;
}

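// Keep the RegPairEven / RegPairOdd hints consistent when the register
// allocator or coalescer replaces one half of a candidate pair.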
void ARMBaseRegisterInfo::updateRegAllocHint(Register Reg, Register NewReg,
                                             MachineFunction &MF) const {
  MachineRegisterInfo *MRI = &MF.getRegInfo();
  std::pair<unsigned, Register> Hint = MRI->getRegAllocationHint(Reg);
  if ((Hint.first == ARMRI::RegPairOdd || Hint.first == ARMRI::RegPairEven) &&
      Hint.second.isVirtual()) {
    // If 'Reg' is one of the even / odd register pair and it's now changed
    // (e.g. coalesced) into a different register, the other register of the
    // pair allocation hint must be updated to reflect the relationship
    // change.
    Register OtherReg = Hint.second;
    Hint = MRI->getRegAllocationHint(OtherReg);
    // Make sure the pair has not already divorced.
    if (Hint.second == Reg) {
      MRI->setRegAllocationHint(OtherReg, Hint.first, NewReg);
      if (NewReg.isVirtual())
        MRI->setRegAllocationHint(NewReg,
                                  Hint.first == ARMRI::RegPairOdd
                                      ? ARMRI::RegPairEven
                                      : ARMRI::RegPairOdd,
                                  OtherReg);
    }
  }
}

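// Decide whether this function needs a dedicated base pointer register:
// required when stack realignment or Thumb offset limits make SP- or
// FP-relative addressing of locals unreliable.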
bool ARMBaseRegisterInfo::hasBasePointer(const MachineFunction &MF) const {
  const MachineFrameInfo &MFI = MF.getFrameInfo();
  const ARMFunctionInfo *AFI = MF.getInfo<ARMFunctionInfo>();
  const ARMFrameLowering *TFI = getFrameLowering(MF);

  // If we have stack realignment and VLAs, we have no pointer to use to
  // access the stack. If we have stack realignment, and a large call frame,
  // we have no place to allocate the emergency spill slot.
  if (hasStackRealignment(MF) && !TFI->hasReservedCallFrame(MF))
    return true;

  // Thumb has trouble with negative offsets from the FP. Thumb2 has a limited
  // negative range for ldr/str (255), and Thumb1 is positive offsets only.
  //
  // It's going to be better to use the SP or Base Pointer instead. When there
  // are variable sized objects, we can't reference off of the SP, so we
  // reserve a Base Pointer.
  //
  // For Thumb2, estimate whether a negative offset from the frame pointer
  // will be sufficient to reach the whole stack frame. If a function has a
  // smallish frame, it's less likely to have lots of spills and callee saved
  // space, so it's all more likely to be within range of the frame pointer.
  // If it's wrong, the scavenger will still enable access to work, it just
  // won't be optimal. (We should always be able to reach the emergency
  // spill slot from the frame pointer.)
  if (AFI->isThumb2Function() && MFI.hasVarSizedObjects() &&
      MFI.getLocalFrameSize() >= 128)
    return true;
  // For Thumb1, if sp moves, nothing is in range, so force a base pointer.
  // This is necessary for correctness in cases where we need an emergency
  // spill slot. (In Thumb1, we can't use a negative offset from the frame
  // pointer.)
  if (AFI->isThumb1OnlyFunction() && !TFI->hasReservedCallFrame(MF))
    return true;
  return false;
}

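// Stack realignment is only possible while the frame pointer (and, if needed,
// the base pointer) can still be reserved.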
bool ARMBaseRegisterInfo::canRealignStack(const MachineFunction &MF) const {
  const MachineRegisterInfo *MRI = &MF.getRegInfo();
  const ARMFrameLowering *TFI = getFrameLowering(MF);
  const ARMSubtarget &STI = MF.getSubtarget<ARMSubtarget>();
  // We can't realign the stack if:
  // 1. Dynamic stack realignment is explicitly disabled,
  // 2. There are VLAs in the function and the base pointer is disabled.
  if (!TargetRegisterInfo::canRealignStack(MF))
    return false;
  // Stack realignment requires a frame pointer. If we already started
  // register allocation with frame pointer elimination, it is too late now.
  if (!MRI->canReserveReg(STI.getFramePointerReg()))
    return false;
  // We may also need a base pointer if there are dynamic allocas or stack
  // pointer adjustments around calls.
  if (TFI->hasReservedCallFrame(MF))
    return true;
  // A base pointer is required and allowed. Check that it isn't too late to
  // reserve it.
  return MRI->canReserveReg(BasePtr);
}

bool ARMBaseRegisterInfo::
cannotEliminateFrame(const MachineFunction &MF) const {
  const MachineFrameInfo &MFI = MF.getFrameInfo();
  if (MF.getTarget().Options.DisableFramePointerElim(MF) && MFI.adjustsStack())
    return true;
  return MFI.hasVarSizedObjects() || MFI.isFrameAddressTaken() ||
         hasStackRealignment(MF);
}

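// The frame register is the FP when the function keeps one, otherwise SP.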
Register
ARMBaseRegisterInfo::getFrameRegister(const MachineFunction &MF) const {
  const ARMSubtarget &STI = MF.getSubtarget<ARMSubtarget>();
  const ARMFrameLowering *TFI = getFrameLowering(MF);

  if (TFI->hasFP(MF))
    return STI.getFramePointerReg();
  return ARM::SP;
}

/// emitLoadConstPool - Emits a load from constpool to materialize the
/// specified immediate.
void ARMBaseRegisterInfo::emitLoadConstPool(
    MachineBasicBlock &MBB, MachineBasicBlock::iterator &MBBI,
    const DebugLoc &dl, Register DestReg, unsigned SubIdx, int Val,
    ARMCC::CondCodes Pred, Register PredReg, unsigned MIFlags) const {
  MachineFunction &MF = *MBB.getParent();
  const TargetInstrInfo &TII = *MF.getSubtarget().getInstrInfo();
  MachineConstantPool *ConstantPool = MF.getConstantPool();
  const Constant *C =
      ConstantInt::get(Type::getInt32Ty(MF.getFunction().getContext()), Val);
  unsigned Idx = ConstantPool->getConstantPoolIndex(C, Align(4));

  BuildMI(MBB, MBBI, dl, TII.get(ARM::LDRcp))
      .addReg(DestReg, getDefRegState(true), SubIdx)
      .addConstantPoolIndex(Idx)
      .addImm(0)
      .add(predOps(Pred, PredReg))
      .setMIFlags(MIFlags);
}

bool ARMBaseRegisterInfo::
requiresRegisterScavenging(const MachineFunction &MF) const {
  return true;
}

bool ARMBaseRegisterInfo::
requiresFrameIndexScavenging(const MachineFunction &MF) const {
  return true;
}

bool ARMBaseRegisterInfo::
requiresVirtualBaseRegisters(const MachineFunction &MF) const {
  return true;
}

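// Extract the immediate offset already encoded in a frame-index instruction,
// scaled to bytes according to its addressing mode.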
int64_t ARMBaseRegisterInfo::
getFrameIndexInstrOffset(const MachineInstr *MI, int Idx) const {
  const MCInstrDesc &Desc = MI->getDesc();
  unsigned AddrMode = (Desc.TSFlags & ARMII::AddrModeMask);
  int64_t InstrOffs = 0;
  int Scale = 1;
  unsigned ImmIdx = 0;
  switch (AddrMode) {
  case ARMII::AddrModeT2_i8:
  case ARMII::AddrModeT2_i8pos:
  case ARMII::AddrModeT2_i8neg:
  case ARMII::AddrModeT2_i12:
  case ARMII::AddrMode_i12:
    InstrOffs = MI->getOperand(Idx+1).getImm();
    Scale = 1;
    break;
  case ARMII::AddrMode5: {
    // VFP address mode.
    const MachineOperand &OffOp = MI->getOperand(Idx+1);
    InstrOffs = ARM_AM::getAM5Offset(OffOp.getImm());
    if (ARM_AM::getAM5Op(OffOp.getImm()) == ARM_AM::sub)
      InstrOffs = -InstrOffs;
    Scale = 4;
    break;
  }
  case ARMII::AddrMode2:
    ImmIdx = Idx+2;
    InstrOffs = ARM_AM::getAM2Offset(MI->getOperand(ImmIdx).getImm());
    if (ARM_AM::getAM2Op(MI->getOperand(ImmIdx).getImm()) == ARM_AM::sub)
      InstrOffs = -InstrOffs;
    break;
  case ARMII::AddrMode3:
    ImmIdx = Idx+2;
    InstrOffs = ARM_AM::getAM3Offset(MI->getOperand(ImmIdx).getImm());
    if (ARM_AM::getAM3Op(MI->getOperand(ImmIdx).getImm()) == ARM_AM::sub)
      InstrOffs = -InstrOffs;
    break;
  case ARMII::AddrModeT1_s:
    ImmIdx = Idx+1;
    InstrOffs = MI->getOperand(ImmIdx).getImm();
    Scale = 4;
    break;
  default:
    llvm_unreachable("Unsupported addressing mode!");
  }

  return InstrOffs * Scale;
}

/// needsFrameBaseReg - Returns true if the instruction's frame index
/// reference would be better served by a base register other than FP
/// or SP. Used by LocalStackFrameAllocation to determine which frame index
/// references it should create new base registers for.
bool ARMBaseRegisterInfo::
needsFrameBaseReg(MachineInstr *MI, int64_t Offset) const {
  for (unsigned i = 0; !MI->getOperand(i).isFI(); ++i) {
    assert(i < MI->getNumOperands() && "Instr doesn't have FrameIndex operand!");
  }

  // It's the load/store FI references that cause issues, as it can be difficult
  // to materialize the offset if it won't fit in the literal field. Estimate
  // based on the size of the local frame and some conservative assumptions
  // about the rest of the stack frame (note, this is pre-regalloc, so
  // we don't know everything for certain yet) whether this offset is likely
  // to be out of range of the immediate. Return true if so.

  // We only generate virtual base registers for loads and stores, so
  // return false for everything else.
  unsigned Opc = MI->getOpcode();
  switch (Opc) {
  case ARM::LDRi12: case ARM::LDRH: case ARM::LDRBi12:
  case ARM::STRi12: case ARM::STRH: case ARM::STRBi12:
  case ARM::t2LDRi12: case ARM::t2LDRi8:
  case ARM::t2STRi12: case ARM::t2STRi8:
  case ARM::VLDRS: case ARM::VLDRD:
  case ARM::VSTRS: case ARM::VSTRD:
  case ARM::tSTRspi: case ARM::tLDRspi:
    break;
  default:
    return false;
  }

  // Without a virtual base register, if the function has variable sized
  // objects, all fixed-size local references will be via the frame pointer.
  // Approximate the offset and see if it's legal for the instruction.
  // Note that the incoming offset is based on the SP value at function entry,
  // so it'll be negative.
  MachineFunction &MF = *MI->getParent()->getParent();
  const ARMFrameLowering *TFI = getFrameLowering(MF);
  MachineFrameInfo &MFI = MF.getFrameInfo();
  ARMFunctionInfo *AFI = MF.getInfo<ARMFunctionInfo>();

  // Estimate an offset from the frame pointer.
  // Conservatively assume all callee-saved registers get pushed. R4-R6
  // will be earlier than the FP, so we ignore those.
  // R7, LR
  int64_t FPOffset = Offset - 8;
  // ARM and Thumb2 functions also need to consider R8-R11 and D8-D15
  if (!AFI->isThumbFunction() || !AFI->isThumb1OnlyFunction())
    FPOffset -= 80;
  // Estimate an offset from the stack pointer.
  // The incoming offset is relative to the SP at the start of the function,
  // but when we access the local it'll be relative to the SP after local
  // allocation, so adjust our SP-relative offset by that allocation size.
  Offset += MFI.getLocalFrameSize();
  // Assume that we'll have at least some spill slots allocated.
  // FIXME: This is a total SWAG number. We should run some statistics
  // and pick a real one.
  Offset += 128; // 128 bytes of spill slots

  // If there's a frame pointer and the addressing mode allows it, try using it.
  // The FP is only available if there is no dynamic realignment. We
  // don't know for sure yet whether we'll need that, so we guess based
  // on whether there are any local variables that would trigger it.
  if (TFI->hasFP(MF) &&
      !((MFI.getLocalFrameMaxAlign() > TFI->getStackAlign()) &&
        canRealignStack(MF))) {
    if (isFrameOffsetLegal(MI, getFrameRegister(MF), FPOffset))
      return false;
  }
  // If we can reference via the stack pointer, try that.
  // FIXME: This (and the code that resolves the references) can be improved
  //        to only disallow SP relative references in the live range of
  //        the VLA(s). In practice, it's unclear how much difference that
  //        would make, but it may be worth doing.
  if (!MFI.hasVarSizedObjects() && isFrameOffsetLegal(MI, ARM::SP, Offset))
    return false;

  // The offset likely isn't legal; we want to allocate a virtual base register.
  return true;
}

/// materializeFrameBaseRegister - Insert defining instruction(s) for BaseReg to
/// be a pointer to FrameIdx at the beginning of the basic block.
Register
ARMBaseRegisterInfo::materializeFrameBaseRegister(MachineBasicBlock *MBB,
                                                  int FrameIdx,
                                                  int64_t Offset) const {
  ARMFunctionInfo *AFI = MBB->getParent()->getInfo<ARMFunctionInfo>();
  unsigned ADDriOpc = !AFI->isThumbFunction() ? ARM::ADDri :
    (AFI->isThumb1OnlyFunction() ? ARM::tADDframe : ARM::t2ADDri);

  MachineBasicBlock::iterator Ins = MBB->begin();
  DebugLoc DL;                  // Defaults to "unknown"
  if (Ins != MBB->end())
    DL = Ins->getDebugLoc();

  const MachineFunction &MF = *MBB->getParent();
  MachineRegisterInfo &MRI = MBB->getParent()->getRegInfo();
  const ARMBaseInstrInfo &TII = *MF.getSubtarget<ARMSubtarget>().getInstrInfo();
  const MCInstrDesc &MCID = TII.get(ADDriOpc);
  Register BaseReg = MRI.createVirtualRegister(&ARM::GPRRegClass);
  MRI.constrainRegClass(BaseReg, TII.getRegClass(MCID, 0, this, MF));

  MachineInstrBuilder MIB = BuildMI(*MBB, Ins, DL, MCID, BaseReg)
    .addFrameIndex(FrameIdx).addImm(Offset);

  if (!AFI->isThumb1OnlyFunction())
    MIB.add(predOps(ARMCC::AL)).add(condCodeOp());

  return BaseReg;
}

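// Rewrite MI in place so that its frame-index operand is addressed relative
// to BaseReg plus Offset (ARM or Thumb2 only).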
void ARMBaseRegisterInfo::resolveFrameIndex(MachineInstr &MI, Register BaseReg,
                                            int64_t Offset) const {
  MachineBasicBlock &MBB = *MI.getParent();
  MachineFunction &MF = *MBB.getParent();
  const ARMBaseInstrInfo &TII =
      *static_cast<const ARMBaseInstrInfo *>(MF.getSubtarget().getInstrInfo());
  ARMFunctionInfo *AFI = MF.getInfo<ARMFunctionInfo>();
  int Off = Offset; // ARM doesn't need the general 64-bit offsets
  unsigned i = 0;

  assert(!AFI->isThumb1OnlyFunction() &&
         "This resolveFrameIndex does not support Thumb1!");

  while (!MI.getOperand(i).isFI()) {
    ++i;
    assert(i < MI.getNumOperands() && "Instr doesn't have FrameIndex operand!");
  }
  bool Done = false;
  if (!AFI->isThumbFunction())
    Done = rewriteARMFrameIndex(MI, i, BaseReg, Off, TII);
  else {
    assert(AFI->isThumb2Function());
    Done = rewriteT2FrameIndex(MI, i, BaseReg, Off, TII, this);
  }
  assert(Done && "Unable to resolve frame index!");
  (void)Done;
}

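// Check whether Offset (plus any offset already encoded in MI) fits in the
// immediate field of MI's addressing mode when based on BaseReg.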
bool ARMBaseRegisterInfo::isFrameOffsetLegal(const MachineInstr *MI,
                                             Register BaseReg,
                                             int64_t Offset) const {
  const MCInstrDesc &Desc = MI->getDesc();
  unsigned AddrMode = (Desc.TSFlags & ARMII::AddrModeMask);
  unsigned i = 0;
  for (; !MI->getOperand(i).isFI(); ++i)
    assert(i+1 < MI->getNumOperands() && "Instr doesn't have FrameIndex operand!");

  // AddrMode4 and AddrMode6 cannot handle any offset.
  if (AddrMode == ARMII::AddrMode4 || AddrMode == ARMII::AddrMode6)
    return Offset == 0;

  unsigned NumBits = 0;
  unsigned Scale = 1;
  bool isSigned = true;
  switch (AddrMode) {
  case ARMII::AddrModeT2_i8:
  case ARMII::AddrModeT2_i8pos:
  case ARMII::AddrModeT2_i8neg:
  case ARMII::AddrModeT2_i12:
    // i8 supports only negative, and i12 supports only positive, so
    // based on Offset sign, consider the appropriate instruction
    Scale = 1;
    if (Offset < 0) {
      NumBits = 8;
      Offset = -Offset;
    } else {
      NumBits = 12;
    }
    break;
  case ARMII::AddrMode5:
    // VFP address mode.
    NumBits = 8;
    Scale = 4;
    break;
  case ARMII::AddrMode_i12:
  case ARMII::AddrMode2:
    NumBits = 12;
    break;
  case ARMII::AddrMode3:
    NumBits = 8;
    break;
  case ARMII::AddrModeT1_s:
    NumBits = (BaseReg == ARM::SP ? 8 : 5);
    Scale = 4;
    isSigned = false;
    break;
  default:
    llvm_unreachable("Unsupported addressing mode!");
  }

  Offset += getFrameIndexInstrOffset(MI, i);
  // Make sure the offset is encodable for instructions that scale the
  // immediate.
  if ((Offset & (Scale-1)) != 0)
    return false;

  if (isSigned && Offset < 0)
    Offset = -Offset;

  unsigned Mask = (1 << NumBits) - 1;
  if ((unsigned)Offset <= Mask * Scale)
    return true;

  return false;
}

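// Replace an abstract frame-index operand with a concrete FrameReg + Offset.
// If the offset does not fit the instruction's immediate field, materialize
// it into a scratch register first.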
bool
ARMBaseRegisterInfo::eliminateFrameIndex(MachineBasicBlock::iterator II,
                                         int SPAdj, unsigned FIOperandNum,
                                         RegScavenger *RS) const {
  MachineInstr &MI = *II;
  MachineBasicBlock &MBB = *MI.getParent();
  MachineFunction &MF = *MBB.getParent();
  const ARMBaseInstrInfo &TII =
      *static_cast<const ARMBaseInstrInfo *>(MF.getSubtarget().getInstrInfo());
  const ARMFrameLowering *TFI = getFrameLowering(MF);
  ARMFunctionInfo *AFI = MF.getInfo<ARMFunctionInfo>();
  assert(!AFI->isThumb1OnlyFunction() &&
         "This eliminateFrameIndex does not support Thumb1!");
  int FrameIndex = MI.getOperand(FIOperandNum).getIndex();
  Register FrameReg;

  int Offset = TFI->ResolveFrameIndexReference(MF, FrameIndex, FrameReg, SPAdj);

  // PEI::scavengeFrameVirtualRegs() cannot accurately track SPAdj because the
  // call frame setup/destroy instructions have already been eliminated. That
  // means the stack pointer cannot be used to access the emergency spill slot
  // when !hasReservedCallFrame().
#ifndef NDEBUG
  if (RS && FrameReg == ARM::SP && RS->isScavengingFrameIndex(FrameIndex)){
    assert(TFI->hasReservedCallFrame(MF) &&
           "Cannot use SP to access the emergency spill slot in "
           "functions without a reserved call frame");
    assert(!MF.getFrameInfo().hasVarSizedObjects() &&
           "Cannot use SP to access the emergency spill slot in "
           "functions with variable sized frame objects");
  }
#endif // NDEBUG

  assert(!MI.isDebugValue() && "DBG_VALUEs should be handled in target-independent code");

  // Modify MI as necessary to handle as much of 'Offset' as possible
  bool Done = false;
  if (!AFI->isThumbFunction())
    Done = rewriteARMFrameIndex(MI, FIOperandNum, FrameReg, Offset, TII);
  else {
    assert(AFI->isThumb2Function());
    Done = rewriteT2FrameIndex(MI, FIOperandNum, FrameReg, Offset, TII, this);
  }
  if (Done)
    return false;

  // If we get here, the immediate doesn't fit into the instruction. We folded
  // as much as possible above; handle the rest, providing a register that is
  // SP+LargeImm.
  assert(
      (Offset ||
       (MI.getDesc().TSFlags & ARMII::AddrModeMask) == ARMII::AddrMode4 ||
       (MI.getDesc().TSFlags & ARMII::AddrModeMask) == ARMII::AddrMode6 ||
       (MI.getDesc().TSFlags & ARMII::AddrModeMask) == ARMII::AddrModeT2_i7 ||
       (MI.getDesc().TSFlags & ARMII::AddrModeMask) == ARMII::AddrModeT2_i7s2 ||
       (MI.getDesc().TSFlags & ARMII::AddrModeMask) ==
           ARMII::AddrModeT2_i7s4) &&
      "This code isn't needed if offset already handled!");

  unsigned ScratchReg = 0;
  int PIdx = MI.findFirstPredOperandIdx();
  ARMCC::CondCodes Pred = (PIdx == -1)
    ? ARMCC::AL : (ARMCC::CondCodes)MI.getOperand(PIdx).getImm();
  Register PredReg = (PIdx == -1) ? Register() : MI.getOperand(PIdx+1).getReg();

  const MCInstrDesc &MCID = MI.getDesc();
  const TargetRegisterClass *RegClass =
      TII.getRegClass(MCID, FIOperandNum, this, *MI.getParent()->getParent());

  if (Offset == 0 && (FrameReg.isVirtual() || RegClass->contains(FrameReg)))
    // Must be addrmode4/6.
    MI.getOperand(FIOperandNum).ChangeToRegister(FrameReg, false, false, false);
  else {
    ScratchReg = MF.getRegInfo().createVirtualRegister(RegClass);
    if (!AFI->isThumbFunction())
      emitARMRegPlusImmediate(MBB, II, MI.getDebugLoc(), ScratchReg, FrameReg,
                              Offset, Pred, PredReg, TII);
    else {
      assert(AFI->isThumb2Function());
      emitT2RegPlusImmediate(MBB, II, MI.getDebugLoc(), ScratchReg, FrameReg,
                             Offset, Pred, PredReg, TII);
    }
    // Update the original instruction to use the scratch register.
    MI.getOperand(FIOperandNum).ChangeToRegister(ScratchReg, false, false, true);
  }
  return false;
}

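// Heuristic throttle on coalescing into large (>= 256-bit) register classes,
// limiting how much expensive-register weight a single basic block may
// accumulate (see PR18825).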
bool ARMBaseRegisterInfo::shouldCoalesce(MachineInstr *MI,
                                         const TargetRegisterClass *SrcRC,
                                         unsigned SubReg,
                                         const TargetRegisterClass *DstRC,
                                         unsigned DstSubReg,
                                         const TargetRegisterClass *NewRC,
                                         LiveIntervals &LIS) const {
  auto MBB = MI->getParent();
  auto MF = MBB->getParent();
  const MachineRegisterInfo &MRI = MF->getRegInfo();
  // If not copying into a sub-register this should be ok because we shouldn't
  // need to split the reg.
  if (!DstSubReg)
    return true;
  // Small registers don't frequently cause a problem, so we can coalesce them.
  if (getRegSizeInBits(*NewRC) < 256 && getRegSizeInBits(*DstRC) < 256 &&
      getRegSizeInBits(*SrcRC) < 256)
    return true;

  auto NewRCWeight =
      MRI.getTargetRegisterInfo()->getRegClassWeight(NewRC);
  auto SrcRCWeight =
      MRI.getTargetRegisterInfo()->getRegClassWeight(SrcRC);
  auto DstRCWeight =
      MRI.getTargetRegisterInfo()->getRegClassWeight(DstRC);
  // If the source register class is more expensive than the destination, the
  // coalescing is probably profitable.
  if (SrcRCWeight.RegWeight > NewRCWeight.RegWeight)
    return true;
  if (DstRCWeight.RegWeight > NewRCWeight.RegWeight)
    return true;

  // If the register allocator isn't constrained, we can always allow
  // coalescing; unfortunately we don't know yet if we will be constrained.
  // The goal of this heuristic is to restrict how many expensive registers
  // we allow to coalesce in a given basic block.
  auto AFI = MF->getInfo<ARMFunctionInfo>();
  auto It = AFI->getCoalescedWeight(MBB);

  LLVM_DEBUG(dbgs() << "\tARM::shouldCoalesce - Coalesced Weight: "
                    << It->second << "\n");
  LLVM_DEBUG(dbgs() << "\tARM::shouldCoalesce - Reg Weight: "
                    << NewRCWeight.RegWeight << "\n");

  // This number is the largest round number that meets the criteria:
  //   (1) addresses PR18825
  //   (2) generates better code in some test cases (like vldm-shed-a9.ll)
  //   (3) doesn't regress any test cases (in-tree, test-suite, and SPEC)
  // In practice the SizeMultiplier will only factor in for straight line code
  // that uses a lot of NEON vectors, which isn't terribly common.
  unsigned SizeMultiplier = MBB->size()/100;
  SizeMultiplier = SizeMultiplier ? SizeMultiplier : 1;
  if (It->second < NewRCWeight.WeightLimit * SizeMultiplier) {
    It->second += NewRCWeight.RegWeight;
    return true;
  }
  return false;
}

bool ARMBaseRegisterInfo::shouldRewriteCopySrc(const TargetRegisterClass *DefRC,
                                               unsigned DefSubReg,
                                               const TargetRegisterClass *SrcRC,
                                               unsigned SrcSubReg) const {
  // We can't extract an SPR from an arbitrary DPR (as opposed to a DPR_VFP2).
  if (DefRC == &ARM::SPRRegClass && DefSubReg == 0 &&
      SrcRC == &ARM::DPRRegClass &&
      (SrcSubReg == ARM::ssub_0 || SrcSubReg == ARM::ssub_1))
    return false;

  return TargetRegisterInfo::shouldRewriteCopySrc(DefRC, DefSubReg,
                                                  SrcRC, SrcSubReg);
}