Line data Source code
1 : //===-- ThumbRegisterInfo.cpp - Thumb-1 Register Information -------------===//
2 : //
3 : // The LLVM Compiler Infrastructure
4 : //
5 : // This file is distributed under the University of Illinois Open Source
6 : // License. See LICENSE.TXT for details.
7 : //
8 : //===----------------------------------------------------------------------===//
9 : //
10 : // This file contains the Thumb-1 implementation of the TargetRegisterInfo
11 : // class.
12 : //
13 : //===----------------------------------------------------------------------===//
14 :
15 : #include "ThumbRegisterInfo.h"
16 : #include "ARMBaseInstrInfo.h"
17 : #include "ARMMachineFunctionInfo.h"
18 : #include "ARMSubtarget.h"
19 : #include "MCTargetDesc/ARMAddressingModes.h"
20 : #include "llvm/CodeGen/MachineConstantPool.h"
21 : #include "llvm/CodeGen/MachineFrameInfo.h"
22 : #include "llvm/CodeGen/MachineFunction.h"
23 : #include "llvm/CodeGen/MachineInstrBuilder.h"
24 : #include "llvm/CodeGen/MachineRegisterInfo.h"
25 : #include "llvm/CodeGen/RegisterScavenging.h"
26 : #include "llvm/IR/Constants.h"
27 : #include "llvm/IR/DerivedTypes.h"
28 : #include "llvm/IR/Function.h"
29 : #include "llvm/IR/LLVMContext.h"
30 : #include "llvm/Support/CommandLine.h"
31 : #include "llvm/Support/ErrorHandling.h"
32 : #include "llvm/CodeGen/TargetFrameLowering.h"
33 : #include "llvm/Target/TargetMachine.h"
34 :
35 : namespace llvm {
36 : extern cl::opt<bool> ReuseFrameIndexVals;
37 : }
38 :
39 : using namespace llvm;
40 :
// Default-constructs the Thumb register info; all shared state lives in
// ARMBaseRegisterInfo, and Thumb-specific behavior comes from the overrides
// below.
ThumbRegisterInfo::ThumbRegisterInfo() : ARMBaseRegisterInfo() {}
42 :
43 : const TargetRegisterClass *
44 7011 : ThumbRegisterInfo::getLargestLegalSuperClass(const TargetRegisterClass *RC,
45 : const MachineFunction &MF) const {
46 7011 : if (!MF.getSubtarget<ARMSubtarget>().isThumb1Only())
47 5758 : return ARMBaseRegisterInfo::getLargestLegalSuperClass(RC, MF);
48 :
49 2506 : if (ARM::tGPRRegClass.hasSubClassEq(RC))
50 : return &ARM::tGPRRegClass;
51 2 : return ARMBaseRegisterInfo::getLargestLegalSuperClass(RC, MF);
52 : }
53 :
54 : const TargetRegisterClass *
55 176 : ThumbRegisterInfo::getPointerRegClass(const MachineFunction &MF,
56 : unsigned Kind) const {
57 176 : if (!MF.getSubtarget<ARMSubtarget>().isThumb1Only())
58 165 : return ARMBaseRegisterInfo::getPointerRegClass(MF, Kind);
59 : return &ARM::tGPRRegClass;
60 : }
61 :
62 0 : static void emitThumb1LoadConstPool(MachineBasicBlock &MBB,
63 : MachineBasicBlock::iterator &MBBI,
64 : const DebugLoc &dl, unsigned DestReg,
65 : unsigned SubIdx, int Val,
66 : ARMCC::CondCodes Pred, unsigned PredReg,
67 : unsigned MIFlags) {
68 0 : MachineFunction &MF = *MBB.getParent();
69 0 : const ARMSubtarget &STI = MF.getSubtarget<ARMSubtarget>();
70 0 : const TargetInstrInfo &TII = *STI.getInstrInfo();
71 0 : MachineConstantPool *ConstantPool = MF.getConstantPool();
72 0 : const Constant *C = ConstantInt::get(
73 0 : Type::getInt32Ty(MBB.getParent()->getFunction().getContext()), Val);
74 0 : unsigned Idx = ConstantPool->getConstantPoolIndex(C, 4);
75 :
76 0 : BuildMI(MBB, MBBI, dl, TII.get(ARM::tLDRpci))
77 0 : .addReg(DestReg, getDefRegState(true), SubIdx)
78 0 : .addConstantPoolIndex(Idx).addImm(Pred).addReg(PredReg)
79 : .setMIFlags(MIFlags);
80 0 : }
81 :
82 0 : static void emitThumb2LoadConstPool(MachineBasicBlock &MBB,
83 : MachineBasicBlock::iterator &MBBI,
84 : const DebugLoc &dl, unsigned DestReg,
85 : unsigned SubIdx, int Val,
86 : ARMCC::CondCodes Pred, unsigned PredReg,
87 : unsigned MIFlags) {
88 0 : MachineFunction &MF = *MBB.getParent();
89 0 : const TargetInstrInfo &TII = *MF.getSubtarget().getInstrInfo();
90 0 : MachineConstantPool *ConstantPool = MF.getConstantPool();
91 0 : const Constant *C = ConstantInt::get(
92 0 : Type::getInt32Ty(MBB.getParent()->getFunction().getContext()), Val);
93 0 : unsigned Idx = ConstantPool->getConstantPoolIndex(C, 4);
94 :
95 0 : BuildMI(MBB, MBBI, dl, TII.get(ARM::t2LDRpci))
96 0 : .addReg(DestReg, getDefRegState(true), SubIdx)
97 : .addConstantPoolIndex(Idx)
98 0 : .add(predOps(ARMCC::AL))
99 : .setMIFlags(MIFlags);
100 0 : }
101 :
102 : /// emitLoadConstPool - Emits a load from constpool to materialize the
103 : /// specified immediate.
104 91 : void ThumbRegisterInfo::emitLoadConstPool(
105 : MachineBasicBlock &MBB, MachineBasicBlock::iterator &MBBI,
106 : const DebugLoc &dl, unsigned DestReg, unsigned SubIdx, int Val,
107 : ARMCC::CondCodes Pred, unsigned PredReg, unsigned MIFlags) const {
108 91 : MachineFunction &MF = *MBB.getParent();
109 91 : const ARMSubtarget &STI = MF.getSubtarget<ARMSubtarget>();
110 91 : if (STI.isThumb1Only()) {
111 : assert((isARMLowRegister(DestReg) || isVirtualRegister(DestReg)) &&
112 : "Thumb1 does not have ldr to high register");
113 91 : return emitThumb1LoadConstPool(MBB, MBBI, dl, DestReg, SubIdx, Val, Pred,
114 91 : PredReg, MIFlags);
115 : }
116 0 : return emitThumb2LoadConstPool(MBB, MBBI, dl, DestReg, SubIdx, Val, Pred,
117 0 : PredReg, MIFlags);
118 : }
119 :
/// emitThumbRegPlusImmInReg - Emits a series of instructions to materialize
/// a destreg = basereg + immediate in Thumb code. Materialize the immediate
/// in a register using mov / mvn sequences or load the immediate from a
/// constpool entry.
static void emitThumbRegPlusImmInReg(
    MachineBasicBlock &MBB, MachineBasicBlock::iterator &MBBI,
    const DebugLoc &dl, unsigned DestReg, unsigned BaseReg, int NumBytes,
    bool CanChangeCC, const TargetInstrInfo &TII,
    const ARMBaseRegisterInfo &MRI, unsigned MIFlags = MachineInstr::NoFlags) {
  MachineFunction &MF = *MBB.getParent();
  const ARMSubtarget &ST = MF.getSubtarget<ARMSubtarget>();
  // "High" here means the final add/sub cannot use the flag-setting
  // low-register forms and must fall back to tADDhirr.
  bool isHigh = !isARMLowRegister(DestReg) ||
                (BaseReg != 0 && !isARMLowRegister(BaseReg));
  bool isSub = false;
  // Subtract doesn't have high register version. Load the negative value
  // if either base or dest register is a high register. Also, if do not
  // issue sub as part of the sequence if condition register is to be
  // preserved.
  if (NumBytes < 0 && !isHigh && CanChangeCC) {
    isSub = true;
    NumBytes = -NumBytes;
  }
  unsigned LdReg = DestReg;
  if (DestReg == ARM::SP)
    assert(BaseReg == ARM::SP && "Unexpected!");
  // A high physical DestReg cannot receive the materialized immediate
  // directly (tMOVi8/tLDRpci write low registers), so stage it in a fresh
  // virtual tGPR instead.
  if (!isARMLowRegister(DestReg) && !MRI.isVirtualRegister(DestReg))
    LdReg = MF.getRegInfo().createVirtualRegister(&ARM::tGPRRegClass);

  if (NumBytes <= 255 && NumBytes >= 0 && CanChangeCC) {
    // Small non-negative immediate: one flag-setting tMOVi8.
    BuildMI(MBB, MBBI, dl, TII.get(ARM::tMOVi8), LdReg)
        .add(t1CondCodeOp())
        .addImm(NumBytes)
        .setMIFlags(MIFlags);
  } else if (NumBytes < 0 && NumBytes >= -255 && CanChangeCC) {
    // Small negative immediate: tMOVi8 followed by a reverse subtract.
    BuildMI(MBB, MBBI, dl, TII.get(ARM::tMOVi8), LdReg)
        .add(t1CondCodeOp())
        .addImm(NumBytes)
        .setMIFlags(MIFlags);
    BuildMI(MBB, MBBI, dl, TII.get(ARM::tRSB), LdReg)
        .add(t1CondCodeOp())
        .addReg(LdReg, RegState::Kill)
        .setMIFlags(MIFlags);
  } else if (ST.genExecuteOnly()) {
    // Execute-only code must not load from the code section, so build the
    // constant with a mov32 pseudo rather than a constant-pool load.
    BuildMI(MBB, MBBI, dl, TII.get(ARM::t2MOVi32imm), LdReg)
      .addImm(NumBytes).setMIFlags(MIFlags);
  } else
    MRI.emitLoadConstPool(MBB, MBBI, dl, LdReg, 0, NumBytes, ARMCC::AL, 0,
                          MIFlags);

  // Emit add / sub.
  int Opc = (isSub) ? ARM::tSUBrr
                    : ((isHigh || !CanChangeCC) ? ARM::tADDhirr : ARM::tADDrr);
  MachineInstrBuilder MIB = BuildMI(MBB, MBBI, dl, TII.get(Opc), DestReg);
  // tADDhirr is the only form here that does not set the condition codes.
  if (Opc != ARM::tADDhirr)
    MIB = MIB.add(t1CondCodeOp());
  if (DestReg == ARM::SP || isSub)
    MIB.addReg(BaseReg).addReg(LdReg, RegState::Kill);
  else
    MIB.addReg(LdReg).addReg(BaseReg, RegState::Kill);
  MIB.add(predOps(ARMCC::AL));
}
181 :
/// emitThumbRegPlusImmediate - Emits a series of instructions to materialize
/// a destreg = basereg + immediate in Thumb code. Tries a series of ADDs or
/// SUBs first, and uses a constant pool value if the instruction sequence would
/// be too long. This is allowed to modify the condition flags.
void llvm::emitThumbRegPlusImmediate(MachineBasicBlock &MBB,
                                     MachineBasicBlock::iterator &MBBI,
                                     const DebugLoc &dl, unsigned DestReg,
                                     unsigned BaseReg, int NumBytes,
                                     const TargetInstrInfo &TII,
                                     const ARMBaseRegisterInfo &MRI,
                                     unsigned MIFlags) {
  // Work with the magnitude; isSub records the direction.
  bool isSub = NumBytes < 0;
  unsigned Bytes = (unsigned)NumBytes;
  if (isSub) Bytes = -NumBytes;

  int CopyOpc = 0;
  unsigned CopyBits = 0;
  unsigned CopyScale = 1;
  bool CopyNeedsCC = false;
  int ExtraOpc = 0;
  unsigned ExtraBits = 0;
  unsigned ExtraScale = 1;
  bool ExtraNeedsCC = false;

  // Strategy:
  // We need to select two types of instruction, maximizing the available
  // immediate range of each. The instructions we use will depend on whether
  // DestReg and BaseReg are low, high or the stack pointer.
  // * CopyOpc  - DestReg = BaseReg + imm
  //              This will be emitted once if DestReg != BaseReg, and never if
  //              DestReg == BaseReg.
  // * ExtraOpc - DestReg = DestReg + imm
  //              This will be emitted as many times as necessary to add the
  //              full immediate.
  // If the immediate ranges of these instructions are not large enough to cover
  // NumBytes with a reasonable number of instructions, we fall back to using a
  // value loaded from a constant pool.
  if (DestReg == ARM::SP) {
    if (BaseReg == ARM::SP) {
      // sp -> sp
      // Already in right reg, no copy needed
    } else {
      // low -> sp or high -> sp
      CopyOpc = ARM::tMOVr;
      CopyBits = 0;
    }
    ExtraOpc = isSub ? ARM::tSUBspi : ARM::tADDspi;
    ExtraBits = 7;
    ExtraScale = 4;
  } else if (isARMLowRegister(DestReg)) {
    if (BaseReg == ARM::SP) {
      // sp -> low
      assert(!isSub && "Thumb1 does not have tSUBrSPi");
      CopyOpc = ARM::tADDrSPi;
      CopyBits = 8;
      CopyScale = 4;
    } else if (DestReg == BaseReg) {
      // low -> same low
      // Already in right reg, no copy needed
    } else if (isARMLowRegister(BaseReg)) {
      // low -> different low
      CopyOpc = isSub ? ARM::tSUBi3 : ARM::tADDi3;
      CopyBits = 3;
      CopyNeedsCC = true;
    } else {
      // high -> low
      CopyOpc = ARM::tMOVr;
      CopyBits = 0;
    }
    ExtraOpc = isSub ? ARM::tSUBi8 : ARM::tADDi8;
    ExtraBits = 8;
    ExtraNeedsCC = true;
  } else /* DestReg is high */ {
    if (DestReg == BaseReg) {
      // high -> same high
      // Already in right reg, no copy needed
    } else {
      // {low,high,sp} -> high
      CopyOpc = ARM::tMOVr;
      CopyBits = 0;
    }
    // No in-place add exists for a high destination, so ExtraOpc stays 0 and
    // any residual immediate forces the constant-pool fallback below.
    ExtraOpc = 0;
  }

  // We could handle an unaligned immediate with an unaligned copy instruction
  // and an aligned extra instruction, but this case is not currently needed.
  assert(((Bytes & 3) == 0 || ExtraScale == 1) &&
         "Unaligned offset, but all instructions require alignment");

  unsigned CopyRange = ((1 << CopyBits) - 1) * CopyScale;
  // If we would emit the copy with an immediate of 0, just use tMOVr.
  if (CopyOpc && Bytes < CopyScale) {
    CopyOpc = ARM::tMOVr;
    CopyScale = 1;
    CopyNeedsCC = false;
    CopyRange = 0;
  }
  unsigned ExtraRange = ((1 << ExtraBits) - 1) * ExtraScale; // per instruction
  unsigned RequiredCopyInstrs = CopyOpc ? 1 : 0;
  unsigned RangeAfterCopy = (CopyRange > Bytes) ? 0 : (Bytes - CopyRange);

  // We could handle this case when the copy instruction does not require an
  // aligned immediate, but we do not currently do this.
  assert(RangeAfterCopy % ExtraScale == 0 &&
         "Extra instruction requires immediate to be aligned");

  unsigned RequiredExtraInstrs;
  if (ExtraRange)
    RequiredExtraInstrs = alignTo(RangeAfterCopy, ExtraRange) / ExtraRange;
  else if (RangeAfterCopy > 0)
    // We need an extra instruction but none is available
    RequiredExtraInstrs = 1000000;
  else
    RequiredExtraInstrs = 0;
  unsigned RequiredInstrs = RequiredCopyInstrs + RequiredExtraInstrs;
  unsigned Threshold = (DestReg == ARM::SP) ? 3 : 2;

  // Use a constant pool, if the sequence of ADDs/SUBs is too expensive.
  if (RequiredInstrs > Threshold) {
    emitThumbRegPlusImmInReg(MBB, MBBI, dl,
                             DestReg, BaseReg, NumBytes, true,
                             TII, MRI, MIFlags);
    return;
  }

  // Emit zero or one copy instructions
  if (CopyOpc) {
    unsigned CopyImm = std::min(Bytes, CopyRange) / CopyScale;
    Bytes -= CopyImm * CopyScale;

    MachineInstrBuilder MIB = BuildMI(MBB, MBBI, dl, TII.get(CopyOpc), DestReg);
    if (CopyNeedsCC)
      MIB = MIB.add(t1CondCodeOp());
    MIB.addReg(BaseReg, RegState::Kill);
    if (CopyOpc != ARM::tMOVr) {
      MIB.addImm(CopyImm);
    }
    MIB.setMIFlags(MIFlags).add(predOps(ARMCC::AL));

    // Any remaining immediate is added onto DestReg from here on.
    BaseReg = DestReg;
  }

  // Emit zero or more in-place add/sub instructions
  while (Bytes) {
    unsigned ExtraImm = std::min(Bytes, ExtraRange) / ExtraScale;
    Bytes -= ExtraImm * ExtraScale;

    MachineInstrBuilder MIB = BuildMI(MBB, MBBI, dl, TII.get(ExtraOpc), DestReg);
    if (ExtraNeedsCC)
      MIB = MIB.add(t1CondCodeOp());
    MIB.addReg(BaseReg)
        .addImm(ExtraImm)
        .add(predOps(ARMCC::AL))
        .setMIFlags(MIFlags);
  }
}
338 :
339 : static void removeOperands(MachineInstr &MI, unsigned i) {
340 : unsigned Op = i;
341 90 : for (unsigned e = MI.getNumOperands(); i != e; ++i)
342 60 : MI.RemoveOperand(Op);
343 : }
344 :
345 : /// convertToNonSPOpcode - Change the opcode to the non-SP version, because
346 : /// we're replacing the frame index with a non-SP register.
347 : static unsigned convertToNonSPOpcode(unsigned Opcode) {
348 1401 : switch (Opcode) {
349 : case ARM::tLDRspi:
350 : return ARM::tLDRi;
351 :
352 449 : case ARM::tSTRspi:
353 : return ARM::tSTRi;
354 : }
355 :
356 : return Opcode;
357 : }
358 :
// Rewrite the frame-index operand at FrameRegIdx of *II to use FrameReg plus
// as much of Offset as the instruction can encode. Offset is in/out: on
// return it holds whatever residue could not be folded. Returns true when
// the instruction has been fully handled (tADDframe is lowered and erased;
// an in-range AddrModeT1_s immediate is folded in place); otherwise returns
// (Offset == 0), leaving the caller to materialize the remainder.
bool ThumbRegisterInfo::rewriteFrameIndex(MachineBasicBlock::iterator II,
                                          unsigned FrameRegIdx,
                                          unsigned FrameReg, int &Offset,
                                          const ARMBaseInstrInfo &TII) const {
  MachineInstr &MI = *II;
  MachineBasicBlock &MBB = *MI.getParent();
  assert(MBB.getParent()->getSubtarget<ARMSubtarget>().isThumb1Only() &&
         "This isn't needed for thumb2!");
  DebugLoc dl = MI.getDebugLoc();
  MachineInstrBuilder MIB(*MBB.getParent(), &MI);
  unsigned Opcode = MI.getOpcode();
  const MCInstrDesc &Desc = MI.getDesc();
  unsigned AddrMode = (Desc.TSFlags & ARMII::AddrModeMask);

  if (Opcode == ARM::tADDframe) {
    // Lower the pseudo to a real add sequence and erase it.
    Offset += MI.getOperand(FrameRegIdx+1).getImm();
    unsigned DestReg = MI.getOperand(0).getReg();

    emitThumbRegPlusImmediate(MBB, II, dl, DestReg, FrameReg, Offset, TII,
                              *this);
    MBB.erase(II);
    return true;
  } else {
    if (AddrMode != ARMII::AddrModeT1_s)
      llvm_unreachable("Unsupported addressing mode!");

    unsigned ImmIdx = FrameRegIdx + 1;
    int InstrOffs = MI.getOperand(ImmIdx).getImm();
    // SP-relative loads/stores get an 8-bit immediate field, others 5 bits;
    // either way the immediate is scaled by 4.
    unsigned NumBits = (FrameReg == ARM::SP) ? 8 : 5;
    unsigned Scale = 4;

    Offset += InstrOffs * Scale;
    assert((Offset & (Scale - 1)) == 0 && "Can't encode this offset!");

    // Common case: small offset, fits into instruction.
    MachineOperand &ImmOp = MI.getOperand(ImmIdx);
    int ImmedOffset = Offset / Scale;
    unsigned Mask = (1 << NumBits) - 1;

    if ((unsigned)Offset <= Mask * Scale) {
      // Replace the FrameIndex with the frame register (e.g., sp).
      MI.getOperand(FrameRegIdx).ChangeToRegister(FrameReg, false);
      ImmOp.ChangeToImmediate(ImmedOffset);

      // If we're using a register where sp was stored, convert the instruction
      // to the non-SP version.
      unsigned NewOpc = convertToNonSPOpcode(Opcode);
      if (NewOpc != Opcode && FrameReg != ARM::SP)
        MI.setDesc(TII.get(NewOpc));

      return true;
    }

    // Out of range: only the narrow 5-bit field is usable from here on.
    NumBits = 5;
    Mask = (1 << NumBits) - 1;

    // If this is a thumb spill / restore, we will be using a constpool load to
    // materialize the offset.
    if (Opcode == ARM::tLDRspi || Opcode == ARM::tSTRspi) {
      ImmOp.ChangeToImmediate(0);
    } else {
      // Otherwise, it didn't fit. Pull in what we can to simplify the immed.
      ImmedOffset = ImmedOffset & Mask;
      ImmOp.ChangeToImmediate(ImmedOffset);
      Offset &= ~(Mask * Scale);
    }
  }

  return Offset == 0;
}
429 :
430 28 : void ThumbRegisterInfo::resolveFrameIndex(MachineInstr &MI, unsigned BaseReg,
431 : int64_t Offset) const {
432 28 : const MachineFunction &MF = *MI.getParent()->getParent();
433 28 : const ARMSubtarget &STI = MF.getSubtarget<ARMSubtarget>();
434 28 : if (!STI.isThumb1Only())
435 15 : return ARMBaseRegisterInfo::resolveFrameIndex(MI, BaseReg, Offset);
436 :
437 13 : const ARMBaseInstrInfo &TII = *STI.getInstrInfo();
438 13 : int Off = Offset; // ARM doesn't need the general 64-bit offsets
439 : unsigned i = 0;
440 :
441 52 : while (!MI.getOperand(i).isFI()) {
442 13 : ++i;
443 : assert(i < MI.getNumOperands() && "Instr doesn't have FrameIndex operand!");
444 : }
445 13 : bool Done = rewriteFrameIndex(MI, i, BaseReg, Off, TII);
446 : assert (Done && "Unable to resolve frame index!");
447 : (void)Done;
448 : }
449 :
/// saveScavengerRegister - Spill the register so it can be used by the
/// register scavenger. Return true.
/// On Thumb-1 the "spill" is a copy into R12 rather than a store to the
/// emergency stack slot; the restore point (UseMI) is moved earlier if any
/// intervening instruction touches or clobbers R12.
bool ThumbRegisterInfo::saveScavengerRegister(
    MachineBasicBlock &MBB, MachineBasicBlock::iterator I,
    MachineBasicBlock::iterator &UseMI, const TargetRegisterClass *RC,
    unsigned Reg) const {

  const ARMSubtarget &STI = MBB.getParent()->getSubtarget<ARMSubtarget>();
  if (!STI.isThumb1Only())
    return ARMBaseRegisterInfo::saveScavengerRegister(MBB, I, UseMI, RC, Reg);

  // Thumb1 can't use the emergency spill slot on the stack because
  // ldr/str immediate offsets must be positive, and if we're referencing
  // off the frame pointer (if, for example, there are alloca() calls in
  // the function, the offset will be negative. Use R12 instead since that's
  // a call clobbered register that we know won't be used in Thumb1 mode.
  const TargetInstrInfo &TII = *STI.getInstrInfo();
  DebugLoc DL;
  BuildMI(MBB, I, DL, TII.get(ARM::tMOVr))
      .addReg(ARM::R12, RegState::Define)
      .addReg(Reg, RegState::Kill)
      .add(predOps(ARMCC::AL));

  // The UseMI is where we would like to restore the register. If there's
  // interference with R12 before then, however, we'll need to restore it
  // before that instead and adjust the UseMI.
  bool done = false;
  for (MachineBasicBlock::iterator II = I; !done && II != UseMI ; ++II) {
    // Debug instructions never interfere with R12.
    if (II->isDebugInstr())
      continue;
    // If this instruction affects R12, adjust our restore point.
    for (unsigned i = 0, e = II->getNumOperands(); i != e; ++i) {
      const MachineOperand &MO = II->getOperand(i);
      // A call's regmask clobbering R12 forces the restore before the call.
      if (MO.isRegMask() && MO.clobbersPhysReg(ARM::R12)) {
        UseMI = II;
        done = true;
        break;
      }
      // Virtual and undef register operands cannot interfere with R12.
      if (!MO.isReg() || MO.isUndef() || !MO.getReg() ||
          TargetRegisterInfo::isVirtualRegister(MO.getReg()))
        continue;
      if (MO.getReg() == ARM::R12) {
        UseMI = II;
        done = true;
        break;
      }
    }
  }
  // Restore the register from R12
  BuildMI(MBB, UseMI, DL, TII.get(ARM::tMOVr))
      .addReg(Reg, RegState::Define)
      .addReg(ARM::R12, RegState::Kill)
      .add(predOps(ARMCC::AL));

  return true;
}
506 :
// Replace the frame-index operand of *II with a concrete frame register and
// offset. Thumb-2/ARM are delegated to the base class; the Thumb-1 path
// first tries to fold the offset into the instruction via rewriteFrameIndex
// and, when the immediate doesn't fit, materializes frame-reg + offset into
// a scratch register and rewrites the memory operands around it.
void ThumbRegisterInfo::eliminateFrameIndex(MachineBasicBlock::iterator II,
                                            int SPAdj, unsigned FIOperandNum,
                                            RegScavenger *RS) const {
  MachineInstr &MI = *II;
  MachineBasicBlock &MBB = *MI.getParent();
  MachineFunction &MF = *MBB.getParent();
  const ARMSubtarget &STI = MF.getSubtarget<ARMSubtarget>();
  if (!STI.isThumb1Only())
    return ARMBaseRegisterInfo::eliminateFrameIndex(II, SPAdj, FIOperandNum,
                                                    RS);

  unsigned VReg = 0;
  const ARMBaseInstrInfo &TII = *STI.getInstrInfo();
  DebugLoc dl = MI.getDebugLoc();
  MachineInstrBuilder MIB(*MBB.getParent(), &MI);

  // Resolve the frame index to a (FrameReg, Offset) pair.
  unsigned FrameReg;
  int FrameIndex = MI.getOperand(FIOperandNum).getIndex();
  const ARMFrameLowering *TFI = getFrameLowering(MF);
  int Offset = TFI->ResolveFrameIndexReference(MF, FrameIndex, FrameReg, SPAdj);

  // PEI::scavengeFrameVirtualRegs() cannot accurately track SPAdj because the
  // call frame setup/destroy instructions have already been eliminated. That
  // means the stack pointer cannot be used to access the emergency spill slot
  // when !hasReservedCallFrame().
#ifndef NDEBUG
  if (RS && FrameReg == ARM::SP && RS->isScavengingFrameIndex(FrameIndex)){
    assert(STI.getFrameLowering()->hasReservedCallFrame(MF) &&
           "Cannot use SP to access the emergency spill slot in "
           "functions without a reserved call frame");
    assert(!MF.getFrameInfo().hasVarSizedObjects() &&
           "Cannot use SP to access the emergency spill slot in "
           "functions with variable sized frame objects");
  }
#endif // NDEBUG

  // Special handling of dbg_value instructions.
  if (MI.isDebugValue()) {
    MI.getOperand(FIOperandNum). ChangeToRegister(FrameReg, false /*isDef*/);
    MI.getOperand(FIOperandNum+1).ChangeToImmediate(Offset);
    return;
  }

  // Modify MI as necessary to handle as much of 'Offset' as possible
  assert(MF.getInfo<ARMFunctionInfo>()->isThumbFunction() &&
         "This eliminateFrameIndex only supports Thumb1!");
  if (rewriteFrameIndex(MI, FIOperandNum, FrameReg, Offset, TII))
    return;

  // If we get here, the immediate doesn't fit into the instruction.  We folded
  // as much as possible above, handle the rest, providing a register that is
  // SP+LargeImm.
  assert(Offset && "This code isn't needed if offset already handled!");

  unsigned Opcode = MI.getOpcode();

  // Remove predicate first.
  int PIdx = MI.findFirstPredOperandIdx();
  if (PIdx != -1)
    removeOperands(MI, PIdx);

  if (MI.mayLoad()) {
    // Use the destination register to materialize sp + offset.
    unsigned TmpReg = MI.getOperand(0).getReg();
    bool UseRR = false;
    if (Opcode == ARM::tLDRspi) {
      if (FrameReg == ARM::SP || STI.genExecuteOnly())
        emitThumbRegPlusImmInReg(MBB, II, dl, TmpReg, FrameReg,
                                 Offset, false, TII, *this);
      else {
        // Load only the offset; the final address is FrameReg + TmpReg,
        // selected below via the [reg, reg] form (UseRR).
        emitLoadConstPool(MBB, II, dl, TmpReg, 0, Offset);
        UseRR = true;
      }
    } else {
      emitThumbRegPlusImmediate(MBB, II, dl, TmpReg, FrameReg, Offset, TII,
                                *this);
    }

    MI.setDesc(TII.get(UseRR ? ARM::tLDRr : ARM::tLDRi));
    MI.getOperand(FIOperandNum).ChangeToRegister(TmpReg, false, false, true);
    if (UseRR)
      // Use [reg, reg] addrmode. Replace the immediate operand w/ the frame
      // register. The offset is already handled in the vreg value.
      MI.getOperand(FIOperandNum+1).ChangeToRegister(FrameReg, false, false,
                                                     false);
  } else if (MI.mayStore()) {
      // A store cannot reuse its source register, so compute the address
      // into a fresh virtual register instead.
      VReg = MF.getRegInfo().createVirtualRegister(&ARM::tGPRRegClass);
      bool UseRR = false;

      if (Opcode == ARM::tSTRspi) {
        if (FrameReg == ARM::SP || STI.genExecuteOnly())
          emitThumbRegPlusImmInReg(MBB, II, dl, VReg, FrameReg,
                                   Offset, false, TII, *this);
        else {
          emitLoadConstPool(MBB, II, dl, VReg, 0, Offset);
          UseRR = true;
        }
      } else
        emitThumbRegPlusImmediate(MBB, II, dl, VReg, FrameReg, Offset, TII,
                                  *this);
      MI.setDesc(TII.get(UseRR ? ARM::tSTRr : ARM::tSTRi));
      MI.getOperand(FIOperandNum).ChangeToRegister(VReg, false, false, true);
      if (UseRR)
        // Use [reg, reg] addrmode. Replace the immediate operand w/ the frame
        // register. The offset is already handled in the vreg value.
        MI.getOperand(FIOperandNum+1).ChangeToRegister(FrameReg, false, false,
                                                       false);
  } else {
    llvm_unreachable("Unexpected opcode!");
  }

  // Add predicate back if it's needed.
  if (MI.isPredicable())
    MIB.add(predOps(ARMCC::AL));
}
|