Bug Summary

File: lib/Target/ARM/Thumb1RegisterInfo.cpp
Location: line 237, column 5
Description: Value stored to 'CopyBits' is never read
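
The checker is flagging the assignment 'CopyBits = 0;' inside the "if (CopyOpc && Bytes < CopyScale)" block of emitThumbRegPlusImmediate: CopyBits is only read when CopyRange is computed a few lines earlier, and that block resets CopyRange to 0 directly, so the value stored at line 237 is never consulted again. A dead store is usually harmless on its own, but it can also hide a logic error (a value that was meant to be used later), which is why the analyzer reports it. A minimal, hypothetical reduction of the same pattern (not code from the LLVM tree) looks like this:

    unsigned copyRange(unsigned CopyBits, unsigned CopyScale, bool ZeroImm) {
      // CopyBits is read only here, before the branch.
      unsigned Range = ((1u << CopyBits) - 1) * CopyScale;
      if (ZeroImm) {
        CopyBits = 0;   // dead store: nothing reads CopyBits past this point
        Range = 0;
      }
      return Range;
    }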

Annotated Source Code

1//===-- Thumb1RegisterInfo.cpp - Thumb-1 Register Information -------------===//
2//
3// The LLVM Compiler Infrastructure
4//
5// This file is distributed under the University of Illinois Open Source
6// License. See LICENSE.TXT for details.
7//
8//===----------------------------------------------------------------------===//
9//
10// This file contains the Thumb-1 implementation of the TargetRegisterInfo
11// class.
12//
13//===----------------------------------------------------------------------===//
14
15#include "Thumb1RegisterInfo.h"
16#include "ARMBaseInstrInfo.h"
17#include "ARMMachineFunctionInfo.h"
18#include "ARMSubtarget.h"
19#include "MCTargetDesc/ARMAddressingModes.h"
20#include "llvm/CodeGen/MachineConstantPool.h"
21#include "llvm/CodeGen/MachineFrameInfo.h"
22#include "llvm/CodeGen/MachineFunction.h"
23#include "llvm/CodeGen/MachineInstrBuilder.h"
24#include "llvm/CodeGen/MachineRegisterInfo.h"
25#include "llvm/CodeGen/RegisterScavenging.h"
26#include "llvm/IR/Constants.h"
27#include "llvm/IR/DerivedTypes.h"
28#include "llvm/IR/Function.h"
29#include "llvm/IR/LLVMContext.h"
30#include "llvm/Support/CommandLine.h"
31#include "llvm/Support/ErrorHandling.h"
32#include "llvm/Target/TargetFrameLowering.h"
33#include "llvm/Target/TargetMachine.h"
34
35namespace llvm {
36extern cl::opt<bool> ReuseFrameIndexVals;
37}
38
39using namespace llvm;
40
41Thumb1RegisterInfo::Thumb1RegisterInfo(const ARMSubtarget &sti)
42 : ARMBaseRegisterInfo(sti) {
43}
44
45const TargetRegisterClass*
46Thumb1RegisterInfo::getLargestLegalSuperClass(const TargetRegisterClass *RC)
47 const {
48 if (ARM::tGPRRegClass.hasSubClassEq(RC))
49 return &ARM::tGPRRegClass;
50 return ARMBaseRegisterInfo::getLargestLegalSuperClass(RC);
51}
52
53const TargetRegisterClass *
54Thumb1RegisterInfo::getPointerRegClass(const MachineFunction &MF, unsigned Kind)
55 const {
56 return &ARM::tGPRRegClass;
57}
58
59/// emitLoadConstPool - Emits a load from constpool to materialize the
60/// specified immediate.
61void
62Thumb1RegisterInfo::emitLoadConstPool(MachineBasicBlock &MBB,
63 MachineBasicBlock::iterator &MBBI,
64 DebugLoc dl,
65 unsigned DestReg, unsigned SubIdx,
66 int Val,
67 ARMCC::CondCodes Pred, unsigned PredReg,
68 unsigned MIFlags) const {
69 assert((isARMLowRegister(DestReg) ||
70 isVirtualRegister(DestReg)) &&
71 "Thumb1 does not have ldr to high register");
72
73 MachineFunction &MF = *MBB.getParent();
74 const TargetInstrInfo &TII = *MF.getSubtarget().getInstrInfo();
75 MachineConstantPool *ConstantPool = MF.getConstantPool();
76 const Constant *C = ConstantInt::get(
77 Type::getInt32Ty(MBB.getParent()->getFunction()->getContext()), Val);
78 unsigned Idx = ConstantPool->getConstantPoolIndex(C, 4);
79
80 BuildMI(MBB, MBBI, dl, TII.get(ARM::tLDRpci))
81 .addReg(DestReg, getDefRegState(true), SubIdx)
82 .addConstantPoolIndex(Idx).addImm(Pred).addReg(PredReg)
83 .setMIFlags(MIFlags);
84}
85
86
87/// emitThumbRegPlusImmInReg - Emits a series of instructions to materialize
88/// a destreg = basereg + immediate in Thumb code. Materialize the immediate
89/// in a register using mov / mvn sequences or load the immediate from a
90/// constpool entry.
91static
92void emitThumbRegPlusImmInReg(MachineBasicBlock &MBB,
93 MachineBasicBlock::iterator &MBBI,
94 DebugLoc dl,
95 unsigned DestReg, unsigned BaseReg,
96 int NumBytes, bool CanChangeCC,
97 const TargetInstrInfo &TII,
98 const ARMBaseRegisterInfo& MRI,
99 unsigned MIFlags = MachineInstr::NoFlags) {
100 MachineFunction &MF = *MBB.getParent();
101 bool isHigh = !isARMLowRegister(DestReg) ||
102 (BaseReg != 0 && !isARMLowRegister(BaseReg));
103 bool isSub = false;
104 // Subtract doesn't have a high register version. Load the negative value
105 // if either the base or dest register is a high register. Also, do not
106 // issue sub as part of the sequence if the condition register is to be
107 // preserved.
108 if (NumBytes < 0 && !isHigh && CanChangeCC) {
109 isSub = true;
110 NumBytes = -NumBytes;
111 }
112 unsigned LdReg = DestReg;
113 if (DestReg == ARM::SP)
114 assert(BaseReg == ARM::SP && "Unexpected!");
115 if (!isARMLowRegister(DestReg) && !MRI.isVirtualRegister(DestReg))
116 LdReg = MF.getRegInfo().createVirtualRegister(&ARM::tGPRRegClass);
117
118 if (NumBytes <= 255 && NumBytes >= 0 && CanChangeCC) {
119 AddDefaultT1CC(BuildMI(MBB, MBBI, dl, TII.get(ARM::tMOVi8), LdReg))
120 .addImm(NumBytes).setMIFlags(MIFlags);
121 } else if (NumBytes < 0 && NumBytes >= -255 && CanChangeCC) {
122 AddDefaultT1CC(BuildMI(MBB, MBBI, dl, TII.get(ARM::tMOVi8), LdReg))
123 .addImm(NumBytes).setMIFlags(MIFlags);
124 AddDefaultT1CC(BuildMI(MBB, MBBI, dl, TII.get(ARM::tRSB), LdReg))
125 .addReg(LdReg, RegState::Kill).setMIFlags(MIFlags);
126 } else
127 MRI.emitLoadConstPool(MBB, MBBI, dl, LdReg, 0, NumBytes,
128 ARMCC::AL, 0, MIFlags);
129
130 // Emit add / sub.
131 int Opc = (isSub) ? ARM::tSUBrr : ((isHigh || !CanChangeCC) ? ARM::tADDhirr
132 : ARM::tADDrr);
133 MachineInstrBuilder MIB =
134 BuildMI(MBB, MBBI, dl, TII.get(Opc), DestReg);
135 if (Opc != ARM::tADDhirr)
136 MIB = AddDefaultT1CC(MIB);
137 if (DestReg == ARM::SP || isSub)
138 MIB.addReg(BaseReg).addReg(LdReg, RegState::Kill);
139 else
140 MIB.addReg(LdReg).addReg(BaseReg, RegState::Kill);
141 AddDefaultPred(MIB);
142}
143
144/// emitThumbRegPlusImmediate - Emits a series of instructions to materialize
145/// a destreg = basereg + immediate in Thumb code. Tries a series of ADDs or
146/// SUBs first, and uses a constant pool value if the instruction sequence would
147/// be too long. This is allowed to modify the condition flags.
148void llvm::emitThumbRegPlusImmediate(MachineBasicBlock &MBB,
149 MachineBasicBlock::iterator &MBBI,
150 DebugLoc dl,
151 unsigned DestReg, unsigned BaseReg,
152 int NumBytes, const TargetInstrInfo &TII,
153 const ARMBaseRegisterInfo& MRI,
154 unsigned MIFlags) {
155 bool isSub = NumBytes < 0;
156 unsigned Bytes = (unsigned)NumBytes;
157 if (isSub) Bytes = -NumBytes;
158
159 int CopyOpc = 0;
160 unsigned CopyBits = 0;
161 unsigned CopyScale = 1;
162 bool CopyNeedsCC = false;
163 int ExtraOpc = 0;
164 unsigned ExtraBits = 0;
165 unsigned ExtraScale = 1;
166 bool ExtraNeedsCC = false;
167
168 // Strategy:
169 // We need to select two types of instruction, maximizing the available
170 // immediate range of each. The instructions we use will depend on whether
171 // DestReg and BaseReg are low, high or the stack pointer.
172 // * CopyOpc - DestReg = BaseReg + imm
173 // This will be emitted once if DestReg != BaseReg, and never if
174 // DestReg == BaseReg.
175 // * ExtraOpc - DestReg = DestReg + imm
176 // This will be emitted as many times as necessary to add the
177 // full immediate.
178 // If the immediate ranges of these instructions are not large enough to cover
179 // NumBytes with a reasonable number of instructions, we fall back to using a
180 // value loaded from a constant pool.
181 if (DestReg == ARM::SP) {
182 if (BaseReg == ARM::SP) {
183 // sp -> sp
184 // Already in right reg, no copy needed
185 } else {
186 // low -> sp or high -> sp
187 CopyOpc = ARM::tMOVr;
188 CopyBits = 0;
189 }
190 ExtraOpc = isSub ? ARM::tSUBspi : ARM::tADDspi;
191 ExtraBits = 7;
192 ExtraScale = 4;
193 } else if (isARMLowRegister(DestReg)) {
194 if (BaseReg == ARM::SP) {
195 // sp -> low
196 assert(!isSub && "Thumb1 does not have tSUBrSPi");
197 CopyOpc = ARM::tADDrSPi;
198 CopyBits = 8;
199 CopyScale = 4;
200 } else if (DestReg == BaseReg) {
201 // low -> same low
202 // Already in right reg, no copy needed
203 } else if (isARMLowRegister(BaseReg)) {
204 // low -> different low
205 CopyOpc = isSub ? ARM::tSUBi3 : ARM::tADDi3;
206 CopyBits = 3;
207 CopyNeedsCC = true;
208 } else {
209 // high -> low
210 CopyOpc = ARM::tMOVr;
211 CopyBits = 0;
212 }
213 ExtraOpc = isSub ? ARM::tSUBi8 : ARM::tADDi8;
214 ExtraBits = 8;
215 ExtraNeedsCC = true;
216 } else /* DestReg is high */ {
217 if (DestReg == BaseReg) {
218 // high -> same high
219 // Already in right reg, no copy needed
220 } else {
221 // {low,high,sp} -> high
222 CopyOpc = ARM::tMOVr;
223 CopyBits = 0;
224 }
225 ExtraOpc = 0;
226 }
227
228 // We could handle an unaligned immediate with an unaligned copy instruction
229 // and an aligned extra instruction, but this case is not currently needed.
230 assert(((Bytes & 3) == 0 || ExtraScale == 1) &&
231 "Unaligned offset, but all instructions require alignment");
232
233 unsigned CopyRange = ((1 << CopyBits) - 1) * CopyScale;
234 // If we would emit the copy with an immediate of 0, just use tMOVr.
235 if (CopyOpc && Bytes < CopyScale) {
236 CopyOpc = ARM::tMOVr;
237 CopyBits = 0;
Value stored to 'CopyBits' is never read
238 CopyScale = 1;
239 CopyNeedsCC = false;
240 CopyRange = 0;
241 }
242 unsigned ExtraRange = ((1 << ExtraBits) - 1) * ExtraScale; // per instruction
243 unsigned RequiredCopyInstrs = CopyOpc ? 1 : 0;
244 unsigned RangeAfterCopy = (CopyRange > Bytes) ? 0 : (Bytes - CopyRange);
245
246 // We could handle this case when the copy instruction does not require an
247 // aligned immediate, but we do not currently do this.
248 assert(RangeAfterCopy % ExtraScale == 0 &&
249 "Extra instruction requires immediate to be aligned");
250
251 unsigned RequiredExtraInstrs;
252 if (ExtraRange)
253 RequiredExtraInstrs = RoundUpToAlignment(RangeAfterCopy, ExtraRange) / ExtraRange;
254 else if (RangeAfterCopy > 0)
255 // We need an extra instruction but none is available
256 RequiredExtraInstrs = 1000000;
257 else
258 RequiredExtraInstrs = 0;
259 unsigned RequiredInstrs = RequiredCopyInstrs + RequiredExtraInstrs;
260 unsigned Threshold = (DestReg == ARM::SP) ? 3 : 2;
261
262 // Use a constant pool, if the sequence of ADDs/SUBs is too expensive.
263 if (RequiredInstrs > Threshold) {
264 emitThumbRegPlusImmInReg(MBB, MBBI, dl,
265 DestReg, BaseReg, NumBytes, true,
266 TII, MRI, MIFlags);
267 return;
268 }
269
270 // Emit zero or one copy instructions
271 if (CopyOpc) {
272 unsigned CopyImm = std::min(Bytes, CopyRange) / CopyScale;
273 Bytes -= CopyImm * CopyScale;
274
275 MachineInstrBuilder MIB = BuildMI(MBB, MBBI, dl, TII.get(CopyOpc), DestReg);
276 if (CopyNeedsCC)
277 MIB = AddDefaultT1CC(MIB);
278 MIB.addReg(BaseReg, RegState::Kill);
279 if (CopyOpc != ARM::tMOVr) {
280 MIB.addImm(CopyImm);
281 }
282 AddDefaultPred(MIB.setMIFlags(MIFlags));
283
284 BaseReg = DestReg;
285 }
286
287 // Emit zero or more in-place add/sub instructions
288 while (Bytes) {
289 unsigned ExtraImm = std::min(Bytes, ExtraRange) / ExtraScale;
290 Bytes -= ExtraImm * ExtraScale;
291
292 MachineInstrBuilder MIB = BuildMI(MBB, MBBI, dl, TII.get(ExtraOpc), DestReg);
293 if (ExtraNeedsCC)
294 MIB = AddDefaultT1CC(MIB);
295 MIB.addReg(BaseReg).addImm(ExtraImm);
296 MIB = AddDefaultPred(MIB);
297 MIB.setMIFlags(MIFlags);
298 }
299}
300
301static void removeOperands(MachineInstr &MI, unsigned i) {
302 unsigned Op = i;
303 for (unsigned e = MI.getNumOperands(); i != e; ++i)
304 MI.RemoveOperand(Op);
305}
306
307/// convertToNonSPOpcode - Change the opcode to the non-SP version, because
308/// we're replacing the frame index with a non-SP register.
309static unsigned convertToNonSPOpcode(unsigned Opcode) {
310 switch (Opcode) {
311 case ARM::tLDRspi:
312 return ARM::tLDRi;
313
314 case ARM::tSTRspi:
315 return ARM::tSTRi;
316 }
317
318 return Opcode;
319}
320
321bool Thumb1RegisterInfo::
322rewriteFrameIndex(MachineBasicBlock::iterator II, unsigned FrameRegIdx,
323 unsigned FrameReg, int &Offset,
324 const ARMBaseInstrInfo &TII) const {
325 MachineInstr &MI = *II;
326 MachineBasicBlock &MBB = *MI.getParent();
327 DebugLoc dl = MI.getDebugLoc();
328 MachineInstrBuilder MIB(*MBB.getParent(), &MI);
329 unsigned Opcode = MI.getOpcode();
330 const MCInstrDesc &Desc = MI.getDesc();
331 unsigned AddrMode = (Desc.TSFlags & ARMII::AddrModeMask);
332
333 if (Opcode == ARM::tADDframe) {
334 Offset += MI.getOperand(FrameRegIdx+1).getImm();
335 unsigned DestReg = MI.getOperand(0).getReg();
336
337 emitThumbRegPlusImmediate(MBB, II, dl, DestReg, FrameReg, Offset, TII,
338 *this);
339 MBB.erase(II);
340 return true;
341 } else {
342 if (AddrMode != ARMII::AddrModeT1_s)
343 llvm_unreachable("Unsupported addressing mode!");
344
345 unsigned ImmIdx = FrameRegIdx + 1;
346 int InstrOffs = MI.getOperand(ImmIdx).getImm();
347 unsigned NumBits = (FrameReg == ARM::SP) ? 8 : 5;
348 unsigned Scale = 4;
349
350 Offset += InstrOffs * Scale;
351 assert((Offset & (Scale - 1)) == 0 && "Can't encode this offset!");
352
353 // Common case: small offset, fits into instruction.
354 MachineOperand &ImmOp = MI.getOperand(ImmIdx);
355 int ImmedOffset = Offset / Scale;
356 unsigned Mask = (1 << NumBits) - 1;
357
358 if ((unsigned)Offset <= Mask * Scale) {
359 // Replace the FrameIndex with the frame register (e.g., sp).
360 MI.getOperand(FrameRegIdx).ChangeToRegister(FrameReg, false);
361 ImmOp.ChangeToImmediate(ImmedOffset);
362
363 // If we're using a register where sp was stored, convert the instruction
364 // to the non-SP version.
365 unsigned NewOpc = convertToNonSPOpcode(Opcode);
366 if (NewOpc != Opcode && FrameReg != ARM::SP)
367 MI.setDesc(TII.get(NewOpc));
368
369 return true;
370 }
371
372 NumBits = 5;
373 Mask = (1 << NumBits) - 1;
374
375 // If this is a thumb spill / restore, we will be using a constpool load to
376 // materialize the offset.
377 if (Opcode == ARM::tLDRspi || Opcode == ARM::tSTRspi) {
378 ImmOp.ChangeToImmediate(0);
379 } else {
380 // Otherwise, it didn't fit. Pull in what we can to simplify the immed.
381 ImmedOffset = ImmedOffset & Mask;
382 ImmOp.ChangeToImmediate(ImmedOffset);
383 Offset &= ~(Mask * Scale);
384 }
385 }
386
387 return Offset == 0;
388}
389
390void Thumb1RegisterInfo::resolveFrameIndex(MachineInstr &MI, unsigned BaseReg,
391 int64_t Offset) const {
392 const ARMBaseInstrInfo &TII =
393 *static_cast<const ARMBaseInstrInfo *>(MI.getParent()
394 ->getParent()
395 ->getTarget()
396 .getSubtargetImpl()
397 ->getInstrInfo());
398 int Off = Offset; // ARM doesn't need the general 64-bit offsets
399 unsigned i = 0;
400
401 while (!MI.getOperand(i).isFI()) {
402 ++i;
403 assert(i < MI.getNumOperands() && "Instr doesn't have FrameIndex operand!");
404 }
405 bool Done = rewriteFrameIndex(MI, i, BaseReg, Off, TII);
406 assert(Done && "Unable to resolve frame index!");
407 (void)Done;
408}
409
410/// saveScavengerRegister - Spill the register so it can be used by the
411/// register scavenger. Return true.
412bool
413Thumb1RegisterInfo::saveScavengerRegister(MachineBasicBlock &MBB,
414 MachineBasicBlock::iterator I,
415 MachineBasicBlock::iterator &UseMI,
416 const TargetRegisterClass *RC,
417 unsigned Reg) const {
418 // Thumb1 can't use the emergency spill slot on the stack because
419 // ldr/str immediate offsets must be positive, and if we're referencing
420 // off the frame pointer (if, for example, there are alloca() calls in
421 // the function), the offset will be negative. Use R12 instead since that's
422 // a call-clobbered register that we know won't be used in Thumb1 mode.
423 const TargetInstrInfo &TII = *MBB.getParent()->getSubtarget().getInstrInfo();
424 DebugLoc DL;
425 AddDefaultPred(BuildMI(MBB, I, DL, TII.get(ARM::tMOVr))
426 .addReg(ARM::R12, RegState::Define)
427 .addReg(Reg, RegState::Kill));
428
429 // The UseMI is where we would like to restore the register. If there's
430 // interference with R12 before then, however, we'll need to restore it
431 // before that instead and adjust the UseMI.
432 bool done = false;
433 for (MachineBasicBlock::iterator II = I; !done && II != UseMI ; ++II) {
434 if (II->isDebugValue())
435 continue;
436 // If this instruction affects R12, adjust our restore point.
437 for (unsigned i = 0, e = II->getNumOperands(); i != e; ++i) {
438 const MachineOperand &MO = II->getOperand(i);
439 if (MO.isRegMask() && MO.clobbersPhysReg(ARM::R12)) {
440 UseMI = II;
441 done = true;
442 break;
443 }
444 if (!MO.isReg() || MO.isUndef() || !MO.getReg() ||
445 TargetRegisterInfo::isVirtualRegister(MO.getReg()))
446 continue;
447 if (MO.getReg() == ARM::R12) {
448 UseMI = II;
449 done = true;
450 break;
451 }
452 }
453 }
454 // Restore the register from R12
455 AddDefaultPred(BuildMI(MBB, UseMI, DL, TII.get(ARM::tMOVr)).
456 addReg(Reg, RegState::Define).addReg(ARM::R12, RegState::Kill));
457
458 return true;
459}
460
461void
462Thumb1RegisterInfo::eliminateFrameIndex(MachineBasicBlock::iterator II,
463 int SPAdj, unsigned FIOperandNum,
464 RegScavenger *RS) const {
465 unsigned VReg = 0;
466 MachineInstr &MI = *II;
467 MachineBasicBlock &MBB = *MI.getParent();
468 MachineFunction &MF = *MBB.getParent();
469 const ARMBaseInstrInfo &TII =
470 *static_cast<const ARMBaseInstrInfo *>(MF.getSubtarget().getInstrInfo());
471 ARMFunctionInfo *AFI = MF.getInfo<ARMFunctionInfo>();
472 DebugLoc dl = MI.getDebugLoc();
473 MachineInstrBuilder MIB(*MBB.getParent(), &MI);
474
475 unsigned FrameReg = ARM::SP;
476 int FrameIndex = MI.getOperand(FIOperandNum).getIndex();
477 int Offset = MF.getFrameInfo()->getObjectOffset(FrameIndex) +
478 MF.getFrameInfo()->getStackSize() + SPAdj;
479
480 if (MF.getFrameInfo()->hasVarSizedObjects()) {
481 assert(SPAdj == 0 && MF.getSubtarget().getFrameLowering()->hasFP(MF) &&
482 "Unexpected");
483 // There are alloca()'s in this function, must reference off the frame
484 // pointer or base pointer instead.
485 if (!hasBasePointer(MF)) {
486 FrameReg = getFrameRegister(MF);
487 Offset -= AFI->getFramePtrSpillOffset();
488 } else
489 FrameReg = BasePtr;
490 }
491
492 // PEI::scavengeFrameVirtualRegs() cannot accurately track SPAdj because the
493 // call frame setup/destroy instructions have already been eliminated. That
494 // means the stack pointer cannot be used to access the emergency spill slot
495 // when !hasReservedCallFrame().
496#ifndef NDEBUG
497 if (RS && FrameReg == ARM::SP && RS->isScavengingFrameIndex(FrameIndex)){
498 assert(MF.getTarget()
499 .getSubtargetImpl()
500 ->getFrameLowering()
501 ->hasReservedCallFrame(MF) &&
502 "Cannot use SP to access the emergency spill slot in "
503 "functions without a reserved call frame");
504 assert(!MF.getFrameInfo()->hasVarSizedObjects() &&
505 "Cannot use SP to access the emergency spill slot in "
506 "functions with variable sized frame objects");
507 }
508#endif // NDEBUG
509
510 // Special handling of dbg_value instructions.
511 if (MI.isDebugValue()) {
512 MI.getOperand(FIOperandNum).ChangeToRegister(FrameReg, false /*isDef*/);
513 MI.getOperand(FIOperandNum+1).ChangeToImmediate(Offset);
514 return;
515 }
516
517 // Modify MI as necessary to handle as much of 'Offset' as possible
518 assert(AFI->isThumbFunction() &&
519 "This eliminateFrameIndex only supports Thumb1!");
520 if (rewriteFrameIndex(MI, FIOperandNum, FrameReg, Offset, TII))
521 return;
522
523 // If we get here, the immediate doesn't fit into the instruction. We folded
524 // as much as possible above, handle the rest, providing a register that is
525 // SP+LargeImm.
526 assert(Offset && "This code isn't needed if offset already handled!");
527
528 unsigned Opcode = MI.getOpcode();
529
530 // Remove predicate first.
531 int PIdx = MI.findFirstPredOperandIdx();
532 if (PIdx != -1)
533 removeOperands(MI, PIdx);
534
535 if (MI.mayLoad()) {
536 // Use the destination register to materialize sp + offset.
537 unsigned TmpReg = MI.getOperand(0).getReg();
538 bool UseRR = false;
539 if (Opcode == ARM::tLDRspi) {
540 if (FrameReg == ARM::SP)
541 emitThumbRegPlusImmInReg(MBB, II, dl, TmpReg, FrameReg,
542 Offset, false, TII, *this);
543 else {
544 emitLoadConstPool(MBB, II, dl, TmpReg, 0, Offset);
545 UseRR = true;
546 }
547 } else {
548 emitThumbRegPlusImmediate(MBB, II, dl, TmpReg, FrameReg, Offset, TII,
549 *this);
550 }
551
552 MI.setDesc(TII.get(UseRR ? ARM::tLDRr : ARM::tLDRi));
553 MI.getOperand(FIOperandNum).ChangeToRegister(TmpReg, false, false, true);
554 if (UseRR)
555 // Use [reg, reg] addrmode. Replace the immediate operand w/ the frame
556 // register. The offset is already handled in the vreg value.
557 MI.getOperand(FIOperandNum+1).ChangeToRegister(FrameReg, false, false,
558 false);
559 } else if (MI.mayStore()) {
560 VReg = MF.getRegInfo().createVirtualRegister(&ARM::tGPRRegClass);
561 bool UseRR = false;
562
563 if (Opcode == ARM::tSTRspi) {
564 if (FrameReg == ARM::SP)
565 emitThumbRegPlusImmInReg(MBB, II, dl, VReg, FrameReg,
566 Offset, false, TII, *this);
567 else {
568 emitLoadConstPool(MBB, II, dl, VReg, 0, Offset);
569 UseRR = true;
570 }
571 } else
572 emitThumbRegPlusImmediate(MBB, II, dl, VReg, FrameReg, Offset, TII,
573 *this);
574 MI.setDesc(TII.get(UseRR ? ARM::tSTRr : ARM::tSTRi));
575 MI.getOperand(FIOperandNum).ChangeToRegister(VReg, false, false, true);
576 if (UseRR)
577 // Use [reg, reg] addrmode. Replace the immediate operand w/ the frame
578 // register. The offset is already handled in the vreg value.
579 MI.getOperand(FIOperandNum+1).ChangeToRegister(FrameReg, false, false,
580 false);
581 } else {
582 llvm_unreachable("Unexpected opcode!");
583 }
584
585 // Add predicate back if it's needed.
586 if (MI.isPredicable())
587 AddDefaultPred(MIB);
588}
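
A possible cleanup for this report (a sketch only, not necessarily the fix applied upstream) is to drop the dead assignment, since CopyRange is forced to 0 directly and CopyBits is not read again after the CopyRange computation at line 233:

      // If we would emit the copy with an immediate of 0, just use tMOVr.
      if (CopyOpc && Bytes < CopyScale) {
        CopyOpc = ARM::tMOVr;
        // CopyBits is no longer written here: it is only read when CopyRange
        // is computed above, and CopyRange is reset to 0 directly below.
        CopyScale = 1;
        CopyNeedsCC = false;
        CopyRange = 0;
      }

Alternatively, keeping the store and explicitly marking the variable as intentionally unused at that point would also silence the checker, at the cost of leaving redundant code in place.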