File: | lib/Target/X86/X86CallFrameOptimization.cpp |
Location: | line 311, column 12 |
Description: | Value stored to 'StackPtr' during its initialization is never read |
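
Analyzer note: this finding comes from the Clang Static Analyzer's dead-store
check (deadcode.DeadStores). It fires when a value is stored into a variable
by its initializer, but every path through the function either returns before
reading the variable or overwrites it first. A minimal sketch of the flagged
pattern, using hypothetical names that are not from this file:

    void demo(const Widget &X, const Widget &Y) {
      unsigned Width = computeWidth(X); // dead store: on every path the value
      if (earlyExit(X))                 // is either never read (this early
        return;                         // return) ...
      Width = Y.getWidth();             // ... or overwritten before any read
      use(Width);
    }
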
1 | //===----- X86CallFrameOptimization.cpp - Optimize x86 call sequences -----===// |
2 | // |
3 | // The LLVM Compiler Infrastructure |
4 | // |
5 | // This file is distributed under the University of Illinois Open Source |
6 | // License. See LICENSE.TXT for details. |
7 | // |
8 | //===----------------------------------------------------------------------===// |
9 | // |
10 | // This file defines a pass that optimizes call sequences on x86. |
11 | // Currently, it converts movs of function parameters onto the stack into |
12 | // pushes. This is beneficial for two main reasons: |
13 | // 1) The push instruction encoding is much smaller than an esp-relative mov |
14 | // 2) It is possible to push memory arguments directly. So, if the |
15 | // transformation is performed pre-reg-alloc, it can help relieve |
16 | // register pressure. |
17 | // |
18 | //===----------------------------------------------------------------------===// |
19 | |
20 | #include <algorithm> |
21 | |
22 | #include "X86.h" |
23 | #include "X86InstrInfo.h" |
24 | #include "X86Subtarget.h" |
25 | #include "X86MachineFunctionInfo.h" |
26 | #include "llvm/ADT/Statistic.h" |
27 | #include "llvm/CodeGen/MachineFunctionPass.h" |
28 | #include "llvm/CodeGen/MachineInstrBuilder.h" |
29 | #include "llvm/CodeGen/MachineRegisterInfo.h" |
30 | #include "llvm/CodeGen/Passes.h" |
31 | #include "llvm/IR/Function.h" |
32 | #include "llvm/Support/Debug.h" |
33 | #include "llvm/Support/raw_ostream.h" |
34 | #include "llvm/Target/TargetInstrInfo.h" |
35 | |
36 | using namespace llvm; |
37 | |
38 | #define DEBUG_TYPE "x86-cf-opt" |
39 | |
40 | static cl::opt<bool> |
41 | NoX86CFOpt("no-x86-call-frame-opt", |
42 | cl::desc("Avoid optimizing x86 call frames for size"), |
43 | cl::init(false), cl::Hidden); |
44 | |
45 | namespace { |
46 | class X86CallFrameOptimization : public MachineFunctionPass { |
47 | public: |
48 | X86CallFrameOptimization() : MachineFunctionPass(ID) {} |
49 | |
50 | bool runOnMachineFunction(MachineFunction &MF) override; |
51 | |
52 | private: |
53 | // Information we know about a particular call site |
54 | struct CallContext { |
55 | CallContext() |
56 | : Call(nullptr), SPCopy(nullptr), ExpectedDist(0), |
57 | MovVector(4, nullptr), NoStackParams(false), UsePush(false){}; |
58 | |
59 | // Actual call instruction |
60 | MachineInstr *Call; |
61 | |
62 | // A copy of the stack pointer |
63 | MachineInstr *SPCopy; |
64 | |
65 | // The total displacement of all passed parameters |
66 | int64_t ExpectedDist; |
67 | |
68 | // The sequence of movs used to pass the parameters |
69 | SmallVector<MachineInstr *, 4> MovVector; |
70 | |
71 | // True if this call site has no stack parameters |
72 | bool NoStackParams; |
73 | |
74 | // True if this call site can use push instructions |
75 | bool UsePush; |
76 | }; |
77 | |
78 | typedef DenseMap<MachineInstr *, CallContext> ContextMap; |
79 | |
80 | bool isLegal(MachineFunction &MF); |
81 | |
82 | bool isProfitable(MachineFunction &MF, ContextMap &CallSeqMap); |
83 | |
84 | void collectCallInfo(MachineFunction &MF, MachineBasicBlock &MBB, |
85 | MachineBasicBlock::iterator I, CallContext &Context); |
86 | |
87 | bool adjustCallSequence(MachineFunction &MF, MachineBasicBlock::iterator I, |
88 | const CallContext &Context); |
89 | |
90 | MachineInstr *canFoldIntoRegPush(MachineBasicBlock::iterator FrameSetup, |
91 | unsigned Reg); |
92 | |
93 | enum InstClassification { Convert, Skip, Exit }; |
94 | |
95 | InstClassification classifyInstruction(MachineBasicBlock &MBB, |
96 | MachineBasicBlock::iterator MI, |
97 | const X86RegisterInfo &RegInfo, |
98 | DenseSet<unsigned int> &UsedRegs); |
99 | |
100 | const char *getPassName() const override { return "X86 Optimize Call Frame"; } |
101 | |
102 | const TargetInstrInfo *TII; |
103 | const TargetFrameLowering *TFL; |
104 | const MachineRegisterInfo *MRI; |
105 | static char ID; |
106 | }; |
107 | |
108 | char X86CallFrameOptimization::ID = 0; |
109 | } |
110 | |
111 | FunctionPass *llvm::createX86CallFrameOptimization() { |
112 | return new X86CallFrameOptimization(); |
113 | } |
114 | |
115 | // This checks whether the transformation is legal. |
116 | // Also returns false in cases where it's potentially legal, but |
117 | // we don't even want to try. |
118 | bool X86CallFrameOptimization::isLegal(MachineFunction &MF) { |
119 | if (NoX86CFOpt.getValue()) |
120 | return false; |
121 | |
122 | // We currently only support call sequences where *all* parameters |
123 | // are passed on the stack. |
124 | // No point in running this in 64-bit mode, since some arguments are |
125 | // passed in-register in all common calling conventions, so the pattern |
126 | // we're looking for will never match. |
127 | const X86Subtarget &STI = MF.getSubtarget<X86Subtarget>(); |
128 | if (STI.is64Bit()) |
129 | return false; |
130 | |
131 | // You would expect straight-line code between call-frame setup and |
132 | // call-frame destroy. You would be wrong. There are circumstances (e.g. |
133 | // CMOV_GR8 expansion of a select that feeds a function call!) where we can |
134 | // end up with the setup and the destroy in different basic blocks. |
135 | // This is bad, and breaks SP adjustment. |
136 | // So, check that all of the frames in the function are closed inside |
137 | // the same block, and, for good measure, that there are no nested frames. |
138 | unsigned FrameSetupOpcode = TII->getCallFrameSetupOpcode(); |
139 | unsigned FrameDestroyOpcode = TII->getCallFrameDestroyOpcode(); |
140 | for (MachineBasicBlock &BB : MF) { |
141 | bool InsideFrameSequence = false; |
142 | for (MachineInstr &MI : BB) { |
143 | if (MI.getOpcode() == FrameSetupOpcode) { |
144 | if (InsideFrameSequence) |
145 | return false; |
146 | InsideFrameSequence = true; |
147 | } else if (MI.getOpcode() == FrameDestroyOpcode) { |
148 | if (!InsideFrameSequence) |
149 | return false; |
150 | InsideFrameSequence = false; |
151 | } |
152 | } |
153 | |
154 | if (InsideFrameSequence) |
155 | return false; |
156 | } |
157 | |
158 | return true; |
159 | } |
160 | |
161 | // Check whether this transformation is profitable for a particular |
162 | // function - in terms of code size. |
163 | bool X86CallFrameOptimization::isProfitable(MachineFunction &MF, |
164 | ContextMap &CallSeqMap) { |
165 | // This transformation is always a win when we do not expect to have |
166 | // a reserved call frame. Under other circumstances, it may be either |
167 | // a win or a loss, and requires a heuristic. |
168 | bool CannotReserveFrame = MF.getFrameInfo()->hasVarSizedObjects(); |
169 | if (CannotReserveFrame) |
170 | return true; |
171 | |
172 | // Don't do this when not optimizing for size. |
173 | bool OptForSize = |
174 | MF.getFunction()->hasFnAttribute(Attribute::OptimizeForSize) || |
175 | MF.getFunction()->hasFnAttribute(Attribute::MinSize); |
176 | |
177 | if (!OptForSize) |
178 | return false; |
179 | |
180 | unsigned StackAlign = TFL->getStackAlignment(); |
181 | |
182 | int64_t Advantage = 0; |
183 | for (auto CC : CallSeqMap) { |
184 | // Call sites where no parameters are passed on the stack |
185 | // do not affect the cost, since no stack |
186 | // adjustment is needed. |
187 | if (CC.second.NoStackParams) |
188 | continue; |
189 | |
190 | if (!CC.second.UsePush) { |
191 | // If we don't use pushes for a particular call site, |
192 | // we pay for not having a reserved call frame with an |
193 | // additional sub/add esp pair. The cost is ~3 bytes per instruction, |
194 | // depending on the size of the constant. |
195 | // TODO: Callee-pop functions should have a smaller penalty, because |
196 | // an add is needed even with a reserved call frame. |
197 | Advantage -= 6; |
198 | } else { |
199 | // We can use pushes. First, account for the fixed costs. |
200 | // We'll need an add after the call. |
201 | Advantage -= 3; |
202 | // If we have to realign the stack, we'll also need a sub before the call. |
203 | if (CC.second.ExpectedDist % StackAlign) |
204 | Advantage -= 3; |
205 | // Now, for each push, we save ~3 bytes. For small constants, we actually |
206 | // save more (up to 5 bytes), but 3 should be a good approximation. |
207 | Advantage += (CC.second.ExpectedDist / 4) * 3; |
208 | } |
209 | } |
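| Worked example (illustrative, not from the report): a call site passing |
| three 4-byte stack arguments (ExpectedDist = 12) with StackAlign = 16 |
| scores -3 (add after the call) - 3 (realignment sub, since 12 % 16 != 0) |
| + (12 / 4) * 3 = +3, so converting that site to pushes saves ~3 bytes. |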
210 | |
211 | return (Advantage >= 0); |
212 | } |
213 | |
214 | bool X86CallFrameOptimization::runOnMachineFunction(MachineFunction &MF) { |
215 | TII = MF.getSubtarget().getInstrInfo(); |
216 | TFL = MF.getSubtarget().getFrameLowering(); |
217 | MRI = &MF.getRegInfo(); |
218 | |
219 | if (!isLegal(MF)) |
220 | return false; |
221 | |
222 | unsigned FrameSetupOpcode = TII->getCallFrameSetupOpcode(); |
223 | |
224 | bool Changed = false; |
225 | |
226 | ContextMap CallSeqMap; |
227 | |
228 | for (MachineFunction::iterator BB = MF.begin(), E = MF.end(); BB != E; ++BB) |
229 | for (MachineBasicBlock::iterator I = BB->begin(); I != BB->end(); ++I) |
230 | if (I->getOpcode() == FrameSetupOpcode) { |
231 | CallContext &Context = CallSeqMap[I]; |
232 | collectCallInfo(MF, *BB, I, Context); |
233 | } |
234 | |
235 | if (!isProfitable(MF, CallSeqMap)) |
236 | return false; |
237 | |
238 | for (auto CC : CallSeqMap) |
239 | if (CC.second.UsePush) |
240 | Changed |= adjustCallSequence(MF, CC.first, CC.second); |
241 | |
242 | return Changed; |
243 | } |
244 | |
245 | X86CallFrameOptimization::InstClassification |
246 | X86CallFrameOptimization::classifyInstruction( |
247 | MachineBasicBlock &MBB, MachineBasicBlock::iterator MI, |
248 | const X86RegisterInfo &RegInfo, DenseSet<unsigned int> &UsedRegs) { |
249 | if (MI == MBB.end()) |
250 | return Exit; |
251 | |
252 | // The instructions we actually care about are movs onto the stack |
253 | int Opcode = MI->getOpcode(); |
254 | if (Opcode == X86::MOV32mi || Opcode == X86::MOV32mr) |
255 | return Convert; |
256 | |
257 | // Not all calling conventions have only stack MOVs between the stack |
258 | // adjust and the call. |
259 | |
260 | // We want to tolerate other instructions, to cover more cases. |
261 | // In particular: |
262 | // a) PCrel calls, where we expect an additional COPY of the basereg. |
263 | // b) Passing frame-index addresses. |
264 | // c) Calling conventions that have inreg parameters. These generate |
265 | // both copies and movs into registers. |
266 | // To avoid creating lots of special cases, allow any instruction |
267 | // that does not write into memory, does not def or use the stack |
268 | // pointer, and does not def any register that was used by a preceding |
269 | // push. |
270 | // (Reading from memory is allowed, even if referenced through a |
271 | // frame index, since these will get adjusted properly in PEI) |
272 | |
273 | // The reason for the last condition is that the pushes can't replace |
274 | // the movs in place, because the order must be reversed. |
275 | // So if we have a MOV32mr that uses EDX, then an instruction that defs |
276 | // EDX, and then the call, after the transformation the push will use |
277 | // the modified version of EDX, and not the original one. |
278 | // Since we are still in SSA form at this point, we only need to |
279 | // make sure we don't clobber any *physical* registers that were |
280 | // used by an earlier mov that will become a push. |
281 | |
282 | if (MI->isCall() || MI->mayStore()) |
283 | return Exit; |
284 | |
285 | for (const MachineOperand &MO : MI->operands()) { |
286 | if (!MO.isReg()) |
287 | continue; |
288 | unsigned int Reg = MO.getReg(); |
289 | if (!RegInfo.isPhysicalRegister(Reg)) |
290 | continue; |
291 | if (RegInfo.regsOverlap(Reg, RegInfo.getStackRegister())) |
292 | return Exit; |
293 | if (MO.isDef()) { |
294 | for (unsigned int U : UsedRegs) |
295 | if (RegInfo.regsOverlap(Reg, U)) |
296 | return Exit; |
297 | } |
298 | } |
299 | |
300 | return Skip; |
301 | } |
302 | |
303 | void X86CallFrameOptimization::collectCallInfo(MachineFunction &MF, |
304 | MachineBasicBlock &MBB, |
305 | MachineBasicBlock::iterator I, |
306 | CallContext &Context) { |
307 | // Check that this particular call sequence is amenable to the |
308 | // transformation. |
309 | const X86RegisterInfo &RegInfo = *static_cast<const X86RegisterInfo *>( |
310 | MF.getSubtarget().getRegisterInfo()); |
311 | unsigned StackPtr = RegInfo.getStackRegister(); |
| Value stored to 'StackPtr' during its initialization is never read |
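| Note: on the early-return paths at lines 325 and 339, 'StackPtr' is never |
| read, and on the path that continues it is overwritten at line 341 before |
| its first use at line 370, so the initializer's value is dead on every path. |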
312 | unsigned FrameDestroyOpcode = TII->getCallFrameDestroyOpcode(); |
313 | |
314 | // We expect to enter this at the beginning of a call sequence |
315 | assert(I->getOpcode() == TII->getCallFrameSetupOpcode()); |
316 | MachineBasicBlock::iterator FrameSetup = I++; |
317 | |
318 | // How much do we adjust the stack? This puts an upper bound on |
319 | // the number of parameters actually passed on it. |
320 | unsigned int MaxAdjust = FrameSetup->getOperand(0).getImm() / 4; |
321 | |
322 | // A zero adjustment means no stack parameters |
323 | if (!MaxAdjust) { |
324 | Context.NoStackParams = true; |
325 | return; |
326 | } |
327 | |
328 | // For globals in PIC mode, we can have some LEAs here. |
329 | // Ignore them, they don't bother us. |
330 | // TODO: Extend this to something that covers more cases. |
331 | while (I->getOpcode() == X86::LEA32r) |
332 | ++I; |
333 | |
334 | // We expect a copy instruction here. |
335 | // TODO: The copy instruction is a lowering artifact. |
336 | // We should also support a copy-less version, where the stack |
337 | // pointer is used directly. |
338 | if (!I->isCopy() || !I->getOperand(0).isReg()) |
339 | return; |
340 | Context.SPCopy = I++; |
341 | StackPtr = Context.SPCopy->getOperand(0).getReg(); |
342 | |
343 | // Scan the call setup sequence for the pattern we're looking for. |
344 | // We only handle a simple case - a sequence of MOV32mi or MOV32mr |
345 | // instructions that push a sequence of 32-bit values onto the stack, with |
346 | // no gaps between them. |
347 | if (MaxAdjust > 4) |
348 | Context.MovVector.resize(MaxAdjust, nullptr); |
349 | |
350 | InstClassification Classification; |
351 | DenseSet<unsigned int> UsedRegs; |
352 | |
353 | while ((Classification = classifyInstruction(MBB, I, RegInfo, UsedRegs)) != |
354 | Exit) { |
355 | if (Classification == Skip) { |
356 | ++I; |
357 | continue; |
358 | } |
359 | |
360 | // We know the instruction is a MOV32mi/MOV32mr. |
361 | // We only want movs of the form: |
362 | // movl imm/r32, k(%esp) |
363 | // If we run into something else, bail. |
364 | // Note that AddrBaseReg may, counter to its name, not be a register, |
365 | // but rather a frame index. |
366 | // TODO: Support the fi case. This should probably work now that we |
367 | // have the infrastructure to track the stack pointer within a call |
368 | // sequence. |
369 | if (!I->getOperand(X86::AddrBaseReg).isReg() || |
370 | (I->getOperand(X86::AddrBaseReg).getReg() != StackPtr) || |
371 | !I->getOperand(X86::AddrScaleAmt).isImm() || |
372 | (I->getOperand(X86::AddrScaleAmt).getImm() != 1) || |
373 | (I->getOperand(X86::AddrIndexReg).getReg() != X86::NoRegister) || |
374 | (I->getOperand(X86::AddrSegmentReg).getReg() != X86::NoRegister) || |
375 | !I->getOperand(X86::AddrDisp).isImm()) |
376 | return; |
377 | |
378 | int64_t StackDisp = I->getOperand(X86::AddrDisp).getImm(); |
379 | assert(StackDisp >= 0 && |
380 | "Negative stack displacement when passing parameters"); |
381 | |
382 | // We really don't want to consider the unaligned case. |
383 | if (StackDisp % 4) |
384 | return; |
385 | StackDisp /= 4; |
386 | |
387 | assert((size_t)StackDisp < Context.MovVector.size() && |
388 | "Function call has more parameters than the stack is adjusted for."); |
389 | |
390 | // If the same stack slot is being filled twice, something's fishy. |
391 | if (Context.MovVector[StackDisp] != nullptr) |
392 | return; |
393 | Context.MovVector[StackDisp] = I; |
394 | |
395 | for (const MachineOperand &MO : I->uses()) { |
396 | if (!MO.isReg()) |
397 | continue; |
398 | unsigned int Reg = MO.getReg(); |
399 | if (RegInfo.isPhysicalRegister(Reg)) |
400 | UsedRegs.insert(Reg); |
401 | } |
402 | |
403 | ++I; |
404 | } |
405 | |
406 | // We now expect the end of the sequence. If we stopped early, |
407 | // or reached the end of the block without finding a call, bail. |
408 | if (I == MBB.end() || !I->isCall()) |
409 | return; |
410 | |
411 | Context.Call = I; |
412 | if ((++I)->getOpcode() != FrameDestroyOpcode) |
413 | return; |
414 | |
415 | // Now, go through the vector, and see that we don't have any gaps, |
416 | // but only a series of 32-bit MOVs. |
417 | auto MMI = Context.MovVector.begin(), MME = Context.MovVector.end(); |
418 | for (; MMI != MME; ++MMI, Context.ExpectedDist += 4) |
419 | if (*MMI == nullptr) |
420 | break; |
421 | |
422 | // If the call had no parameters, do nothing |
423 | if (MMI == Context.MovVector.begin()) |
424 | return; |
425 | |
426 | // We are either at the last parameter, or a gap. |
427 | // Make sure it's not a gap |
428 | for (; MMI != MME; ++MMI) |
429 | if (*MMI != nullptr) |
430 | return; |
431 | |
432 | Context.UsePush = true; |
433 | return; |
434 | } |
435 | |
436 | bool X86CallFrameOptimization::adjustCallSequence(MachineFunction &MF, |
437 | MachineBasicBlock::iterator I, |
438 | const CallContext &Context) { |
439 | // Ok, we can in fact do the transformation for this call. |
440 | // Do not remove the FrameSetup instruction, but adjust the parameters. |
441 | // PEI will end up finalizing the handling of this. |
442 | MachineBasicBlock::iterator FrameSetup = I; |
443 | MachineBasicBlock &MBB = *(I->getParent()); |
444 | FrameSetup->getOperand(1).setImm(Context.ExpectedDist); |
445 | |
446 | DebugLoc DL = I->getDebugLoc(); |
447 | // Now, iterate through the vector in reverse order, and replace the movs |
448 | // with pushes. MOVmi/MOVmr doesn't have any defs, so no need to |
449 | // replace uses. |
450 | for (int Idx = (Context.ExpectedDist / 4) - 1; Idx >= 0; --Idx) { |
451 | MachineBasicBlock::iterator MOV = *Context.MovVector[Idx]; |
452 | MachineOperand PushOp = MOV->getOperand(X86::AddrNumOperands); |
453 | if (MOV->getOpcode() == X86::MOV32mi) { |
454 | unsigned PushOpcode = X86::PUSHi32; |
455 | // If the operand is a small (8-bit) immediate, we can use a |
456 | // PUSH instruction with a shorter encoding. |
457 | // Note that isImm() may fail even though this is a MOVmi, because |
458 | // the operand can also be a symbol. |
459 | if (PushOp.isImm()) { |
460 | int64_t Val = PushOp.getImm(); |
461 | if (isInt<8>(Val)) |
462 | PushOpcode = X86::PUSH32i8; |
463 | } |
464 | BuildMI(MBB, Context.Call, DL, TII->get(PushOpcode)).addOperand(PushOp); |
465 | } else { |
466 | unsigned int Reg = PushOp.getReg(); |
467 | |
468 | // If PUSHrmm is not slow on this target, try to fold the source of the |
469 | // push into the instruction. |
470 | const X86Subtarget &ST = MF.getSubtarget<X86Subtarget>(); |
471 | bool SlowPUSHrmm = ST.isAtom() || ST.isSLM(); |
472 | |
473 | // Check that this is legal to fold. Right now, we're extremely |
474 | // conservative about that. |
475 | MachineInstr *DefMov = nullptr; |
476 | if (!SlowPUSHrmm && (DefMov = canFoldIntoRegPush(FrameSetup, Reg))) { |
477 | MachineInstr *Push = |
478 | BuildMI(MBB, Context.Call, DL, TII->get(X86::PUSH32rmm)); |
479 | |
480 | unsigned NumOps = DefMov->getDesc().getNumOperands(); |
481 | for (unsigned i = NumOps - X86::AddrNumOperands; i != NumOps; ++i) |
482 | Push->addOperand(DefMov->getOperand(i)); |
483 | |
484 | DefMov->eraseFromParent(); |
485 | } else { |
486 | BuildMI(MBB, Context.Call, DL, TII->get(X86::PUSH32r)) |
487 | .addReg(Reg) |
488 | .getInstr(); |
489 | } |
490 | } |
491 | |
492 | MBB.erase(MOV); |
493 | } |
494 | |
495 | // The stack-pointer copy is no longer used in the call sequences. |
496 | // There should not be any other users, but we can't commit to that, so: |
497 | if (MRI->use_empty(Context.SPCopy->getOperand(0).getReg())) |
498 | Context.SPCopy->eraseFromParent(); |
499 | |
500 | // Once we've done this, we need to make sure PEI doesn't assume a reserved |
501 | // frame. |
502 | X86MachineFunctionInfo *FuncInfo = MF.getInfo<X86MachineFunctionInfo>(); |
503 | FuncInfo->setHasPushSequences(true); |
504 | |
505 | return true; |
506 | } |
507 | |
508 | MachineInstr *X86CallFrameOptimization::canFoldIntoRegPush( |
509 | MachineBasicBlock::iterator FrameSetup, unsigned Reg) { |
510 | // Do an extremely restricted form of load folding. |
511 | // ISel will often create patterns like: |
512 | // movl 4(%edi), %eax |
513 | // movl 8(%edi), %ecx |
514 | // movl 12(%edi), %edx |
515 | // movl %edx, 8(%esp) |
516 | // movl %ecx, 4(%esp) |
517 | // movl %eax, (%esp) |
518 | // call |
519 | // Get rid of those with prejudice. |
520 | if (!TargetRegisterInfo::isVirtualRegister(Reg)) |
521 | return nullptr; |
522 | |
523 | // Make sure this is the only use of Reg. |
524 | if (!MRI->hasOneNonDBGUse(Reg)) |
525 | return nullptr; |
526 | |
527 | MachineBasicBlock::iterator DefMI = MRI->getVRegDef(Reg); |
528 | |
529 | // Make sure the def is a MOV from memory. |
530 | // If the def is in another block, give up. |
531 | if (DefMI->getOpcode() != X86::MOV32rm || |
532 | DefMI->getParent() != FrameSetup->getParent()) |
533 | return nullptr; |
534 | |
535 | // Now, make sure everything else up until the ADJCALLSTACK is a sequence |
536 | // of MOVs. To be less conservative would require duplicating a lot of the |
537 | // logic from PeepholeOptimizer. |
538 | // FIXME: A possibly better approach would be to teach the PeepholeOptimizer |
539 | // to be smarter about folding into pushes. |
540 | for (auto I = DefMI; I != FrameSetup; ++I) |
541 | if (I->getOpcode() != X86::MOV32rm) |
542 | return nullptr; |
543 | |
544 | return DefMI; |
545 | } |