Bug Summary

File: lib/Target/PowerPC/PPCInstrInfo.cpp
Location: line 531, column 7
Description: Called C++ object pointer is null

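The defect the analyzer reports is a null-pointer dereference along the path marked as steps 1–7 in the annotated source below: SecondLastInst is initialized from the block iterator at source line 524, the analyzer assumes that pointer value is null while evaluating the condition at line 527, that branch is therefore not taken, and the pointer is then dereferenced at line 531 through SecondLastInst->getOpcode() without an intervening null check. The following is a minimal, self-contained sketch of the same shape using hypothetical stand-in types (Instr, analyzeBranch) rather than the real LLVM classes; it shows one defensive structure in which the null case returns before any dereference, purely to illustrate the diagnostic, and is not the actual upstream fix.

#include <vector>
#include <cstdio>

// Hypothetical stand-in for MachineInstr, for illustration only.
struct Instr {
  int Opcode;
  int getOpcode() const { return Opcode; }
};

// Models the reported pattern: the second-to-last terminator is fetched
// through a pointer the analyzer considers possibly null, and would be
// dereferenced (as at PPCInstrInfo.cpp:531) unless guarded first.
bool analyzeBranch(const std::vector<Instr*> &Terminators) {
  if (Terminators.empty())
    return false;

  Instr *LastInst = Terminators.back();
  Instr *SecondLastInst =
      Terminators.size() >= 2 ? Terminators[Terminators.size() - 2] : nullptr;

  // Guard first: bail out ("can't analyze", mirroring the real code's
  // convention of returning true) when there is no second-to-last instruction.
  if (!SecondLastInst)
    return true;

  // Only now is the dereference safe.
  return SecondLastInst->getOpcode() == LastInst->getOpcode();
}

int main() {
  Instr A{1}, B{1};
  std::vector<Instr*> OneTerm{&A};
  std::vector<Instr*> TwoTerms{&A, &B};
  std::printf("%d %d\n", analyzeBranch(OneTerm), analyzeBranch(TwoTerms));
  return 0;
}

Under the same reasoning, one plausible hardening of the real AnalyzeBranch would be to return true (analysis not possible) whenever SecondLastInst is null before reaching line 531; whether that is the appropriate fix is a judgment for the maintainers.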
Annotated Source Code

1//===-- PPCInstrInfo.cpp - PowerPC Instruction Information ----------------===//
2//
3// The LLVM Compiler Infrastructure
4//
5// This file is distributed under the University of Illinois Open Source
6// License. See LICENSE.TXT for details.
7//
8//===----------------------------------------------------------------------===//
9//
10// This file contains the PowerPC implementation of the TargetInstrInfo class.
11//
12//===----------------------------------------------------------------------===//
13
14#include "PPCInstrInfo.h"
15#include "MCTargetDesc/PPCPredicates.h"
16#include "PPC.h"
17#include "PPCHazardRecognizers.h"
18#include "PPCInstrBuilder.h"
19#include "PPCMachineFunctionInfo.h"
20#include "PPCTargetMachine.h"
21#include "llvm/ADT/STLExtras.h"
22#include "llvm/ADT/Statistic.h"
23#include "llvm/CodeGen/LiveIntervalAnalysis.h"
24#include "llvm/CodeGen/MachineFrameInfo.h"
25#include "llvm/CodeGen/MachineFunctionPass.h"
26#include "llvm/CodeGen/MachineInstrBuilder.h"
27#include "llvm/CodeGen/MachineMemOperand.h"
28#include "llvm/CodeGen/MachineRegisterInfo.h"
29#include "llvm/CodeGen/PseudoSourceValue.h"
30#include "llvm/CodeGen/ScheduleDAG.h"
31#include "llvm/CodeGen/SlotIndexes.h"
32#include "llvm/CodeGen/StackMaps.h"
33#include "llvm/MC/MCAsmInfo.h"
34#include "llvm/MC/MCInst.h"
35#include "llvm/Support/CommandLine.h"
36#include "llvm/Support/Debug.h"
37#include "llvm/Support/ErrorHandling.h"
38#include "llvm/Support/TargetRegistry.h"
39#include "llvm/Support/raw_ostream.h"
40
41using namespace llvm;
42
43#define DEBUG_TYPE "ppc-instr-info"
44
45#define GET_INSTRMAP_INFO
46#define GET_INSTRINFO_CTOR_DTOR
47#include "PPCGenInstrInfo.inc"
48
49static cl::
50opt<bool> DisableCTRLoopAnal("disable-ppc-ctrloop-analysis", cl::Hidden,
51 cl::desc("Disable analysis for CTR loops"));
52
53static cl::opt<bool> DisableCmpOpt("disable-ppc-cmp-opt",
54cl::desc("Disable compare instruction optimization"), cl::Hidden);
55
56static cl::opt<bool> VSXSelfCopyCrash("crash-on-ppc-vsx-self-copy",
57cl::desc("Causes the backend to crash instead of generating a nop VSX copy"),
58cl::Hidden);
59
60static cl::opt<bool>
61UseOldLatencyCalc("ppc-old-latency-calc", cl::Hidden,
62 cl::desc("Use the old (incorrect) instruction latency calculation"));
63
64// Pin the vtable to this file.
65void PPCInstrInfo::anchor() {}
66
67PPCInstrInfo::PPCInstrInfo(PPCSubtarget &STI)
68 : PPCGenInstrInfo(PPC::ADJCALLSTACKDOWN, PPC::ADJCALLSTACKUP),
69 Subtarget(STI), RI(STI.getTargetMachine()) {}
70
71/// CreateTargetHazardRecognizer - Return the hazard recognizer to use for
72/// this target when scheduling the DAG.
73ScheduleHazardRecognizer *
74PPCInstrInfo::CreateTargetHazardRecognizer(const TargetSubtargetInfo *STI,
75 const ScheduleDAG *DAG) const {
76 unsigned Directive =
77 static_cast<const PPCSubtarget *>(STI)->getDarwinDirective();
78 if (Directive == PPC::DIR_440 || Directive == PPC::DIR_A2 ||
79 Directive == PPC::DIR_E500mc || Directive == PPC::DIR_E5500) {
80 const InstrItineraryData *II =
81 static_cast<const PPCSubtarget *>(STI)->getInstrItineraryData();
82 return new ScoreboardHazardRecognizer(II, DAG);
83 }
84
85 return TargetInstrInfo::CreateTargetHazardRecognizer(STI, DAG);
86}
87
88/// CreateTargetPostRAHazardRecognizer - Return the postRA hazard recognizer
89/// to use for this target when scheduling the DAG.
90ScheduleHazardRecognizer *
91PPCInstrInfo::CreateTargetPostRAHazardRecognizer(const InstrItineraryData *II,
92 const ScheduleDAG *DAG) const {
93 unsigned Directive =
94 DAG->MF.getSubtarget<PPCSubtarget>().getDarwinDirective();
95
96 // FIXME: Leaving this as-is until we have POWER9 scheduling info
97 if (Directive == PPC::DIR_PWR7 || Directive == PPC::DIR_PWR8)
98 return new PPCDispatchGroupSBHazardRecognizer(II, DAG);
99
100 // Most subtargets use a PPC970 recognizer.
101 if (Directive != PPC::DIR_440 && Directive != PPC::DIR_A2 &&
102 Directive != PPC::DIR_E500mc && Directive != PPC::DIR_E5500) {
103 assert(DAG->TII && "No InstrInfo?");
104
105 return new PPCHazardRecognizer970(*DAG);
106 }
107
108 return new ScoreboardHazardRecognizer(II, DAG);
109}
110
111unsigned PPCInstrInfo::getInstrLatency(const InstrItineraryData *ItinData,
112 const MachineInstr &MI,
113 unsigned *PredCost) const {
114 if (!ItinData || UseOldLatencyCalc)
115 return PPCGenInstrInfo::getInstrLatency(ItinData, MI, PredCost);
116
117 // The default implementation of getInstrLatency calls getStageLatency, but
118 // getStageLatency does not do the right thing for us. While we have
119 // itinerary, most cores are fully pipelined, and so the itineraries only
120 // express the first part of the pipeline, not every stage. Instead, we need
121 // to use the listed output operand cycle number (using operand 0 here, which
122 // is an output).
123
124 unsigned Latency = 1;
125 unsigned DefClass = MI.getDesc().getSchedClass();
126 for (unsigned i = 0, e = MI.getNumOperands(); i != e; ++i) {
127 const MachineOperand &MO = MI.getOperand(i);
128 if (!MO.isReg() || !MO.isDef() || MO.isImplicit())
129 continue;
130
131 int Cycle = ItinData->getOperandCycle(DefClass, i);
132 if (Cycle < 0)
133 continue;
134
135 Latency = std::max(Latency, (unsigned) Cycle);
136 }
137
138 return Latency;
139}
140
141int PPCInstrInfo::getOperandLatency(const InstrItineraryData *ItinData,
142 const MachineInstr &DefMI, unsigned DefIdx,
143 const MachineInstr &UseMI,
144 unsigned UseIdx) const {
145 int Latency = PPCGenInstrInfo::getOperandLatency(ItinData, DefMI, DefIdx,
146 UseMI, UseIdx);
147
148 if (!DefMI.getParent())
149 return Latency;
150
151 const MachineOperand &DefMO = DefMI.getOperand(DefIdx);
152 unsigned Reg = DefMO.getReg();
153
154 bool IsRegCR;
155 if (TargetRegisterInfo::isVirtualRegister(Reg)) {
156 const MachineRegisterInfo *MRI =
157 &DefMI.getParent()->getParent()->getRegInfo();
158 IsRegCR = MRI->getRegClass(Reg)->hasSuperClassEq(&PPC::CRRCRegClass) ||
159 MRI->getRegClass(Reg)->hasSuperClassEq(&PPC::CRBITRCRegClass);
160 } else {
161 IsRegCR = PPC::CRRCRegClass.contains(Reg) ||
162 PPC::CRBITRCRegClass.contains(Reg);
163 }
164
165 if (UseMI.isBranch() && IsRegCR) {
166 if (Latency < 0)
167 Latency = getInstrLatency(ItinData, DefMI);
168
169 // On some cores, there is an additional delay between writing to a condition
170 // register, and using it from a branch.
171 unsigned Directive = Subtarget.getDarwinDirective();
172 switch (Directive) {
173 default: break;
174 case PPC::DIR_7400:
175 case PPC::DIR_750:
176 case PPC::DIR_970:
177 case PPC::DIR_E5500:
178 case PPC::DIR_PWR4:
179 case PPC::DIR_PWR5:
180 case PPC::DIR_PWR5X:
181 case PPC::DIR_PWR6:
182 case PPC::DIR_PWR6X:
183 case PPC::DIR_PWR7:
184 case PPC::DIR_PWR8:
185 // FIXME: Is this needed for POWER9?
186 Latency += 2;
187 break;
188 }
189 }
190
191 return Latency;
192}
193
194// This function does not list all associative and commutative operations, but
195// only those worth feeding through the machine combiner in an attempt to
196// reduce the critical path. Mostly, this means floating-point operations,
197// because they have high latencies (compared to other operations, such as
198// and/or, which are also associative and commutative, but have low latencies).
199bool PPCInstrInfo::isAssociativeAndCommutative(const MachineInstr &Inst) const {
200 switch (Inst.getOpcode()) {
201 // FP Add:
202 case PPC::FADD:
203 case PPC::FADDS:
204 // FP Multiply:
205 case PPC::FMUL:
206 case PPC::FMULS:
207 // Altivec Add:
208 case PPC::VADDFP:
209 // VSX Add:
210 case PPC::XSADDDP:
211 case PPC::XVADDDP:
212 case PPC::XVADDSP:
213 case PPC::XSADDSP:
214 // VSX Multiply:
215 case PPC::XSMULDP:
216 case PPC::XVMULDP:
217 case PPC::XVMULSP:
218 case PPC::XSMULSP:
219 // QPX Add:
220 case PPC::QVFADD:
221 case PPC::QVFADDS:
222 case PPC::QVFADDSs:
223 // QPX Multiply:
224 case PPC::QVFMUL:
225 case PPC::QVFMULS:
226 case PPC::QVFMULSs:
227 return true;
228 default:
229 return false;
230 }
231}
232
233bool PPCInstrInfo::getMachineCombinerPatterns(
234 MachineInstr &Root,
235 SmallVectorImpl<MachineCombinerPattern> &Patterns) const {
236 // Using the machine combiner in this way is potentially expensive, so
237 // restrict to when aggressive optimizations are desired.
238 if (Subtarget.getTargetMachine().getOptLevel() != CodeGenOpt::Aggressive)
239 return false;
240
241 // FP reassociation is only legal when we don't need strict IEEE semantics.
242 if (!Root.getParent()->getParent()->getTarget().Options.UnsafeFPMath)
243 return false;
244
245 return TargetInstrInfo::getMachineCombinerPatterns(Root, Patterns);
246}
247
248// Detect 32 -> 64-bit extensions where we may reuse the low sub-register.
249bool PPCInstrInfo::isCoalescableExtInstr(const MachineInstr &MI,
250 unsigned &SrcReg, unsigned &DstReg,
251 unsigned &SubIdx) const {
252 switch (MI.getOpcode()) {
253 default: return false;
254 case PPC::EXTSW:
255 case PPC::EXTSW_32_64:
256 SrcReg = MI.getOperand(1).getReg();
257 DstReg = MI.getOperand(0).getReg();
258 SubIdx = PPC::sub_32;
259 return true;
260 }
261}
262
263unsigned PPCInstrInfo::isLoadFromStackSlot(const MachineInstr &MI,
264 int &FrameIndex) const {
265 // Note: This list must be kept consistent with LoadRegFromStackSlot.
266 switch (MI.getOpcode()) {
267 default: break;
268 case PPC::LD:
269 case PPC::LWZ:
270 case PPC::LFS:
271 case PPC::LFD:
272 case PPC::RESTORE_CR:
273 case PPC::RESTORE_CRBIT:
274 case PPC::LVX:
275 case PPC::LXVD2X:
276 case PPC::QVLFDX:
277 case PPC::QVLFSXs:
278 case PPC::QVLFDXb:
279 case PPC::RESTORE_VRSAVE:
280 // Check for the operands added by addFrameReference (the immediate is the
281 // offset which defaults to 0).
282 if (MI.getOperand(1).isImm() && !MI.getOperand(1).getImm() &&
283 MI.getOperand(2).isFI()) {
284 FrameIndex = MI.getOperand(2).getIndex();
285 return MI.getOperand(0).getReg();
286 }
287 break;
288 }
289 return 0;
290}
291
292unsigned PPCInstrInfo::isStoreToStackSlot(const MachineInstr &MI,
293 int &FrameIndex) const {
294 // Note: This list must be kept consistent with StoreRegToStackSlot.
295 switch (MI.getOpcode()) {
296 default: break;
297 case PPC::STD:
298 case PPC::STW:
299 case PPC::STFS:
300 case PPC::STFD:
301 case PPC::SPILL_CR:
302 case PPC::SPILL_CRBIT:
303 case PPC::STVX:
304 case PPC::STXVD2X:
305 case PPC::QVSTFDX:
306 case PPC::QVSTFSXs:
307 case PPC::QVSTFDXb:
308 case PPC::SPILL_VRSAVE:
309 // Check for the operands added by addFrameReference (the immediate is the
310 // offset which defaults to 0).
311 if (MI.getOperand(1).isImm() && !MI.getOperand(1).getImm() &&
312 MI.getOperand(2).isFI()) {
313 FrameIndex = MI.getOperand(2).getIndex();
314 return MI.getOperand(0).getReg();
315 }
316 break;
317 }
318 return 0;
319}
320
321MachineInstr *PPCInstrInfo::commuteInstructionImpl(MachineInstr &MI, bool NewMI,
322 unsigned OpIdx1,
323 unsigned OpIdx2) const {
324 MachineFunction &MF = *MI.getParent()->getParent();
325
326 // Normal instructions can be commuted the obvious way.
327 if (MI.getOpcode() != PPC::RLWIMI && MI.getOpcode() != PPC::RLWIMIo)
328 return TargetInstrInfo::commuteInstructionImpl(MI, NewMI, OpIdx1, OpIdx2);
329 // Note that RLWIMI can be commuted as a 32-bit instruction, but not as a
330 // 64-bit instruction (so we don't handle PPC::RLWIMI8 here), because
331 // changing the relative order of the mask operands might change what happens
332 // to the high-bits of the mask (and, thus, the result).
333
334 // Cannot commute if it has a non-zero rotate count.
335 if (MI.getOperand(3).getImm() != 0)
336 return nullptr;
337
338 // If we have a zero rotate count, we have:
339 // M = mask(MB,ME)
340 // Op0 = (Op1 & ~M) | (Op2 & M)
341 // Change this to:
342 // M = mask((ME+1)&31, (MB-1)&31)
343 // Op0 = (Op2 & ~M) | (Op1 & M)
344
345 // Swap op1/op2
346 assert(((OpIdx1 == 1 && OpIdx2 == 2) || (OpIdx1 == 2 && OpIdx2 == 1)) &&
347 "Only the operands 1 and 2 can be swapped in RLSIMI/RLWIMIo.");
348 unsigned Reg0 = MI.getOperand(0).getReg();
349 unsigned Reg1 = MI.getOperand(1).getReg();
350 unsigned Reg2 = MI.getOperand(2).getReg();
351 unsigned SubReg1 = MI.getOperand(1).getSubReg();
352 unsigned SubReg2 = MI.getOperand(2).getSubReg();
353 bool Reg1IsKill = MI.getOperand(1).isKill();
354 bool Reg2IsKill = MI.getOperand(2).isKill();
355 bool ChangeReg0 = false;
356 // If machine instrs are no longer in two-address forms, update
357 // destination register as well.
358 if (Reg0 == Reg1) {
359 // Must be two address instruction!
360 assert(MI.getDesc().getOperandConstraint(0, MCOI::TIED_TO) &&
361 "Expecting a two-address instruction!");
362 assert(MI.getOperand(0).getSubReg() == SubReg1 && "Tied subreg mismatch");
363 Reg2IsKill = false;
364 ChangeReg0 = true;
365 }
366
367 // Masks.
368 unsigned MB = MI.getOperand(4).getImm();
369 unsigned ME = MI.getOperand(5).getImm();
370
371 // We can't commute a trivial mask (there is no way to represent an all-zero
372 // mask).
373 if (MB == 0 && ME == 31)
374 return nullptr;
375
376 if (NewMI) {
377 // Create a new instruction.
378 unsigned Reg0 = ChangeReg0 ? Reg2 : MI.getOperand(0).getReg();
379 bool Reg0IsDead = MI.getOperand(0).isDead();
380 return BuildMI(MF, MI.getDebugLoc(), MI.getDesc())
381 .addReg(Reg0, RegState::Define | getDeadRegState(Reg0IsDead))
382 .addReg(Reg2, getKillRegState(Reg2IsKill))
383 .addReg(Reg1, getKillRegState(Reg1IsKill))
384 .addImm((ME + 1) & 31)
385 .addImm((MB - 1) & 31);
386 }
387
388 if (ChangeReg0) {
389 MI.getOperand(0).setReg(Reg2);
390 MI.getOperand(0).setSubReg(SubReg2);
391 }
392 MI.getOperand(2).setReg(Reg1);
393 MI.getOperand(1).setReg(Reg2);
394 MI.getOperand(2).setSubReg(SubReg1);
395 MI.getOperand(1).setSubReg(SubReg2);
396 MI.getOperand(2).setIsKill(Reg1IsKill);
397 MI.getOperand(1).setIsKill(Reg2IsKill);
398
399 // Swap the mask around.
400 MI.getOperand(4).setImm((ME + 1) & 31);
401 MI.getOperand(5).setImm((MB - 1) & 31);
402 return &MI;
403}
404
405bool PPCInstrInfo::findCommutedOpIndices(MachineInstr &MI, unsigned &SrcOpIdx1,
406 unsigned &SrcOpIdx2) const {
407 // For VSX A-Type FMA instructions, it is the first two operands that can be
408 // commuted, however, because the non-encoded tied input operand is listed
409 // first, the operands to swap are actually the second and third.
410
411 int AltOpc = PPC::getAltVSXFMAOpcode(MI.getOpcode());
412 if (AltOpc == -1)
413 return TargetInstrInfo::findCommutedOpIndices(MI, SrcOpIdx1, SrcOpIdx2);
414
415 // The commutable operand indices are 2 and 3. Return them in SrcOpIdx1
416 // and SrcOpIdx2.
417 return fixCommutedOpIndices(SrcOpIdx1, SrcOpIdx2, 2, 3);
418}
419
420void PPCInstrInfo::insertNoop(MachineBasicBlock &MBB,
421 MachineBasicBlock::iterator MI) const {
422 // This function is used for scheduling, and the nop wanted here is the type
423 // that terminates dispatch groups on the POWER cores.
424 unsigned Directive = Subtarget.getDarwinDirective();
425 unsigned Opcode;
426 switch (Directive) {
427 default: Opcode = PPC::NOP; break;
428 case PPC::DIR_PWR6: Opcode = PPC::NOP_GT_PWR6; break;
429 case PPC::DIR_PWR7: Opcode = PPC::NOP_GT_PWR7; break;
430 case PPC::DIR_PWR8: Opcode = PPC::NOP_GT_PWR7; break; /* FIXME: Update when P8 InstrScheduling model is ready */
431 // FIXME: Update when POWER9 scheduling model is ready.
432 case PPC::DIR_PWR9: Opcode = PPC::NOP_GT_PWR7; break;
433 }
434
435 DebugLoc DL;
436 BuildMI(MBB, MI, DL, get(Opcode));
437}
438
439/// getNoopForMachoTarget - Return the noop instruction to use for a noop.
440void PPCInstrInfo::getNoopForMachoTarget(MCInst &NopInst) const {
441 NopInst.setOpcode(PPC::NOP);
442}
443
444// Branch analysis.
445// Note: If the condition register is set to CTR or CTR8 then this is a
446// BDNZ (imm == 1) or BDZ (imm == 0) branch.
447bool PPCInstrInfo::AnalyzeBranch(MachineBasicBlock &MBB,MachineBasicBlock *&TBB,
448 MachineBasicBlock *&FBB,
449 SmallVectorImpl<MachineOperand> &Cond,
450 bool AllowModify) const {
451 bool isPPC64 = Subtarget.isPPC64();
452
453 // If the block has no terminators, it just falls into the block after it.
454 MachineBasicBlock::iterator I = MBB.getLastNonDebugInstr();
455 if (I == MBB.end())
  [Step 1] Taking false branch
456 return false;
457
458 if (!isUnpredicatedTerminator(*I))
  [Step 2] Taking false branch
459 return false;
460
461 // Get the last instruction in the block.
462 MachineInstr *LastInst = I;
463
464 // If there is only one terminator instruction, process it.
465 if (I == MBB.begin() || !isUnpredicatedTerminator(*--I)) {
  [Step 3] Taking false branch
466 if (LastInst->getOpcode() == PPC::B) {
467 if (!LastInst->getOperand(0).isMBB())
468 return true;
469 TBB = LastInst->getOperand(0).getMBB();
470 return false;
471 } else if (LastInst->getOpcode() == PPC::BCC) {
472 if (!LastInst->getOperand(2).isMBB())
473 return true;
474 // Block ends with fall-through condbranch.
475 TBB = LastInst->getOperand(2).getMBB();
476 Cond.push_back(LastInst->getOperand(0));
477 Cond.push_back(LastInst->getOperand(1));
478 return false;
479 } else if (LastInst->getOpcode() == PPC::BC) {
480 if (!LastInst->getOperand(1).isMBB())
481 return true;
482 // Block ends with fall-through condbranch.
483 TBB = LastInst->getOperand(1).getMBB();
484 Cond.push_back(MachineOperand::CreateImm(PPC::PRED_BIT_SET));
485 Cond.push_back(LastInst->getOperand(0));
486 return false;
487 } else if (LastInst->getOpcode() == PPC::BCn) {
488 if (!LastInst->getOperand(1).isMBB())
489 return true;
490 // Block ends with fall-through condbranch.
491 TBB = LastInst->getOperand(1).getMBB();
492 Cond.push_back(MachineOperand::CreateImm(PPC::PRED_BIT_UNSET));
493 Cond.push_back(LastInst->getOperand(0));
494 return false;
495 } else if (LastInst->getOpcode() == PPC::BDNZ8 ||
496 LastInst->getOpcode() == PPC::BDNZ) {
497 if (!LastInst->getOperand(0).isMBB())
498 return true;
499 if (DisableCTRLoopAnal)
500 return true;
501 TBB = LastInst->getOperand(0).getMBB();
502 Cond.push_back(MachineOperand::CreateImm(1));
503 Cond.push_back(MachineOperand::CreateReg(isPPC64 ? PPC::CTR8 : PPC::CTR,
504 true));
505 return false;
506 } else if (LastInst->getOpcode() == PPC::BDZ8 ||
507 LastInst->getOpcode() == PPC::BDZ) {
508 if (!LastInst->getOperand(0).isMBB())
509 return true;
510 if (DisableCTRLoopAnal)
511 return true;
512 TBB = LastInst->getOperand(0).getMBB();
513 Cond.push_back(MachineOperand::CreateImm(0));
514 Cond.push_back(MachineOperand::CreateReg(isPPC64 ? PPC::CTR8 : PPC::CTR,
515 true));
516 return false;
517 }
518
519 // Otherwise, don't know what this is.
520 return true;
521 }
522
523 // Get the instruction before it if it's a terminator.
524 MachineInstr *SecondLastInst = I;
  [Step 4] 'SecondLastInst' initialized here
525
526 // If there are three terminators, we don't know what sort of block this is.
527 if (SecondLastInst && I != MBB.begin() && isUnpredicatedTerminator(*--I))
  [Step 5] Assuming pointer value is null
  [Step 6] Taking false branch
528 return true;
529
530 // If the block ends with PPC::B and PPC:BCC, handle it.
531 if (SecondLastInst->getOpcode() == PPC::BCC &&
  [Step 7] Called C++ object pointer is null
532 LastInst->getOpcode() == PPC::B) {
533 if (!SecondLastInst->getOperand(2).isMBB() ||
534 !LastInst->getOperand(0).isMBB())
535 return true;
536 TBB = SecondLastInst->getOperand(2).getMBB();
537 Cond.push_back(SecondLastInst->getOperand(0));
538 Cond.push_back(SecondLastInst->getOperand(1));
539 FBB = LastInst->getOperand(0).getMBB();
540 return false;
541 } else if (SecondLastInst->getOpcode() == PPC::BC &&
542 LastInst->getOpcode() == PPC::B) {
543 if (!SecondLastInst->getOperand(1).isMBB() ||
544 !LastInst->getOperand(0).isMBB())
545 return true;
546 TBB = SecondLastInst->getOperand(1).getMBB();
547 Cond.push_back(MachineOperand::CreateImm(PPC::PRED_BIT_SET));
548 Cond.push_back(SecondLastInst->getOperand(0));
549 FBB = LastInst->getOperand(0).getMBB();
550 return false;
551 } else if (SecondLastInst->getOpcode() == PPC::BCn &&
552 LastInst->getOpcode() == PPC::B) {
553 if (!SecondLastInst->getOperand(1).isMBB() ||
554 !LastInst->getOperand(0).isMBB())
555 return true;
556 TBB = SecondLastInst->getOperand(1).getMBB();
557 Cond.push_back(MachineOperand::CreateImm(PPC::PRED_BIT_UNSET));
558 Cond.push_back(SecondLastInst->getOperand(0));
559 FBB = LastInst->getOperand(0).getMBB();
560 return false;
561 } else if ((SecondLastInst->getOpcode() == PPC::BDNZ8 ||
562 SecondLastInst->getOpcode() == PPC::BDNZ) &&
563 LastInst->getOpcode() == PPC::B) {
564 if (!SecondLastInst->getOperand(0).isMBB() ||
565 !LastInst->getOperand(0).isMBB())
566 return true;
567 if (DisableCTRLoopAnal)
568 return true;
569 TBB = SecondLastInst->getOperand(0).getMBB();
570 Cond.push_back(MachineOperand::CreateImm(1));
571 Cond.push_back(MachineOperand::CreateReg(isPPC64 ? PPC::CTR8 : PPC::CTR,
572 true));
573 FBB = LastInst->getOperand(0).getMBB();
574 return false;
575 } else if ((SecondLastInst->getOpcode() == PPC::BDZ8 ||
576 SecondLastInst->getOpcode() == PPC::BDZ) &&
577 LastInst->getOpcode() == PPC::B) {
578 if (!SecondLastInst->getOperand(0).isMBB() ||
579 !LastInst->getOperand(0).isMBB())
580 return true;
581 if (DisableCTRLoopAnal)
582 return true;
583 TBB = SecondLastInst->getOperand(0).getMBB();
584 Cond.push_back(MachineOperand::CreateImm(0));
585 Cond.push_back(MachineOperand::CreateReg(isPPC64 ? PPC::CTR8 : PPC::CTR,
586 true));
587 FBB = LastInst->getOperand(0).getMBB();
588 return false;
589 }
590
591 // If the block ends with two PPC:Bs, handle it. The second one is not
592 // executed, so remove it.
593 if (SecondLastInst->getOpcode() == PPC::B &&
594 LastInst->getOpcode() == PPC::B) {
595 if (!SecondLastInst->getOperand(0).isMBB())
596 return true;
597 TBB = SecondLastInst->getOperand(0).getMBB();
598 I = LastInst;
599 if (AllowModify)
600 I->eraseFromParent();
601 return false;
602 }
603
604 // Otherwise, can't handle this.
605 return true;
606}
607
608unsigned PPCInstrInfo::RemoveBranch(MachineBasicBlock &MBB) const {
609 MachineBasicBlock::iterator I = MBB.getLastNonDebugInstr();
610 if (I == MBB.end())
611 return 0;
612
613 if (I->getOpcode() != PPC::B && I->getOpcode() != PPC::BCC &&
614 I->getOpcode() != PPC::BC && I->getOpcode() != PPC::BCn &&
615 I->getOpcode() != PPC::BDNZ8 && I->getOpcode() != PPC::BDNZ &&
616 I->getOpcode() != PPC::BDZ8 && I->getOpcode() != PPC::BDZ)
617 return 0;
618
619 // Remove the branch.
620 I->eraseFromParent();
621
622 I = MBB.end();
623
624 if (I == MBB.begin()) return 1;
625 --I;
626 if (I->getOpcode() != PPC::BCC &&
627 I->getOpcode() != PPC::BC && I->getOpcode() != PPC::BCn &&
628 I->getOpcode() != PPC::BDNZ8 && I->getOpcode() != PPC::BDNZ &&
629 I->getOpcode() != PPC::BDZ8 && I->getOpcode() != PPC::BDZ)
630 return 1;
631
632 // Remove the branch.
633 I->eraseFromParent();
634 return 2;
635}
636
637unsigned PPCInstrInfo::InsertBranch(MachineBasicBlock &MBB,
638 MachineBasicBlock *TBB,
639 MachineBasicBlock *FBB,
640 ArrayRef<MachineOperand> Cond,
641 const DebugLoc &DL) const {
642 // Shouldn't be a fall through.
643 assert(TBB && "InsertBranch must not be told to insert a fallthrough");
644 assert((Cond.size() == 2 || Cond.size() == 0) &&
645 "PPC branch conditions have two components!");
646
647 bool isPPC64 = Subtarget.isPPC64();
648
649 // One-way branch.
650 if (!FBB) {
651 if (Cond.empty()) // Unconditional branch
652 BuildMI(&MBB, DL, get(PPC::B)).addMBB(TBB);
653 else if (Cond[1].getReg() == PPC::CTR || Cond[1].getReg() == PPC::CTR8)
654 BuildMI(&MBB, DL, get(Cond[0].getImm() ?
655 (isPPC64 ? PPC::BDNZ8 : PPC::BDNZ) :
656 (isPPC64 ? PPC::BDZ8 : PPC::BDZ))).addMBB(TBB);
657 else if (Cond[0].getImm() == PPC::PRED_BIT_SET)
658 BuildMI(&MBB, DL, get(PPC::BC)).addOperand(Cond[1]).addMBB(TBB);
659 else if (Cond[0].getImm() == PPC::PRED_BIT_UNSET)
660 BuildMI(&MBB, DL, get(PPC::BCn)).addOperand(Cond[1]).addMBB(TBB);
661 else // Conditional branch
662 BuildMI(&MBB, DL, get(PPC::BCC))
663 .addImm(Cond[0].getImm()).addOperand(Cond[1]).addMBB(TBB);
664 return 1;
665 }
666
667 // Two-way Conditional Branch.
668 if (Cond[1].getReg() == PPC::CTR || Cond[1].getReg() == PPC::CTR8)
669 BuildMI(&MBB, DL, get(Cond[0].getImm() ?
670 (isPPC64 ? PPC::BDNZ8 : PPC::BDNZ) :
671 (isPPC64 ? PPC::BDZ8 : PPC::BDZ))).addMBB(TBB);
672 else if (Cond[0].getImm() == PPC::PRED_BIT_SET)
673 BuildMI(&MBB, DL, get(PPC::BC)).addOperand(Cond[1]).addMBB(TBB);
674 else if (Cond[0].getImm() == PPC::PRED_BIT_UNSET)
675 BuildMI(&MBB, DL, get(PPC::BCn)).addOperand(Cond[1]).addMBB(TBB);
676 else
677 BuildMI(&MBB, DL, get(PPC::BCC))
678 .addImm(Cond[0].getImm()).addOperand(Cond[1]).addMBB(TBB);
679 BuildMI(&MBB, DL, get(PPC::B)).addMBB(FBB);
680 return 2;
681}
682
683// Select analysis.
684bool PPCInstrInfo::canInsertSelect(const MachineBasicBlock &MBB,
685 ArrayRef<MachineOperand> Cond,
686 unsigned TrueReg, unsigned FalseReg,
687 int &CondCycles, int &TrueCycles, int &FalseCycles) const {
688 if (!Subtarget.hasISEL())
689 return false;
690
691 if (Cond.size() != 2)
692 return false;
693
694 // If this is really a bdnz-like condition, then it cannot be turned into a
695 // select.
696 if (Cond[1].getReg() == PPC::CTR || Cond[1].getReg() == PPC::CTR8)
697 return false;
698
699 // Check register classes.
700 const MachineRegisterInfo &MRI = MBB.getParent()->getRegInfo();
701 const TargetRegisterClass *RC =
702 RI.getCommonSubClass(MRI.getRegClass(TrueReg), MRI.getRegClass(FalseReg));
703 if (!RC)
704 return false;
705
706 // isel is for regular integer GPRs only.
707 if (!PPC::GPRCRegClass.hasSubClassEq(RC) &&
708 !PPC::GPRC_NOR0RegClass.hasSubClassEq(RC) &&
709 !PPC::G8RCRegClass.hasSubClassEq(RC) &&
710 !PPC::G8RC_NOX0RegClass.hasSubClassEq(RC))
711 return false;
712
713 // FIXME: These numbers are for the A2, how well they work for other cores is
714 // an open question. On the A2, the isel instruction has a 2-cycle latency
715 // but single-cycle throughput. These numbers are used in combination with
716 // the MispredictPenalty setting from the active SchedMachineModel.
717 CondCycles = 1;
718 TrueCycles = 1;
719 FalseCycles = 1;
720
721 return true;
722}
723
724void PPCInstrInfo::insertSelect(MachineBasicBlock &MBB,
725 MachineBasicBlock::iterator MI,
726 const DebugLoc &dl, unsigned DestReg,
727 ArrayRef<MachineOperand> Cond, unsigned TrueReg,
728 unsigned FalseReg) const {
729 assert(Cond.size() == 2 &&
730 "PPC branch conditions have two components!");
731
732 assert(Subtarget.hasISEL() &&
733 "Cannot insert select on target without ISEL support");
734
735 // Get the register classes.
736 MachineRegisterInfo &MRI = MBB.getParent()->getRegInfo();
737 const TargetRegisterClass *RC =
738 RI.getCommonSubClass(MRI.getRegClass(TrueReg), MRI.getRegClass(FalseReg));
739 assert(RC && "TrueReg and FalseReg must have overlapping register classes");
740
741 bool Is64Bit = PPC::G8RCRegClass.hasSubClassEq(RC) ||
742 PPC::G8RC_NOX0RegClass.hasSubClassEq(RC);
743 assert((Is64Bit ||
744 PPC::GPRCRegClass.hasSubClassEq(RC) ||
745 PPC::GPRC_NOR0RegClass.hasSubClassEq(RC)) &&
746 "isel is for regular integer GPRs only");
747
748 unsigned OpCode = Is64Bit ? PPC::ISEL8 : PPC::ISEL;
749 auto SelectPred = static_cast<PPC::Predicate>(Cond[0].getImm());
750
751 unsigned SubIdx = 0;
752 bool SwapOps = false;
753 switch (SelectPred) {
754 case PPC::PRED_EQ:
755 case PPC::PRED_EQ_MINUS:
756 case PPC::PRED_EQ_PLUS:
757 SubIdx = PPC::sub_eq; SwapOps = false; break;
758 case PPC::PRED_NE:
759 case PPC::PRED_NE_MINUS:
760 case PPC::PRED_NE_PLUS:
761 SubIdx = PPC::sub_eq; SwapOps = true; break;
762 case PPC::PRED_LT:
763 case PPC::PRED_LT_MINUS:
764 case PPC::PRED_LT_PLUS:
765 SubIdx = PPC::sub_lt; SwapOps = false; break;
766 case PPC::PRED_GE:
767 case PPC::PRED_GE_MINUS:
768 case PPC::PRED_GE_PLUS:
769 SubIdx = PPC::sub_lt; SwapOps = true; break;
770 case PPC::PRED_GT:
771 case PPC::PRED_GT_MINUS:
772 case PPC::PRED_GT_PLUS:
773 SubIdx = PPC::sub_gt; SwapOps = false; break;
774 case PPC::PRED_LE:
775 case PPC::PRED_LE_MINUS:
776 case PPC::PRED_LE_PLUS:
777 SubIdx = PPC::sub_gt; SwapOps = true; break;
778 case PPC::PRED_UN:
779 case PPC::PRED_UN_MINUS:
780 case PPC::PRED_UN_PLUS:
781 SubIdx = PPC::sub_un; SwapOps = false; break;
782 case PPC::PRED_NU:
783 case PPC::PRED_NU_MINUS:
784 case PPC::PRED_NU_PLUS:
785 SubIdx = PPC::sub_un; SwapOps = true; break;
786 case PPC::PRED_BIT_SET: SubIdx = 0; SwapOps = false; break;
787 case PPC::PRED_BIT_UNSET: SubIdx = 0; SwapOps = true; break;
788 }
789
790 unsigned FirstReg = SwapOps ? FalseReg : TrueReg,
791 SecondReg = SwapOps ? TrueReg : FalseReg;
792
793 // The first input register of isel cannot be r0. If it is a member
794 // of a register class that can be r0, then copy it first (the
795 // register allocator should eliminate the copy).
796 if (MRI.getRegClass(FirstReg)->contains(PPC::R0) ||
797 MRI.getRegClass(FirstReg)->contains(PPC::X0)) {
798 const TargetRegisterClass *FirstRC =
799 MRI.getRegClass(FirstReg)->contains(PPC::X0) ?
800 &PPC::G8RC_NOX0RegClass : &PPC::GPRC_NOR0RegClass;
801 unsigned OldFirstReg = FirstReg;
802 FirstReg = MRI.createVirtualRegister(FirstRC);
803 BuildMI(MBB, MI, dl, get(TargetOpcode::COPY), FirstReg)
804 .addReg(OldFirstReg);
805 }
806
807 BuildMI(MBB, MI, dl, get(OpCode), DestReg)
808 .addReg(FirstReg).addReg(SecondReg)
809 .addReg(Cond[1].getReg(), 0, SubIdx);
810}
811
812static unsigned getCRBitValue(unsigned CRBit) {
813 unsigned Ret = 4;
814 if (CRBit == PPC::CR0LT || CRBit == PPC::CR1LT ||
815 CRBit == PPC::CR2LT || CRBit == PPC::CR3LT ||
816 CRBit == PPC::CR4LT || CRBit == PPC::CR5LT ||
817 CRBit == PPC::CR6LT || CRBit == PPC::CR7LT)
818 Ret = 3;
819 if (CRBit == PPC::CR0GT || CRBit == PPC::CR1GT ||
820 CRBit == PPC::CR2GT || CRBit == PPC::CR3GT ||
821 CRBit == PPC::CR4GT || CRBit == PPC::CR5GT ||
822 CRBit == PPC::CR6GT || CRBit == PPC::CR7GT)
823 Ret = 2;
824 if (CRBit == PPC::CR0EQ || CRBit == PPC::CR1EQ ||
825 CRBit == PPC::CR2EQ || CRBit == PPC::CR3EQ ||
826 CRBit == PPC::CR4EQ || CRBit == PPC::CR5EQ ||
827 CRBit == PPC::CR6EQ || CRBit == PPC::CR7EQ)
828 Ret = 1;
829 if (CRBit == PPC::CR0UN || CRBit == PPC::CR1UN ||
830 CRBit == PPC::CR2UN || CRBit == PPC::CR3UN ||
831 CRBit == PPC::CR4UN || CRBit == PPC::CR5UN ||
832 CRBit == PPC::CR6UN || CRBit == PPC::CR7UN)
833 Ret = 0;
834
835 assert(Ret != 4 && "Invalid CR bit register");
836 return Ret;
837}
838
839void PPCInstrInfo::copyPhysReg(MachineBasicBlock &MBB,
840 MachineBasicBlock::iterator I,
841 const DebugLoc &DL, unsigned DestReg,
842 unsigned SrcReg, bool KillSrc) const {
843 // We can end up with self copies and similar things as a result of VSX copy
844 // legalization. Promote them here.
845 const TargetRegisterInfo *TRI = &getRegisterInfo();
846 if (PPC::F8RCRegClass.contains(DestReg) &&
847 PPC::VSRCRegClass.contains(SrcReg)) {
848 unsigned SuperReg =
849 TRI->getMatchingSuperReg(DestReg, PPC::sub_64, &PPC::VSRCRegClass);
850
851 if (VSXSelfCopyCrash && SrcReg == SuperReg)
852 llvm_unreachable("nop VSX copy");
853
854 DestReg = SuperReg;
855 } else if (PPC::VRRCRegClass.contains(DestReg) &&
856 PPC::VSRCRegClass.contains(SrcReg)) {
857 unsigned SuperReg =
858 TRI->getMatchingSuperReg(DestReg, PPC::sub_128, &PPC::VSRCRegClass);
859
860 if (VSXSelfCopyCrash && SrcReg == SuperReg)
861 llvm_unreachable("nop VSX copy");
862
863 DestReg = SuperReg;
864 } else if (PPC::F8RCRegClass.contains(SrcReg) &&
865 PPC::VSRCRegClass.contains(DestReg)) {
866 unsigned SuperReg =
867 TRI->getMatchingSuperReg(SrcReg, PPC::sub_64, &PPC::VSRCRegClass);
868
869 if (VSXSelfCopyCrash && DestReg == SuperReg)
870 llvm_unreachable("nop VSX copy");
871
872 SrcReg = SuperReg;
873 } else if (PPC::VRRCRegClass.contains(SrcReg) &&
874 PPC::VSRCRegClass.contains(DestReg)) {
875 unsigned SuperReg =
876 TRI->getMatchingSuperReg(SrcReg, PPC::sub_128, &PPC::VSRCRegClass);
877
878 if (VSXSelfCopyCrash && DestReg == SuperReg)
879 llvm_unreachable("nop VSX copy");
880
881 SrcReg = SuperReg;
882 }
883
884 // Different class register copy
885 if (PPC::CRBITRCRegClass.contains(SrcReg) &&
886 PPC::GPRCRegClass.contains(DestReg)) {
887 unsigned CRReg = getCRFromCRBit(SrcReg);
888 BuildMI(MBB, I, DL, get(PPC::MFOCRF), DestReg).addReg(CRReg);
889 getKillRegState(KillSrc);
890 // Rotate the CR bit in the CR fields to be the least significant bit and
891 // then mask with 0x1 (MB = ME = 31).
892 BuildMI(MBB, I, DL, get(PPC::RLWINM), DestReg)
893 .addReg(DestReg, RegState::Kill)
894 .addImm(TRI->getEncodingValue(CRReg) * 4 + (4 - getCRBitValue(SrcReg)))
895 .addImm(31)
896 .addImm(31);
897 return;
898 } else if (PPC::CRRCRegClass.contains(SrcReg) &&
899 PPC::G8RCRegClass.contains(DestReg)) {
900 BuildMI(MBB, I, DL, get(PPC::MFOCRF8), DestReg).addReg(SrcReg);
901 getKillRegState(KillSrc);
902 return;
903 } else if (PPC::CRRCRegClass.contains(SrcReg) &&
904 PPC::GPRCRegClass.contains(DestReg)) {
905 BuildMI(MBB, I, DL, get(PPC::MFOCRF), DestReg).addReg(SrcReg);
906 getKillRegState(KillSrc);
907 return;
908 }
909
910 unsigned Opc;
911 if (PPC::GPRCRegClass.contains(DestReg, SrcReg))
912 Opc = PPC::OR;
913 else if (PPC::G8RCRegClass.contains(DestReg, SrcReg))
914 Opc = PPC::OR8;
915 else if (PPC::F4RCRegClass.contains(DestReg, SrcReg))
916 Opc = PPC::FMR;
917 else if (PPC::CRRCRegClass.contains(DestReg, SrcReg))
918 Opc = PPC::MCRF;
919 else if (PPC::VRRCRegClass.contains(DestReg, SrcReg))
920 Opc = PPC::VOR;
921 else if (PPC::VSRCRegClass.contains(DestReg, SrcReg))
922 // There are two different ways this can be done:
923 // 1. xxlor : This has lower latency (on the P7), 2 cycles, but can only
924 // issue in VSU pipeline 0.
925 // 2. xmovdp/xmovsp: This has higher latency (on the P7), 6 cycles, but
926 // can go to either pipeline.
927 // We'll always use xxlor here, because in practically all cases where
928 // copies are generated, they are close enough to some use that the
929 // lower-latency form is preferable.
930 Opc = PPC::XXLOR;
931 else if (PPC::VSFRCRegClass.contains(DestReg, SrcReg) ||
932 PPC::VSSRCRegClass.contains(DestReg, SrcReg))
933 Opc = PPC::XXLORf;
934 else if (PPC::QFRCRegClass.contains(DestReg, SrcReg))
935 Opc = PPC::QVFMR;
936 else if (PPC::QSRCRegClass.contains(DestReg, SrcReg))
937 Opc = PPC::QVFMRs;
938 else if (PPC::QBRCRegClass.contains(DestReg, SrcReg))
939 Opc = PPC::QVFMRb;
940 else if (PPC::CRBITRCRegClass.contains(DestReg, SrcReg))
941 Opc = PPC::CROR;
942 else
943 llvm_unreachable("Impossible reg-to-reg copy");
944
945 const MCInstrDesc &MCID = get(Opc);
946 if (MCID.getNumOperands() == 3)
947 BuildMI(MBB, I, DL, MCID, DestReg)
948 .addReg(SrcReg).addReg(SrcReg, getKillRegState(KillSrc));
949 else
950 BuildMI(MBB, I, DL, MCID, DestReg).addReg(SrcReg, getKillRegState(KillSrc));
951}
952
953// This function returns true if a CR spill is necessary and false otherwise.
954bool
955PPCInstrInfo::StoreRegToStackSlot(MachineFunction &MF,
956 unsigned SrcReg, bool isKill,
957 int FrameIdx,
958 const TargetRegisterClass *RC,
959 SmallVectorImpl<MachineInstr*> &NewMIs,
960 bool &NonRI, bool &SpillsVRS) const{
961 // Note: If additional store instructions are added here,
962 // update isStoreToStackSlot.
963
964 DebugLoc DL;
965 if (PPC::GPRCRegClass.hasSubClassEq(RC) ||
966 PPC::GPRC_NOR0RegClass.hasSubClassEq(RC)) {
967 NewMIs.push_back(addFrameReference(BuildMI(MF, DL, get(PPC::STW))
968 .addReg(SrcReg,
969 getKillRegState(isKill)),
970 FrameIdx));
971 } else if (PPC::G8RCRegClass.hasSubClassEq(RC) ||
972 PPC::G8RC_NOX0RegClass.hasSubClassEq(RC)) {
973 NewMIs.push_back(addFrameReference(BuildMI(MF, DL, get(PPC::STD))
974 .addReg(SrcReg,
975 getKillRegState(isKill)),
976 FrameIdx));
977 } else if (PPC::F8RCRegClass.hasSubClassEq(RC)) {
978 NewMIs.push_back(addFrameReference(BuildMI(MF, DL, get(PPC::STFD))
979 .addReg(SrcReg,
980 getKillRegState(isKill)),
981 FrameIdx));
982 } else if (PPC::F4RCRegClass.hasSubClassEq(RC)) {
983 NewMIs.push_back(addFrameReference(BuildMI(MF, DL, get(PPC::STFS))
984 .addReg(SrcReg,
985 getKillRegState(isKill)),
986 FrameIdx));
987 } else if (PPC::CRRCRegClass.hasSubClassEq(RC)) {
988 NewMIs.push_back(addFrameReference(BuildMI(MF, DL, get(PPC::SPILL_CR))
989 .addReg(SrcReg,
990 getKillRegState(isKill)),
991 FrameIdx));
992 return true;
993 } else if (PPC::CRBITRCRegClass.hasSubClassEq(RC)) {
994 NewMIs.push_back(addFrameReference(BuildMI(MF, DL, get(PPC::SPILL_CRBIT))
995 .addReg(SrcReg,
996 getKillRegState(isKill)),
997 FrameIdx));
998 return true;
999 } else if (PPC::VRRCRegClass.hasSubClassEq(RC)) {
1000 NewMIs.push_back(addFrameReference(BuildMI(MF, DL, get(PPC::STVX))
1001 .addReg(SrcReg,
1002 getKillRegState(isKill)),
1003 FrameIdx));
1004 NonRI = true;
1005 } else if (PPC::VSRCRegClass.hasSubClassEq(RC)) {
1006 NewMIs.push_back(addFrameReference(BuildMI(MF, DL, get(PPC::STXVD2X))
1007 .addReg(SrcReg,
1008 getKillRegState(isKill)),
1009 FrameIdx));
1010 NonRI = true;
1011 } else if (PPC::VSFRCRegClass.hasSubClassEq(RC)) {
1012 NewMIs.push_back(addFrameReference(BuildMI(MF, DL, get(PPC::STXSDX))
1013 .addReg(SrcReg,
1014 getKillRegState(isKill)),
1015 FrameIdx));
1016 NonRI = true;
1017 } else if (PPC::VSSRCRegClass.hasSubClassEq(RC)) {
1018 NewMIs.push_back(addFrameReference(BuildMI(MF, DL, get(PPC::STXSSPX))
1019 .addReg(SrcReg,
1020 getKillRegState(isKill)),
1021 FrameIdx));
1022 NonRI = true;
1023 } else if (PPC::VRSAVERCRegClass.hasSubClassEq(RC)) {
1024 assert(Subtarget.isDarwin() &&
1025 "VRSAVE only needs spill/restore on Darwin");
1026 NewMIs.push_back(addFrameReference(BuildMI(MF, DL, get(PPC::SPILL_VRSAVE))
1027 .addReg(SrcReg,
1028 getKillRegState(isKill)),
1029 FrameIdx));
1030 SpillsVRS = true;
1031 } else if (PPC::QFRCRegClass.hasSubClassEq(RC)) {
1032 NewMIs.push_back(addFrameReference(BuildMI(MF, DL, get(PPC::QVSTFDX))
1033 .addReg(SrcReg,
1034 getKillRegState(isKill)),
1035 FrameIdx));
1036 NonRI = true;
1037 } else if (PPC::QSRCRegClass.hasSubClassEq(RC)) {
1038 NewMIs.push_back(addFrameReference(BuildMI(MF, DL, get(PPC::QVSTFSXs))
1039 .addReg(SrcReg,
1040 getKillRegState(isKill)),
1041 FrameIdx));
1042 NonRI = true;
1043 } else if (PPC::QBRCRegClass.hasSubClassEq(RC)) {
1044 NewMIs.push_back(addFrameReference(BuildMI(MF, DL, get(PPC::QVSTFDXb))
1045 .addReg(SrcReg,
1046 getKillRegState(isKill)),
1047 FrameIdx));
1048 NonRI = true;
1049 } else {
1050 llvm_unreachable("Unknown regclass!");
1051 }
1052
1053 return false;
1054}
1055
1056void
1057PPCInstrInfo::storeRegToStackSlot(MachineBasicBlock &MBB,
1058 MachineBasicBlock::iterator MI,
1059 unsigned SrcReg, bool isKill, int FrameIdx,
1060 const TargetRegisterClass *RC,
1061 const TargetRegisterInfo *TRI) const {
1062 MachineFunction &MF = *MBB.getParent();
1063 SmallVector<MachineInstr*, 4> NewMIs;
1064
1065 PPCFunctionInfo *FuncInfo = MF.getInfo<PPCFunctionInfo>();
1066 FuncInfo->setHasSpills();
1067
1068 bool NonRI = false, SpillsVRS = false;
1069 if (StoreRegToStackSlot(MF, SrcReg, isKill, FrameIdx, RC, NewMIs,
1070 NonRI, SpillsVRS))
1071 FuncInfo->setSpillsCR();
1072
1073 if (SpillsVRS)
1074 FuncInfo->setSpillsVRSAVE();
1075
1076 if (NonRI)
1077 FuncInfo->setHasNonRISpills();
1078
1079 for (unsigned i = 0, e = NewMIs.size(); i != e; ++i)
1080 MBB.insert(MI, NewMIs[i]);
1081
1082 const MachineFrameInfo &MFI = *MF.getFrameInfo();
1083 MachineMemOperand *MMO = MF.getMachineMemOperand(
1084 MachinePointerInfo::getFixedStack(MF, FrameIdx),
1085 MachineMemOperand::MOStore, MFI.getObjectSize(FrameIdx),
1086 MFI.getObjectAlignment(FrameIdx));
1087 NewMIs.back()->addMemOperand(MF, MMO);
1088}
1089
1090bool PPCInstrInfo::LoadRegFromStackSlot(MachineFunction &MF, const DebugLoc &DL,
1091 unsigned DestReg, int FrameIdx,
1092 const TargetRegisterClass *RC,
1093 SmallVectorImpl<MachineInstr *> &NewMIs,
1094 bool &NonRI, bool &SpillsVRS) const {
1095 // Note: If additional load instructions are added here,
1096 // update isLoadFromStackSlot.
1097
1098 if (PPC::GPRCRegClass.hasSubClassEq(RC) ||
1099 PPC::GPRC_NOR0RegClass.hasSubClassEq(RC)) {
1100 NewMIs.push_back(addFrameReference(BuildMI(MF, DL, get(PPC::LWZ),
1101 DestReg), FrameIdx));
1102 } else if (PPC::G8RCRegClass.hasSubClassEq(RC) ||
1103 PPC::G8RC_NOX0RegClass.hasSubClassEq(RC)) {
1104 NewMIs.push_back(addFrameReference(BuildMI(MF, DL, get(PPC::LD), DestReg),
1105 FrameIdx));
1106 } else if (PPC::F8RCRegClass.hasSubClassEq(RC)) {
1107 NewMIs.push_back(addFrameReference(BuildMI(MF, DL, get(PPC::LFD), DestReg),
1108 FrameIdx));
1109 } else if (PPC::F4RCRegClass.hasSubClassEq(RC)) {
1110 NewMIs.push_back(addFrameReference(BuildMI(MF, DL, get(PPC::LFS), DestReg),
1111 FrameIdx));
1112 } else if (PPC::CRRCRegClass.hasSubClassEq(RC)) {
1113 NewMIs.push_back(addFrameReference(BuildMI(MF, DL,
1114 get(PPC::RESTORE_CR), DestReg),
1115 FrameIdx));
1116 return true;
1117 } else if (PPC::CRBITRCRegClass.hasSubClassEq(RC)) {
1118 NewMIs.push_back(addFrameReference(BuildMI(MF, DL,
1119 get(PPC::RESTORE_CRBIT), DestReg),
1120 FrameIdx));
1121 return true;
1122 } else if (PPC::VRRCRegClass.hasSubClassEq(RC)) {
1123 NewMIs.push_back(addFrameReference(BuildMI(MF, DL, get(PPC::LVX), DestReg),
1124 FrameIdx));
1125 NonRI = true;
1126 } else if (PPC::VSRCRegClass.hasSubClassEq(RC)) {
1127 NewMIs.push_back(addFrameReference(BuildMI(MF, DL, get(PPC::LXVD2X), DestReg),
1128 FrameIdx));
1129 NonRI = true;
1130 } else if (PPC::VSFRCRegClass.hasSubClassEq(RC)) {
1131 NewMIs.push_back(addFrameReference(BuildMI(MF, DL, get(PPC::LXSDX), DestReg),
1132 FrameIdx));
1133 NonRI = true;
1134 } else if (PPC::VSSRCRegClass.hasSubClassEq(RC)) {
1135 NewMIs.push_back(addFrameReference(BuildMI(MF, DL, get(PPC::LXSSPX), DestReg),
1136 FrameIdx));
1137 NonRI = true;
1138 } else if (PPC::VRSAVERCRegClass.hasSubClassEq(RC)) {
1139 assert(Subtarget.isDarwin() &&
1140 "VRSAVE only needs spill/restore on Darwin");
1141 NewMIs.push_back(addFrameReference(BuildMI(MF, DL,
1142 get(PPC::RESTORE_VRSAVE),
1143 DestReg),
1144 FrameIdx));
1145 SpillsVRS = true;
1146 } else if (PPC::QFRCRegClass.hasSubClassEq(RC)) {
1147 NewMIs.push_back(addFrameReference(BuildMI(MF, DL, get(PPC::QVLFDX), DestReg),
1148 FrameIdx));
1149 NonRI = true;
1150 } else if (PPC::QSRCRegClass.hasSubClassEq(RC)) {
1151 NewMIs.push_back(addFrameReference(BuildMI(MF, DL, get(PPC::QVLFSXs), DestReg),
1152 FrameIdx));
1153 NonRI = true;
1154 } else if (PPC::QBRCRegClass.hasSubClassEq(RC)) {
1155 NewMIs.push_back(addFrameReference(BuildMI(MF, DL, get(PPC::QVLFDXb), DestReg),
1156 FrameIdx));
1157 NonRI = true;
1158 } else {
1159 llvm_unreachable("Unknown regclass!");
1160 }
1161
1162 return false;
1163}
1164
1165void
1166PPCInstrInfo::loadRegFromStackSlot(MachineBasicBlock &MBB,
1167 MachineBasicBlock::iterator MI,
1168 unsigned DestReg, int FrameIdx,
1169 const TargetRegisterClass *RC,
1170 const TargetRegisterInfo *TRI) const {
1171 MachineFunction &MF = *MBB.getParent();
1172 SmallVector<MachineInstr*, 4> NewMIs;
1173 DebugLoc DL;
1174 if (MI != MBB.end()) DL = MI->getDebugLoc();
1175
1176 PPCFunctionInfo *FuncInfo = MF.getInfo<PPCFunctionInfo>();
1177 FuncInfo->setHasSpills();
1178
1179 bool NonRI = false, SpillsVRS = false;
1180 if (LoadRegFromStackSlot(MF, DL, DestReg, FrameIdx, RC, NewMIs,
1181 NonRI, SpillsVRS))
1182 FuncInfo->setSpillsCR();
1183
1184 if (SpillsVRS)
1185 FuncInfo->setSpillsVRSAVE();
1186
1187 if (NonRI)
1188 FuncInfo->setHasNonRISpills();
1189
1190 for (unsigned i = 0, e = NewMIs.size(); i != e; ++i)
1191 MBB.insert(MI, NewMIs[i]);
1192
1193 const MachineFrameInfo &MFI = *MF.getFrameInfo();
1194 MachineMemOperand *MMO = MF.getMachineMemOperand(
1195 MachinePointerInfo::getFixedStack(MF, FrameIdx),
1196 MachineMemOperand::MOLoad, MFI.getObjectSize(FrameIdx),
1197 MFI.getObjectAlignment(FrameIdx));
1198 NewMIs.back()->addMemOperand(MF, MMO);
1199}
1200
1201bool PPCInstrInfo::
1202ReverseBranchCondition(SmallVectorImpl<MachineOperand> &Cond) const {
1203 assert(Cond.size() == 2 && "Invalid PPC branch opcode!");
1204 if (Cond[1].getReg() == PPC::CTR8 || Cond[1].getReg() == PPC::CTR)
1205 Cond[0].setImm(Cond[0].getImm() == 0 ? 1 : 0);
1206 else
1207 // Leave the CR# the same, but invert the condition.
1208 Cond[0].setImm(PPC::InvertPredicate((PPC::Predicate)Cond[0].getImm()));
1209 return false;
1210}
1211
1212bool PPCInstrInfo::FoldImmediate(MachineInstr &UseMI, MachineInstr &DefMI,
1213 unsigned Reg, MachineRegisterInfo *MRI) const {
1214 // For some instructions, it is legal to fold ZERO into the RA register field.
1215 // A zero immediate should always be loaded with a single li.
1216 unsigned DefOpc = DefMI.getOpcode();
1217 if (DefOpc != PPC::LI && DefOpc != PPC::LI8)
1218 return false;
1219 if (!DefMI.getOperand(1).isImm())
1220 return false;
1221 if (DefMI.getOperand(1).getImm() != 0)
1222 return false;
1223
1224 // Note that we cannot here invert the arguments of an isel in order to fold
1225 // a ZERO into what is presented as the second argument. All we have here
1226 // is the condition bit, and that might come from a CR-logical bit operation.
1227
1228 const MCInstrDesc &UseMCID = UseMI.getDesc();
1229
1230 // Only fold into real machine instructions.
1231 if (UseMCID.isPseudo())
1232 return false;
1233
1234 unsigned UseIdx;
1235 for (UseIdx = 0; UseIdx < UseMI.getNumOperands(); ++UseIdx)
1236 if (UseMI.getOperand(UseIdx).isReg() &&
1237 UseMI.getOperand(UseIdx).getReg() == Reg)
1238 break;
1239
1240 assert(UseIdx < UseMI.getNumOperands() && "Cannot find Reg in UseMI");
1241 assert(UseIdx < UseMCID.getNumOperands() && "No operand description for Reg");
1242
1243 const MCOperandInfo *UseInfo = &UseMCID.OpInfo[UseIdx];
1244
1245 // We can fold the zero if this register requires a GPRC_NOR0/G8RC_NOX0
1246 // register (which might also be specified as a pointer class kind).
1247 if (UseInfo->isLookupPtrRegClass()) {
1248 if (UseInfo->RegClass /* Kind */ != 1)
1249 return false;
1250 } else {
1251 if (UseInfo->RegClass != PPC::GPRC_NOR0RegClassID &&
1252 UseInfo->RegClass != PPC::G8RC_NOX0RegClassID)
1253 return false;
1254 }
1255
1256 // Make sure this is not tied to an output register (or otherwise
1257 // constrained). This is true for ST?UX registers, for example, which
1258 // are tied to their output registers.
1259 if (UseInfo->Constraints != 0)
1260 return false;
1261
1262 unsigned ZeroReg;
1263 if (UseInfo->isLookupPtrRegClass()) {
1264 bool isPPC64 = Subtarget.isPPC64();
1265 ZeroReg = isPPC64 ? PPC::ZERO8 : PPC::ZERO;
1266 } else {
1267 ZeroReg = UseInfo->RegClass == PPC::G8RC_NOX0RegClassID ?
1268 PPC::ZERO8 : PPC::ZERO;
1269 }
1270
1271 bool DeleteDef = MRI->hasOneNonDBGUse(Reg);
1272 UseMI.getOperand(UseIdx).setReg(ZeroReg);
1273
1274 if (DeleteDef)
1275 DefMI.eraseFromParent();
1276
1277 return true;
1278}
1279
1280static bool MBBDefinesCTR(MachineBasicBlock &MBB) {
1281 for (MachineBasicBlock::iterator I = MBB.begin(), IE = MBB.end();
1282 I != IE; ++I)
1283 if (I->definesRegister(PPC::CTR) || I->definesRegister(PPC::CTR8))
1284 return true;
1285 return false;
1286}
1287
1288// We should make sure that, if we're going to predicate both sides of a
1289// condition (a diamond), that both sides don't define the counter register. We
1290// can predicate counter-decrement-based branches, but while that predicates
1291// the branching, it does not predicate the counter decrement. If we tried to
1292 // merge the diamond into one predicated block, we'd decrement the counter
1293// twice.
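// Illustrative case (editorial note): if both the true block and the false
// block contain an instruction that writes CTR (for example a
// counter-decrementing branch), merging them into one predicated block would
// perform both counter updates, so the check below reports the conversion as
// unprofitable.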
1294bool PPCInstrInfo::isProfitableToIfCvt(MachineBasicBlock &TMBB,
1295 unsigned NumT, unsigned ExtraT,
1296 MachineBasicBlock &FMBB,
1297 unsigned NumF, unsigned ExtraF,
1298 BranchProbability Probability) const {
1299 return !(MBBDefinesCTR(TMBB) && MBBDefinesCTR(FMBB));
1300}
1301
1302
1303bool PPCInstrInfo::isPredicated(const MachineInstr &MI) const {
1304 // The predicated branches are identified by their type, not really by the
1305 // explicit presence of a predicate. Furthermore, some of them can be
1306 // predicated more than once. Because if-conversion won't try to predicate
1307 // any instruction which already claims to be predicated (by returning true
1308 // here), we always return false. In doing so, we let isPredicable() be the
1309 // final word on whether or not the instruction can be (further) predicated.
1310
1311 return false;
1312}
1313
1314bool PPCInstrInfo::isUnpredicatedTerminator(const MachineInstr &MI) const {
1315 if (!MI.isTerminator())
1316 return false;
1317
1318 // Conditional branch is a special case.
1319 if (MI.isBranch() && !MI.isBarrier())
1320 return true;
1321
1322 return !isPredicated(MI);
1323}
1324
1325bool PPCInstrInfo::PredicateInstruction(MachineInstr &MI,
1326 ArrayRef<MachineOperand> Pred) const {
1327 unsigned OpC = MI.getOpcode();
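  // Editorial summary of the cases below (a sketch, not in the original
  // source): a predicate naming CTR/CTR8 turns BLR and B into the
  // counter-decrement forms (BDNZLR/BDZLR, BDNZ/BDZ); PRED_BIT_SET and
  // PRED_BIT_UNSET select the single-CR-bit forms (BCLR/BCLRn, BC/BCn, and the
  // BCCTR variants); any other predicate falls through to the generic
  // BCCLR/BCC/BCCCTR forms that carry an explicit condition-code immediate.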
1328 if (OpC == PPC::BLR || OpC == PPC::BLR8) {
1329 if (Pred[1].getReg() == PPC::CTR8 || Pred[1].getReg() == PPC::CTR) {
1330 bool isPPC64 = Subtarget.isPPC64();
1331 MI.setDesc(get(Pred[0].getImm() ? (isPPC64 ? PPC::BDNZLR8 : PPC::BDNZLR)
1332 : (isPPC64 ? PPC::BDZLR8 : PPC::BDZLR)));
1333 } else if (Pred[0].getImm() == PPC::PRED_BIT_SET) {
1334 MI.setDesc(get(PPC::BCLR));
1335 MachineInstrBuilder(*MI.getParent()->getParent(), MI)
1336 .addReg(Pred[1].getReg());
1337 } else if (Pred[0].getImm() == PPC::PRED_BIT_UNSET) {
1338 MI.setDesc(get(PPC::BCLRn));
1339 MachineInstrBuilder(*MI.getParent()->getParent(), MI)
1340 .addReg(Pred[1].getReg());
1341 } else {
1342 MI.setDesc(get(PPC::BCCLR));
1343 MachineInstrBuilder(*MI.getParent()->getParent(), MI)
1344 .addImm(Pred[0].getImm())
1345 .addReg(Pred[1].getReg());
1346 }
1347
1348 return true;
1349 } else if (OpC == PPC::B) {
1350 if (Pred[1].getReg() == PPC::CTR8 || Pred[1].getReg() == PPC::CTR) {
1351 bool isPPC64 = Subtarget.isPPC64();
1352 MI.setDesc(get(Pred[0].getImm() ? (isPPC64 ? PPC::BDNZ8 : PPC::BDNZ)
1353 : (isPPC64 ? PPC::BDZ8 : PPC::BDZ)));
1354 } else if (Pred[0].getImm() == PPC::PRED_BIT_SET) {
1355 MachineBasicBlock *MBB = MI.getOperand(0).getMBB();
1356 MI.RemoveOperand(0);
1357
1358 MI.setDesc(get(PPC::BC));
1359 MachineInstrBuilder(*MI.getParent()->getParent(), MI)
1360 .addReg(Pred[1].getReg())
1361 .addMBB(MBB);
1362 } else if (Pred[0].getImm() == PPC::PRED_BIT_UNSET) {
1363 MachineBasicBlock *MBB = MI.getOperand(0).getMBB();
1364 MI.RemoveOperand(0);
1365
1366 MI.setDesc(get(PPC::BCn));
1367 MachineInstrBuilder(*MI.getParent()->getParent(), MI)
1368 .addReg(Pred[1].getReg())
1369 .addMBB(MBB);
1370 } else {
1371 MachineBasicBlock *MBB = MI.getOperand(0).getMBB();
1372 MI.RemoveOperand(0);
1373
1374 MI.setDesc(get(PPC::BCC));
1375 MachineInstrBuilder(*MI.getParent()->getParent(), MI)
1376 .addImm(Pred[0].getImm())
1377 .addReg(Pred[1].getReg())
1378 .addMBB(MBB);
1379 }
1380
1381 return true;
1382 } else if (OpC == PPC::BCTR || OpC == PPC::BCTR8 ||
1383 OpC == PPC::BCTRL || OpC == PPC::BCTRL8) {
1384 if (Pred[1].getReg() == PPC::CTR8 || Pred[1].getReg() == PPC::CTR)
1385      llvm_unreachable("Cannot predicate bctr[l] on the ctr register");
1386
1387 bool setLR = OpC == PPC::BCTRL || OpC == PPC::BCTRL8;
1388 bool isPPC64 = Subtarget.isPPC64();
1389
1390 if (Pred[0].getImm() == PPC::PRED_BIT_SET) {
1391 MI.setDesc(get(isPPC64 ? (setLR ? PPC::BCCTRL8 : PPC::BCCTR8)
1392 : (setLR ? PPC::BCCTRL : PPC::BCCTR)));
1393 MachineInstrBuilder(*MI.getParent()->getParent(), MI)
1394 .addReg(Pred[1].getReg());
1395 return true;
1396 } else if (Pred[0].getImm() == PPC::PRED_BIT_UNSET) {
1397 MI.setDesc(get(isPPC64 ? (setLR ? PPC::BCCTRL8n : PPC::BCCTR8n)
1398 : (setLR ? PPC::BCCTRLn : PPC::BCCTRn)));
1399 MachineInstrBuilder(*MI.getParent()->getParent(), MI)
1400 .addReg(Pred[1].getReg());
1401 return true;
1402 }
1403
1404 MI.setDesc(get(isPPC64 ? (setLR ? PPC::BCCCTRL8 : PPC::BCCCTR8)
1405 : (setLR ? PPC::BCCCTRL : PPC::BCCCTR)));
1406 MachineInstrBuilder(*MI.getParent()->getParent(), MI)
1407 .addImm(Pred[0].getImm())
1408 .addReg(Pred[1].getReg());
1409 return true;
1410 }
1411
1412 return false;
1413}
1414
1415bool PPCInstrInfo::SubsumesPredicate(ArrayRef<MachineOperand> Pred1,
1416 ArrayRef<MachineOperand> Pred2) const {
1417  assert(Pred1.size() == 2 && "Invalid PPC first predicate");
1418  assert(Pred2.size() == 2 && "Invalid PPC second predicate");
1419
1420 if (Pred1[1].getReg() == PPC::CTR8 || Pred1[1].getReg() == PPC::CTR)
1421 return false;
1422 if (Pred2[1].getReg() == PPC::CTR8 || Pred2[1].getReg() == PPC::CTR)
1423 return false;
1424
1425 // P1 can only subsume P2 if they test the same condition register.
1426 if (Pred1[1].getReg() != Pred2[1].getReg())
1427 return false;
1428
1429 PPC::Predicate P1 = (PPC::Predicate) Pred1[0].getImm();
1430 PPC::Predicate P2 = (PPC::Predicate) Pred2[0].getImm();
1431
1432 if (P1 == P2)
1433 return true;
1434
1435 // Does P1 subsume P2, e.g. GE subsumes GT.
1436 if (P1 == PPC::PRED_LE &&
1437 (P2 == PPC::PRED_LT || P2 == PPC::PRED_EQ))
1438 return true;
1439 if (P1 == PPC::PRED_GE &&
1440 (P2 == PPC::PRED_GT || P2 == PPC::PRED_EQ))
1441 return true;
1442
1443 return false;
1444}
1445
1446bool PPCInstrInfo::DefinesPredicate(MachineInstr &MI,
1447 std::vector<MachineOperand> &Pred) const {
1448 // Note: At the present time, the contents of Pred from this function are
1449 // unused by IfConversion. This implementation follows ARM by pushing the
1450 // CR-defining operand. Because the 'DZ' and 'DNZ' count as types of
1451 // predicate, instructions defining CTR or CTR8 are also included as
1452 // predicate-defining instructions.
1453
1454 const TargetRegisterClass *RCs[] =
1455 { &PPC::CRRCRegClass, &PPC::CRBITRCRegClass,
1456 &PPC::CTRRCRegClass, &PPC::CTRRC8RegClass };
1457
1458 bool Found = false;
1459 for (unsigned i = 0, e = MI.getNumOperands(); i != e; ++i) {
1460 const MachineOperand &MO = MI.getOperand(i);
1461 for (unsigned c = 0; c < array_lengthof(RCs) && !Found; ++c) {
1462 const TargetRegisterClass *RC = RCs[c];
1463 if (MO.isReg()) {
1464 if (MO.isDef() && RC->contains(MO.getReg())) {
1465 Pred.push_back(MO);
1466 Found = true;
1467 }
1468 } else if (MO.isRegMask()) {
1469 for (TargetRegisterClass::iterator I = RC->begin(),
1470 IE = RC->end(); I != IE; ++I)
1471 if (MO.clobbersPhysReg(*I)) {
1472 Pred.push_back(MO);
1473 Found = true;
1474 }
1475 }
1476 }
1477 }
1478
1479 return Found;
1480}
1481
1482bool PPCInstrInfo::isPredicable(MachineInstr &MI) const {
1483 unsigned OpC = MI.getOpcode();
1484 switch (OpC) {
1485 default:
1486 return false;
1487 case PPC::B:
1488 case PPC::BLR:
1489 case PPC::BLR8:
1490 case PPC::BCTR:
1491 case PPC::BCTR8:
1492 case PPC::BCTRL:
1493 case PPC::BCTRL8:
1494 return true;
1495 }
1496}
1497
1498bool PPCInstrInfo::analyzeCompare(const MachineInstr &MI, unsigned &SrcReg,
1499 unsigned &SrcReg2, int &Mask,
1500 int &Value) const {
1501 unsigned Opc = MI.getOpcode();
1502
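  // Editorial note (sketch): for an immediate compare such as
  // `cmpwi cr0, r3, 5` this reports SrcReg = r3, SrcReg2 = 0, Value = 5 and
  // Mask = 0xFFFF; for a register-register compare such as `cmpw cr0, r3, r4`
  // it reports both source registers and leaves Value/Mask untouched.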
1503 switch (Opc) {
1504 default: return false;
1505 case PPC::CMPWI:
1506 case PPC::CMPLWI:
1507 case PPC::CMPDI:
1508 case PPC::CMPLDI:
1509 SrcReg = MI.getOperand(1).getReg();
1510 SrcReg2 = 0;
1511 Value = MI.getOperand(2).getImm();
1512 Mask = 0xFFFF;
1513 return true;
1514 case PPC::CMPW:
1515 case PPC::CMPLW:
1516 case PPC::CMPD:
1517 case PPC::CMPLD:
1518 case PPC::FCMPUS:
1519 case PPC::FCMPUD:
1520 SrcReg = MI.getOperand(1).getReg();
1521 SrcReg2 = MI.getOperand(2).getReg();
1522 return true;
1523 }
1524}
1525
1526bool PPCInstrInfo::optimizeCompareInstr(MachineInstr &CmpInstr, unsigned SrcReg,
1527 unsigned SrcReg2, int Mask, int Value,
1528 const MachineRegisterInfo *MRI) const {
1529 if (DisableCmpOpt)
1530 return false;
1531
1532 int OpC = CmpInstr.getOpcode();
1533 unsigned CRReg = CmpInstr.getOperand(0).getReg();
1534
1535 // FP record forms set CR1 based on the exception status bits, not a
1536 // comparison with zero.
1537 if (OpC == PPC::FCMPUS || OpC == PPC::FCMPUD)
1538 return false;
1539
1540 // The record forms set the condition register based on a signed comparison
1541 // with zero (so says the ISA manual). This is not as straightforward as it
1542 // seems, however, because this is always a 64-bit comparison on PPC64, even
1543 // for instructions that are 32-bit in nature (like slw for example).
1544 // So, on PPC32, for unsigned comparisons, we can use the record forms only
1545 // for equality checks (as those don't depend on the sign). On PPC64,
1546 // we are restricted to equality for unsigned 64-bit comparisons and for
1547 // signed 32-bit comparisons the applicability is more restricted.
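  // Editorial example (a sketch, not in the original source): `rlwinm` with
  // MB <= ME produces a zero-extended 32-bit value, so a following
  // `cmplwi cr0, rX, 0` whose only users test EQ/NE can be folded into
  // `rlwinm.`; a user testing LT or GT blocks the transformation because the
  // record form performs a 64-bit signed comparison with zero.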
1548 bool isPPC64 = Subtarget.isPPC64();
1549 bool is32BitSignedCompare = OpC == PPC::CMPWI || OpC == PPC::CMPW;
1550 bool is32BitUnsignedCompare = OpC == PPC::CMPLWI || OpC == PPC::CMPLW;
1551 bool is64BitUnsignedCompare = OpC == PPC::CMPLDI || OpC == PPC::CMPLD;
1552
1553 // Get the unique definition of SrcReg.
1554 MachineInstr *MI = MRI->getUniqueVRegDef(SrcReg);
1555 if (!MI) return false;
1556 int MIOpC = MI->getOpcode();
1557
1558 bool equalityOnly = false;
1559 bool noSub = false;
1560 if (isPPC64) {
1561 if (is32BitSignedCompare) {
1562 // We can perform this optimization only if MI is sign-extending.
1563 if (MIOpC == PPC::SRAW || MIOpC == PPC::SRAWo ||
1564 MIOpC == PPC::SRAWI || MIOpC == PPC::SRAWIo ||
1565 MIOpC == PPC::EXTSB || MIOpC == PPC::EXTSBo ||
1566 MIOpC == PPC::EXTSH || MIOpC == PPC::EXTSHo ||
1567 MIOpC == PPC::EXTSW || MIOpC == PPC::EXTSWo) {
1568 noSub = true;
1569 } else
1570 return false;
1571 } else if (is32BitUnsignedCompare) {
1572 // 32-bit rotate and mask instructions are zero extending only if MB <= ME
1573 bool isZeroExtendingRotate =
1574 (MIOpC == PPC::RLWINM || MIOpC == PPC::RLWINMo ||
1575 MIOpC == PPC::RLWNM || MIOpC == PPC::RLWNMo)
1576 && MI->getOperand(3).getImm() <= MI->getOperand(4).getImm();
1577
1578 // We can perform this optimization, equality only, if MI is
1579 // zero-extending.
1580 if (MIOpC == PPC::CNTLZW || MIOpC == PPC::CNTLZWo ||
1581 MIOpC == PPC::SLW || MIOpC == PPC::SLWo ||
1582 MIOpC == PPC::SRW || MIOpC == PPC::SRWo ||
1583 isZeroExtendingRotate) {
1584 noSub = true;
1585 equalityOnly = true;
1586 } else
1587 return false;
1588 } else
1589 equalityOnly = is64BitUnsignedCompare;
1590 } else
1591 equalityOnly = is32BitUnsignedCompare;
1592
1593 if (equalityOnly) {
1594 // We need to check the uses of the condition register in order to reject
1595 // non-equality comparisons.
1596    for (MachineRegisterInfo::use_instr_iterator I = MRI->use_instr_begin(CRReg),
1597 IE = MRI->use_instr_end(); I != IE; ++I) {
1598 MachineInstr *UseMI = &*I;
1599 if (UseMI->getOpcode() == PPC::BCC) {
1600 unsigned Pred = UseMI->getOperand(0).getImm();
1601 if (Pred != PPC::PRED_EQ && Pred != PPC::PRED_NE)
1602 return false;
1603 } else if (UseMI->getOpcode() == PPC::ISEL ||
1604 UseMI->getOpcode() == PPC::ISEL8) {
1605 unsigned SubIdx = UseMI->getOperand(3).getSubReg();
1606 if (SubIdx != PPC::sub_eq)
1607 return false;
1608 } else
1609 return false;
1610 }
1611 }
1612
1613 MachineBasicBlock::iterator I = CmpInstr;
1614
1615 // Scan forward to find the first use of the compare.
1616 for (MachineBasicBlock::iterator EL = CmpInstr.getParent()->end(); I != EL;
1617 ++I) {
1618 bool FoundUse = false;
1619    for (MachineRegisterInfo::use_instr_iterator J = MRI->use_instr_begin(CRReg),
1620 JE = MRI->use_instr_end(); J != JE; ++J)
1621 if (&*J == &*I) {
1622 FoundUse = true;
1623 break;
1624 }
1625
1626 if (FoundUse)
1627 break;
1628 }
1629
1630 // There are two possible candidates which can be changed to set CR[01].
1631 // One is MI, the other is a SUB instruction.
1632 // For CMPrr(r1,r2), we are looking for SUB(r1,r2) or SUB(r2,r1).
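  // Editorial note on the overall effect (a sketch): a compare against zero
  // such as `cmpwi cr0, rX, 0`, where rX is defined by an instruction that has
  // a record form (e.g. `and` -> `and.`), is deleted and the defining
  // instruction is rewritten into its record form so that cr0 is set directly.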
1633 MachineInstr *Sub = nullptr;
1634 if (SrcReg2 != 0)
1635 // MI is not a candidate for CMPrr.
1636 MI = nullptr;
1637 // FIXME: Conservatively refuse to convert an instruction which isn't in the
1638 // same BB as the comparison. This is to allow the check below to avoid calls
1639 // (and other explicit clobbers); instead we should really check for these
1640 // more explicitly (in at least a few predecessors).
1641 else if (MI->getParent() != CmpInstr.getParent() || Value != 0) {
1642 // PPC does not have a record-form SUBri.
1643 return false;
1644 }
1645
1646 // Search for Sub.
1647 const TargetRegisterInfo *TRI = &getRegisterInfo();
1648 --I;
1649
1650 // Get ready to iterate backward from CmpInstr.
1651 MachineBasicBlock::iterator E = MI, B = CmpInstr.getParent()->begin();
1652
1653 for (; I != E && !noSub; --I) {
1654 const MachineInstr &Instr = *I;
1655 unsigned IOpC = Instr.getOpcode();
1656
1657 if (&*I != &CmpInstr && (Instr.modifiesRegister(PPC::CR0, TRI) ||
1658 Instr.readsRegister(PPC::CR0, TRI)))
1659 // This instruction modifies or uses the record condition register after
1660 // the one we want to change. While we could do this transformation, it
1661 // would likely not be profitable. This transformation removes one
1662 // instruction, and so even forcing RA to generate one move probably
1663 // makes it unprofitable.
1664 return false;
1665
1666 // Check whether CmpInstr can be made redundant by the current instruction.
1667 if ((OpC == PPC::CMPW || OpC == PPC::CMPLW ||
1668 OpC == PPC::CMPD || OpC == PPC::CMPLD) &&
1669 (IOpC == PPC::SUBF || IOpC == PPC::SUBF8) &&
1670 ((Instr.getOperand(1).getReg() == SrcReg &&
1671 Instr.getOperand(2).getReg() == SrcReg2) ||
1672 (Instr.getOperand(1).getReg() == SrcReg2 &&
1673 Instr.getOperand(2).getReg() == SrcReg))) {
1674 Sub = &*I;
1675 break;
1676 }
1677
1678 if (I == B)
1679 // The 'and' is below the comparison instruction.
1680 return false;
1681 }
1682
1683 // Return false if no candidates exist.
1684 if (!MI && !Sub)
1685 return false;
1686
1687 // The single candidate is called MI.
1688 if (!MI) MI = Sub;
1689
1690 int NewOpC = -1;
1691 MIOpC = MI->getOpcode();
1692 if (MIOpC == PPC::ANDIo || MIOpC == PPC::ANDIo8)
1693 NewOpC = MIOpC;
1694 else {
1695 NewOpC = PPC::getRecordFormOpcode(MIOpC);
1696 if (NewOpC == -1 && PPC::getNonRecordFormOpcode(MIOpC) != -1)
1697 NewOpC = MIOpC;
1698 }
1699
1700 // FIXME: On the non-embedded POWER architectures, only some of the record
1701 // forms are fast, and we should use only the fast ones.
1702
1703 // The defining instruction has a record form (or is already a record
1704 // form). It is possible, however, that we'll need to reverse the condition
1705 // code of the users.
1706 if (NewOpC == -1)
1707 return false;
1708
1709 SmallVector<std::pair<MachineOperand*, PPC::Predicate>, 4> PredsToUpdate;
1710 SmallVector<std::pair<MachineOperand*, unsigned>, 4> SubRegsToUpdate;
1711
1712 // If we have SUB(r1, r2) and CMP(r2, r1), the condition code based on CMP
1713 // needs to be updated to be based on SUB. Push the condition code
1714 // operands to PredsToUpdate. If it is safe to remove CmpInstr, the
1715 // condition code of these operands will be modified.
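  // Editorial example (a sketch): `cmpw cr0, rA, rB` tests rA against rB,
  // while `subf. rD, rA, rB` sets cr0 from rB - rA, i.e. rB against rA; if
  // that subf. stands in for the compare, a user branching on GT must be
  // rewritten to branch on LT (and vice versa), which is what the swap below
  // arranges.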
1716 bool ShouldSwap = false;
1717 if (Sub) {
1718 ShouldSwap = SrcReg2 != 0 && Sub->getOperand(1).getReg() == SrcReg2 &&
1719 Sub->getOperand(2).getReg() == SrcReg;
1720
1721 // The operands to subf are the opposite of sub, so only in the fixed-point
1722 // case, invert the order.
1723 ShouldSwap = !ShouldSwap;
1724 }
1725
1726 if (ShouldSwap)
1727 for (MachineRegisterInfo::use_instr_iterator
1728 I = MRI->use_instr_begin(CRReg), IE = MRI->use_instr_end();
1729 I != IE; ++I) {
1730 MachineInstr *UseMI = &*I;
1731 if (UseMI->getOpcode() == PPC::BCC) {
1732 PPC::Predicate Pred = (PPC::Predicate) UseMI->getOperand(0).getImm();
1733        assert((!equalityOnly ||
1734                Pred == PPC::PRED_EQ || Pred == PPC::PRED_NE) &&
1735               "Invalid predicate for equality-only optimization");
1736 PredsToUpdate.push_back(std::make_pair(&(UseMI->getOperand(0)),
1737 PPC::getSwappedPredicate(Pred)));
1738 } else if (UseMI->getOpcode() == PPC::ISEL ||
1739 UseMI->getOpcode() == PPC::ISEL8) {
1740 unsigned NewSubReg = UseMI->getOperand(3).getSubReg();
1741        assert((!equalityOnly || NewSubReg == PPC::sub_eq) &&
1742               "Invalid CR bit for equality-only optimization");
1743
1744 if (NewSubReg == PPC::sub_lt)
1745 NewSubReg = PPC::sub_gt;
1746 else if (NewSubReg == PPC::sub_gt)
1747 NewSubReg = PPC::sub_lt;
1748
1749 SubRegsToUpdate.push_back(std::make_pair(&(UseMI->getOperand(3)),
1750 NewSubReg));
1751 } else // We need to abort on a user we don't understand.
1752 return false;
1753 }
1754
1755 // Create a new virtual register to hold the value of the CR set by the
1756 // record-form instruction. If the instruction was not previously in
1757 // record form, then set the kill flag on the CR.
1758 CmpInstr.eraseFromParent();
1759
1760 MachineBasicBlock::iterator MII = MI;
1761 BuildMI(*MI->getParent(), std::next(MII), MI->getDebugLoc(),
1762 get(TargetOpcode::COPY), CRReg)
1763 .addReg(PPC::CR0, MIOpC != NewOpC ? RegState::Kill : 0);
1764
1765 // Even if CR0 register were dead before, it is alive now since the
1766 // instruction we just built uses it.
1767 MI->clearRegisterDeads(PPC::CR0);
1768
1769 if (MIOpC != NewOpC) {
1770 // We need to be careful here: we're replacing one instruction with
1771 // another, and we need to make sure that we get all of the right
1772 // implicit uses and defs. On the other hand, the caller may be holding
1773 // an iterator to this instruction, and so we can't delete it (this is
1774 // specifically the case if this is the instruction directly after the
1775 // compare).
1776
1777 const MCInstrDesc &NewDesc = get(NewOpC);
1778 MI->setDesc(NewDesc);
1779
1780 if (NewDesc.ImplicitDefs)
1781 for (const MCPhysReg *ImpDefs = NewDesc.getImplicitDefs();
1782 *ImpDefs; ++ImpDefs)
1783 if (!MI->definesRegister(*ImpDefs))
1784 MI->addOperand(*MI->getParent()->getParent(),
1785 MachineOperand::CreateReg(*ImpDefs, true, true));
1786 if (NewDesc.ImplicitUses)
1787 for (const MCPhysReg *ImpUses = NewDesc.getImplicitUses();
1788 *ImpUses; ++ImpUses)
1789 if (!MI->readsRegister(*ImpUses))
1790 MI->addOperand(*MI->getParent()->getParent(),
1791 MachineOperand::CreateReg(*ImpUses, false, true));
1792 }
1793  assert(MI->definesRegister(PPC::CR0) &&
1794         "Record-form instruction does not define cr0?");
1795
1796 // Modify the condition code of operands in PredsToUpdate (and the CR bits in SubRegsToUpdate).
1797 // Since we have SUB(r1, r2) and CMP(r2, r1), the condition code needs to
1798 // be changed from r2 > r1 to r1 < r2, from r2 < r1 to r1 > r2, etc.
1799 for (unsigned i = 0, e = PredsToUpdate.size(); i < e; i++)
1800 PredsToUpdate[i].first->setImm(PredsToUpdate[i].second);
1801
1802 for (unsigned i = 0, e = SubRegsToUpdate.size(); i < e; i++)
1803 SubRegsToUpdate[i].first->setSubReg(SubRegsToUpdate[i].second);
1804
1805 return true;
1806}
1807
1808/// GetInstSize - Return the number of bytes of code the specified
1809/// instruction may occupy. This returns the maximum number of bytes.
1810///
1811unsigned PPCInstrInfo::GetInstSizeInBytes(const MachineInstr &MI) const {
1812 unsigned Opcode = MI.getOpcode();
1813
1814 if (Opcode == PPC::INLINEASM) {
1815 const MachineFunction *MF = MI.getParent()->getParent();
1816 const char *AsmStr = MI.getOperand(0).getSymbolName();
1817 return getInlineAsmLength(AsmStr, *MF->getTarget().getMCAsmInfo());
1818 } else if (Opcode == TargetOpcode::STACKMAP) {
1819 return MI.getOperand(1).getImm();
1820 } else if (Opcode == TargetOpcode::PATCHPOINT) {
1821 PatchPointOpers Opers(&MI);
1822 return Opers.getMetaOper(PatchPointOpers::NBytesPos).getImm();
1823 } else {
1824 const MCInstrDesc &Desc = get(Opcode);
1825 return Desc.getSize();
1826 }
1827}
1828
1829std::pair<unsigned, unsigned>
1830PPCInstrInfo::decomposeMachineOperandsTargetFlags(unsigned TF) const {
1831 const unsigned Mask = PPCII::MO_ACCESS_MASK;
1832 return std::make_pair(TF & Mask, TF & ~Mask);
1833}
1834
1835ArrayRef<std::pair<unsigned, const char *>>
1836PPCInstrInfo::getSerializableDirectMachineOperandTargetFlags() const {
1837 using namespace PPCII;
1838 static const std::pair<unsigned, const char *> TargetFlags[] = {
1839 {MO_LO, "ppc-lo"},
1840 {MO_HA, "ppc-ha"},
1841 {MO_TPREL_LO, "ppc-tprel-lo"},
1842 {MO_TPREL_HA, "ppc-tprel-ha"},
1843 {MO_DTPREL_LO, "ppc-dtprel-lo"},
1844 {MO_TLSLD_LO, "ppc-tlsld-lo"},
1845 {MO_TOC_LO, "ppc-toc-lo"},
1846 {MO_TLS, "ppc-tls"}};
1847 return makeArrayRef(TargetFlags);
1848}
1849
1850ArrayRef<std::pair<unsigned, const char *>>
1851PPCInstrInfo::getSerializableBitmaskMachineOperandTargetFlags() const {
1852 using namespace PPCII;
1853 static const std::pair<unsigned, const char *> TargetFlags[] = {
1854 {MO_PLT, "ppc-plt"},
1855 {MO_PIC_FLAG, "ppc-pic"},
1856 {MO_NLP_FLAG, "ppc-nlp"},
1857 {MO_NLP_HIDDEN_FLAG, "ppc-nlp-hidden"}};
1858 return makeArrayRef(TargetFlags);
1859}
1860
1861bool PPCInstrInfo::expandPostRAPseudo(MachineInstr &MI) const {
1862 switch (MI.getOpcode()) {
1863 case TargetOpcode::LOAD_STACK_GUARD: {
1864    assert(Subtarget.isTargetLinux() &&
1865           "Only Linux target is expected to contain LOAD_STACK_GUARD");
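    // Editorial note (a sketch): the stack-guard value lives at a fixed offset
    // from the thread pointer, so the pseudo is rewritten below into
    // `ld rD, -0x7010(r13)` on 64-bit targets and `lwz rD, -0x7008(r2)` on
    // 32-bit targets.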
1866 const int64_t Offset = Subtarget.isPPC64() ? -0x7010 : -0x7008;
1867 const unsigned Reg = Subtarget.isPPC64() ? PPC::X13 : PPC::R2;
1868 MI.setDesc(get(Subtarget.isPPC64() ? PPC::LD : PPC::LWZ));
1869 MachineInstrBuilder(*MI.getParent()->getParent(), MI)
1870 .addImm(Offset)
1871 .addReg(Reg);
1872 return true;
1873 }
1874 }
1875 return false;
1876}