LLVM  3.7.0
PPCVSXSwapRemoval.cpp
1 //===----------- PPCVSXSwapRemoval.cpp - Remove VSX LE Swaps -------------===//
2 //
3 // The LLVM Compiler Infrastructure
4 //
5 // This file is distributed under the University of Illinois Open Source
6 // License. See LICENSE.TXT for details.
7 //
8 //===---------------------------------------------------------------------===//
9 //
10 // This pass analyzes vector computations and removes unnecessary
11 // doubleword swaps (xxswapd instructions). This pass is performed
12 // only for little-endian VSX code generation.
13 //
14 // For this specific case, loads and stores of v4i32, v4f32, v2i64,
15 // and v2f64 vectors are inefficient. These are implemented using
16 // the lxvd2x and stxvd2x instructions, which invert the order of
17 // doublewords in a vector register. Thus code generation inserts
18 // an xxswapd after each such load, and prior to each such store.
19 //
20 // The extra xxswapd instructions reduce performance. The purpose
21 // of this pass is to reduce the number of xxswapd instructions
22 // required for correctness.
23 //
24 // The primary insight is that much code that operates on vectors
25 // does not care about the relative order of elements in a register,
26 // so long as the correct memory order is preserved. If we have a
27 // computation where all input values are provided by lxvd2x/xxswapd,
28 // all outputs are stored using xxswapd/stxvd2x, and all intermediate
29 // computations are lane-insensitive (independent of element order),
30 // then all the xxswapd instructions associated with the loads and
31 // stores may be removed without changing observable semantics.
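31 //
31 // As an illustration (not part of the original comment; registers are
31 // placeholders), a minimal sequence of this kind before optimization:
31 //
31 //   lxvd2x  vs0, 0, r3    ; load, doublewords reversed
31 //   xxswapd vs0, vs0      ; restore element order
31 //   xvadddp vs1, vs0, vs0 ; lane-insensitive arithmetic
31 //   xxswapd vs1, vs1      ; reverse back for the store
31 //   stxvd2x vs1, 0, r4    ; store, doublewords reversed
31 //
31 // and after this pass removes the two xxswapd instructions:
31 //
31 //   lxvd2x  vs0, 0, r3
31 //   xvadddp vs1, vs0, vs0
31 //   stxvd2x vs1, 0, r4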
32 //
33 // This pass uses standard equivalence class infrastructure to create
34 // maximal webs of computations fitting the above description. Each
35 // such web is then optimized by removing its unnecessary xxswapd
36 // instructions.
37 //
38 // There are some lane-sensitive operations for which we can still
39 // permit the optimization, provided we modify those operations
40 // accordingly. Such operations are identified as using "special
41 // handling" within this module.
42 //
43 //===---------------------------------------------------------------------===//
44 
45 #include "PPCInstrInfo.h"
46 #include "PPC.h"
47 #include "PPCInstrBuilder.h"
48 #include "PPCTargetMachine.h"
49 #include "llvm/ADT/DenseMap.h"
50 #include "llvm/ADT/EquivalenceClasses.h"
51 #include "llvm/CodeGen/MachineFunctionPass.h"
52 #include "llvm/CodeGen/MachineInstrBuilder.h"
53 #include "llvm/CodeGen/MachineRegisterInfo.h"
54 #include "llvm/Support/Debug.h"
55 #include "llvm/Support/Format.h"
56 #include "llvm/Support/raw_ostream.h"
57 
58 using namespace llvm;
59 
60 #define DEBUG_TYPE "ppc-vsx-swaps"
61 
62 namespace llvm {
63  void initializePPCVSXSwapRemovalPass(PassRegistry&);
64 }
65 
66 namespace {
67 
68 // A PPCVSXSwapEntry is created for each machine instruction that
69 // is relevant to a vector computation.
70 struct PPCVSXSwapEntry {
71  // Pointer to the instruction.
72  MachineInstr *VSEMI;
73 
74  // Unique ID (position in the swap vector).
75  int VSEId;
76 
77  // Attributes of this node.
78  unsigned int IsLoad : 1;
79  unsigned int IsStore : 1;
80  unsigned int IsSwap : 1;
81  unsigned int MentionsPhysVR : 1;
82  unsigned int IsSwappable : 1;
83  unsigned int MentionsPartialVR : 1;
84  unsigned int SpecialHandling : 3;
85  unsigned int WebRejected : 1;
86  unsigned int WillRemove : 1;
87 };
88 
89 enum SHValues {
90  SH_NONE = 0,
91  SH_EXTRACT,
92  SH_INSERT,
93  SH_NOSWAP_LD,
94  SH_NOSWAP_ST,
95  SH_SPLAT,
96  SH_XXPERMDI,
97  SH_COPYSCALAR
98 };
99 
100 struct PPCVSXSwapRemoval : public MachineFunctionPass {
101 
102  static char ID;
103  const PPCInstrInfo *TII;
104  MachineFunction *MF;
105  MachineRegisterInfo *MRI;
106 
107  // Swap entries are allocated in a vector for better performance.
108  std::vector<PPCVSXSwapEntry> SwapVector;
109 
110  // A mapping is maintained between machine instructions and
111  // their swap entries. The key is the address of the MI.
112  DenseMap<MachineInstr*, int> SwapMap;
113 
114  // Equivalence classes are used to gather webs of related computation.
115  // Swap entries are represented by their VSEId fields.
116  EquivalenceClasses<int> *EC;
117 
118  PPCVSXSwapRemoval() : MachineFunctionPass(ID) {
119  initializePPCVSXSwapRemovalPass(*PassRegistry::getPassRegistry());
120  }
121 
122 private:
123  // Initialize data structures.
124  void initialize(MachineFunction &MFParm);
125 
126  // Walk the machine instructions to gather vector usage information.
127  // Return true iff vector mentions are present.
128  bool gatherVectorInstructions();
129 
130  // Add an entry to the swap vector and swap map.
131  int addSwapEntry(MachineInstr *MI, PPCVSXSwapEntry &SwapEntry);
132 
133  // Hunt backwards through COPY and SUBREG_TO_REG chains for a
134  // source register. VecIdx indicates the swap vector entry to
135  // mark as mentioning a physical register if the search leads
136  // to one.
137  unsigned lookThruCopyLike(unsigned SrcReg, unsigned VecIdx);
138 
139  // Generate equivalence classes for related computations (webs).
140  void formWebs();
141 
142  // Analyze webs and determine those that cannot be optimized.
143  void recordUnoptimizableWebs();
144 
145  // Record which swap instructions can be safely removed.
146  void markSwapsForRemoval();
147 
148  // Remove swaps and update other instructions requiring special
149  // handling. Return true iff any changes are made.
150  bool removeSwaps();
151 
152  // Update instructions requiring special handling.
153  void handleSpecialSwappables(int EntryIdx);
154 
155  // Dump a description of the entries in the swap vector.
156  void dumpSwapVector();
157 
158  // Return true iff the given register is in the given class.
159  bool isRegInClass(unsigned Reg, const TargetRegisterClass *RC) {
160  if (TargetRegisterInfo::isVirtualRegister(Reg))
161  return RC->hasSubClassEq(MRI->getRegClass(Reg));
162  if (RC->contains(Reg))
163  return true;
164  return false;
165  }
166 
167  // Return true iff the given register is a full vector register.
168  bool isVecReg(unsigned Reg) {
169  return (isRegInClass(Reg, &PPC::VSRCRegClass) ||
170  isRegInClass(Reg, &PPC::VRRCRegClass));
171  }
172 
173  // Return true iff the given register is a partial vector register.
174  bool isScalarVecReg(unsigned Reg) {
175  return (isRegInClass(Reg, &PPC::VSFRCRegClass) ||
176  isRegInClass(Reg, &PPC::VSSRCRegClass));
177  }
178 
179  // Return true iff the given register mentions all or part of a
180  // vector register. Also sets Partial to true if the mention
181  // is for just the floating-point register overlap of the register.
182  bool isAnyVecReg(unsigned Reg, bool &Partial) {
183  if (isScalarVecReg(Reg))
184  Partial = true;
185  return isScalarVecReg(Reg) || isVecReg(Reg);
186  }
187 
188 public:
189  // Main entry point for this pass.
190  bool runOnMachineFunction(MachineFunction &MF) override {
191  // If we don't have VSX on the subtarget, don't do anything.
192  const PPCSubtarget &STI = MF.getSubtarget<PPCSubtarget>();
193  if (!STI.hasVSX())
194  return false;
195 
196  bool Changed = false;
197  initialize(MF);
198 
199  if (gatherVectorInstructions()) {
200  formWebs();
201  recordUnoptimizableWebs();
202  markSwapsForRemoval();
203  Changed = removeSwaps();
204  }
205 
206  // FIXME: See the allocation of EC in initialize().
207  delete EC;
208  return Changed;
209  }
210 };
211 
212 // Initialize data structures for this pass. In particular, clear the
213 // swap vector and allocate the equivalence class mapping before
214 // processing each function.
215 void PPCVSXSwapRemoval::initialize(MachineFunction &MFParm) {
216  MF = &MFParm;
217  MRI = &MF->getRegInfo();
218  TII = static_cast<const PPCInstrInfo*>(MF->getSubtarget().getInstrInfo());
219 
220  // An initial vector size of 256 appears to work well in practice.
221  // Small/medium functions with vector content tend not to incur a
222  // reallocation at this size. Three of the vector tests in
223  // projects/test-suite reallocate, which seems like a reasonable rate.
224  const int InitialVectorSize(256);
225  SwapVector.clear();
226  SwapVector.reserve(InitialVectorSize);
227 
228  // FIXME: Currently we allocate EC each time because we don't have
229  // access to the set representation on which to call clear(). Should
230  // consider adding a clear() method to the EquivalenceClasses class.
231  EC = new EquivalenceClasses<int>;
232 }
233 
234 // Create an entry in the swap vector for each instruction that mentions
235 // a full vector register, recording various characteristics of the
236 // instructions there.
237 bool PPCVSXSwapRemoval::gatherVectorInstructions() {
238  bool RelevantFunction = false;
239 
240  for (MachineBasicBlock &MBB : *MF) {
241  for (MachineInstr &MI : MBB) {
242 
243  bool RelevantInstr = false;
244  bool Partial = false;
245 
246  for (const MachineOperand &MO : MI.operands()) {
247  if (!MO.isReg())
248  continue;
249  unsigned Reg = MO.getReg();
250  if (isAnyVecReg(Reg, Partial)) {
251  RelevantInstr = true;
252  break;
253  }
254  }
255 
256  if (!RelevantInstr)
257  continue;
258 
259  RelevantFunction = true;
260 
261  // Create a SwapEntry initialized to zeros, then fill in the
262  // instruction and ID fields before pushing it to the back
263  // of the swap vector.
264  PPCVSXSwapEntry SwapEntry{};
265  int VecIdx = addSwapEntry(&MI, SwapEntry);
266 
267  switch(MI.getOpcode()) {
268  default:
269  // Unless noted otherwise, an instruction is considered
270  // safe for the optimization. There are a large number of
271  // such true-SIMD instructions (all vector math, logical,
272  // select, compare, etc.). However, if the instruction
273  // mentions a partial vector register and does not have
274  // special handling defined, it is not swappable.
275  if (Partial)
276  SwapVector[VecIdx].MentionsPartialVR = 1;
277  else
278  SwapVector[VecIdx].IsSwappable = 1;
279  break;
280  case PPC::XXPERMDI: {
281  // This is a swap if it is of the form XXPERMDI t, s, s, 2.
282  // Unfortunately, MachineCSE ignores COPY and SUBREG_TO_REG, so we
283  // can also see XXPERMDI t, SUBREG_TO_REG(s), SUBREG_TO_REG(s), 2,
284  // for example. We have to look through chains of COPY and
285  // SUBREG_TO_REG to find the real source value for comparison.
286  // If the real source value is a physical register, then mark the
287  // XXPERMDI as mentioning a physical register.
288  int immed = MI.getOperand(3).getImm();
289  if (immed == 2) {
290  unsigned trueReg1 = lookThruCopyLike(MI.getOperand(1).getReg(),
291  VecIdx);
292  unsigned trueReg2 = lookThruCopyLike(MI.getOperand(2).getReg(),
293  VecIdx);
294  if (trueReg1 == trueReg2)
295  SwapVector[VecIdx].IsSwap = 1;
296  else {
297  // We can still handle these if the two registers are not
298  // identical, by adjusting the form of the XXPERMDI.
299  SwapVector[VecIdx].IsSwappable = 1;
300  SwapVector[VecIdx].SpecialHandling = SHValues::SH_XXPERMDI;
301  }
302  // This is a doubleword splat if it is of the form
303  // XXPERMDI t, s, s, 0 or XXPERMDI t, s, s, 3. As above we
304  // must look through chains of copy-likes to find the source
305  // register. We turn off the marking for mention of a physical
306  // register, because splatting it is safe; the optimization
307  // will not swap the value in the physical register. Whether
308  // or not the two input registers are identical, we can handle
309  // these by adjusting the form of the XXPERMDI.
310  } else if (immed == 0 || immed == 3) {
311 
312  SwapVector[VecIdx].IsSwappable = 1;
313  SwapVector[VecIdx].SpecialHandling = SHValues::SH_XXPERMDI;
314 
315  unsigned trueReg1 = lookThruCopyLike(MI.getOperand(1).getReg(),
316  VecIdx);
317  unsigned trueReg2 = lookThruCopyLike(MI.getOperand(2).getReg(),
318  VecIdx);
319  if (trueReg1 == trueReg2)
320  SwapVector[VecIdx].MentionsPhysVR = 0;
321 
322  } else {
323  // We can still handle these by adjusting the form of the XXPERMDI.
324  SwapVector[VecIdx].IsSwappable = 1;
325  SwapVector[VecIdx].SpecialHandling = SHValues::SH_XXPERMDI;
326  }
327  break;
328  }
329  case PPC::LVX:
330  // Non-permuting loads are currently unsafe. We can use special
331  // handling for this in the future. By not marking these as
332  // IsSwap, we ensure computations containing them will be rejected
333  // for now.
334  SwapVector[VecIdx].IsLoad = 1;
335  break;
336  case PPC::LXVD2X:
337  case PPC::LXVW4X:
338  // Permuting loads are marked as both load and swap, and are
339  // safe for optimization.
340  SwapVector[VecIdx].IsLoad = 1;
341  SwapVector[VecIdx].IsSwap = 1;
342  break;
343  case PPC::STVX:
344  // Non-permuting stores are currently unsafe. We can use special
345  // handling for this in the future. By not marking these as
346  // IsSwap, we ensure computations containing them will be rejected
347  // for now.
348  SwapVector[VecIdx].IsStore = 1;
349  break;
350  case PPC::STXVD2X:
351  case PPC::STXVW4X:
352  // Permuting stores are marked as both store and swap, and are
353  // safe for optimization.
354  SwapVector[VecIdx].IsStore = 1;
355  SwapVector[VecIdx].IsSwap = 1;
356  break;
357  case PPC::COPY:
358  // These are fine provided they are moving between full vector
359  // register classes.
360  if (isVecReg(MI.getOperand(0).getReg()) &&
361  isVecReg(MI.getOperand(1).getReg()))
362  SwapVector[VecIdx].IsSwappable = 1;
363  // If we have a copy from one scalar floating-point register
364  // to another, we can accept this even if it is a physical
365  // register. The only way this gets involved is if it feeds
366  // a SUBREG_TO_REG, which is handled by introducing a swap.
367  else if (isScalarVecReg(MI.getOperand(0).getReg()) &&
368  isScalarVecReg(MI.getOperand(1).getReg()))
369  SwapVector[VecIdx].IsSwappable = 1;
370  break;
371  case PPC::SUBREG_TO_REG: {
372  // These are fine provided they are moving between full vector
373  // register classes. If they are moving from a scalar
374  // floating-point class to a vector class, we can handle those
375  // as well, provided we introduce a swap. It is generally the
376  // case that we will introduce fewer swaps than we remove, but
377  // (FIXME) a cost model could be used. However, introduced
378  // swaps could potentially be CSEd, so this is not trivial.
379  if (isVecReg(MI.getOperand(0).getReg()) &&
380  isVecReg(MI.getOperand(2).getReg()))
381  SwapVector[VecIdx].IsSwappable = 1;
382  else if (isVecReg(MI.getOperand(0).getReg()) &&
383  isScalarVecReg(MI.getOperand(2).getReg())) {
384  SwapVector[VecIdx].IsSwappable = 1;
385  SwapVector[VecIdx].SpecialHandling = SHValues::SH_COPYSCALAR;
386  }
387  break;
388  }
389  case PPC::VSPLTB:
390  case PPC::VSPLTH:
391  case PPC::VSPLTW:
392  // Splats are lane-sensitive, but we can use special handling
393  // to adjust the source lane for the splat; the adjustment is
394  // made in handleSpecialSwappables() for entries marked SH_SPLAT.
395  SwapVector[VecIdx].IsSwappable = 1;
396  SwapVector[VecIdx].SpecialHandling = SHValues::SH_SPLAT;
397  break;
398  // The presence of the following lane-sensitive operations in a
399  // web will kill the optimization, at least for now. For these
400  // we do nothing, causing the optimization to fail.
401  // FIXME: Some of these could be permitted with special handling,
402  // and will be phased in as time permits.
403  // FIXME: There is no simple and maintainable way to express a set
404  // of opcodes having a common attribute in TableGen. Should this
405  // change, this is a prime candidate to use such a mechanism.
406  case PPC::INLINEASM:
407  case PPC::EXTRACT_SUBREG:
408  case PPC::INSERT_SUBREG:
409  case PPC::COPY_TO_REGCLASS:
410  case PPC::LVEBX:
411  case PPC::LVEHX:
412  case PPC::LVEWX:
413  case PPC::LVSL:
414  case PPC::LVSR:
415  case PPC::LVXL:
416  case PPC::STVEBX:
417  case PPC::STVEHX:
418  case PPC::STVEWX:
419  case PPC::STVXL:
420  case PPC::STXSDX:
421  case PPC::VCIPHER:
422  case PPC::VCIPHERLAST:
423  case PPC::VMRGHB:
424  case PPC::VMRGHH:
425  case PPC::VMRGHW:
426  case PPC::VMRGLB:
427  case PPC::VMRGLH:
428  case PPC::VMRGLW:
429  case PPC::VMULESB:
430  case PPC::VMULESH:
431  case PPC::VMULESW:
432  case PPC::VMULEUB:
433  case PPC::VMULEUH:
434  case PPC::VMULEUW:
435  case PPC::VMULOSB:
436  case PPC::VMULOSH:
437  case PPC::VMULOSW:
438  case PPC::VMULOUB:
439  case PPC::VMULOUH:
440  case PPC::VMULOUW:
441  case PPC::VNCIPHER:
442  case PPC::VNCIPHERLAST:
443  case PPC::VPERM:
444  case PPC::VPERMXOR:
445  case PPC::VPKPX:
446  case PPC::VPKSHSS:
447  case PPC::VPKSHUS:
448  case PPC::VPKSDSS:
449  case PPC::VPKSDUS:
450  case PPC::VPKSWSS:
451  case PPC::VPKSWUS:
452  case PPC::VPKUDUM:
453  case PPC::VPKUDUS:
454  case PPC::VPKUHUM:
455  case PPC::VPKUHUS:
456  case PPC::VPKUWUM:
457  case PPC::VPKUWUS:
458  case PPC::VPMSUMB:
459  case PPC::VPMSUMD:
460  case PPC::VPMSUMH:
461  case PPC::VPMSUMW:
462  case PPC::VRLB:
463  case PPC::VRLD:
464  case PPC::VRLH:
465  case PPC::VRLW:
466  case PPC::VSBOX:
467  case PPC::VSHASIGMAD:
468  case PPC::VSHASIGMAW:
469  case PPC::VSL:
470  case PPC::VSLDOI:
471  case PPC::VSLO:
472  case PPC::VSR:
473  case PPC::VSRO:
474  case PPC::VSUM2SWS:
475  case PPC::VSUM4SBS:
476  case PPC::VSUM4SHS:
477  case PPC::VSUM4UBS:
478  case PPC::VSUMSWS:
479  case PPC::VUPKHPX:
480  case PPC::VUPKHSB:
481  case PPC::VUPKHSH:
482  case PPC::VUPKHSW:
483  case PPC::VUPKLPX:
484  case PPC::VUPKLSB:
485  case PPC::VUPKLSH:
486  case PPC::VUPKLSW:
487  case PPC::XXMRGHW:
488  case PPC::XXMRGLW:
489  // XXSLDWI could be replaced by a general permute with one of three
490  // permute control vectors (for shift values 1, 2, 3). However,
491  // VPERM has a more restrictive register class.
492  case PPC::XXSLDWI:
493  case PPC::XXSPLTW:
494  break;
495  }
496  }
497  }
498 
499  if (RelevantFunction) {
500  DEBUG(dbgs() << "Swap vector when first built\n\n");
501  dumpSwapVector();
502  }
503 
504  return RelevantFunction;
505 }
506 
507 // Add an entry to the swap vector and swap map, and make a
508 // singleton equivalence class for the entry.
509 int PPCVSXSwapRemoval::addSwapEntry(MachineInstr *MI,
510  PPCVSXSwapEntry& SwapEntry) {
511  SwapEntry.VSEMI = MI;
512  SwapEntry.VSEId = SwapVector.size();
513  SwapVector.push_back(SwapEntry);
514  EC->insert(SwapEntry.VSEId);
515  SwapMap[MI] = SwapEntry.VSEId;
516  return SwapEntry.VSEId;
517 }
518 
519 // This is used to find the "true" source register for an
520 // XXPERMDI instruction, since MachineCSE does not handle the
521 // "copy-like" operations (Copy and SubregToReg). Returns
522 // the original SrcReg unless it is the target of a copy-like
523 // operation, in which case we chain backwards through all
524 // such operations to the ultimate source register. If a
525 // physical register is encountered, we stop the search and
526 // flag the swap entry indicated by VecIdx (the original
527 // XXPERMDI) as mentioning a physical register.
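// For example (illustrative only; register names are made up):
//
//   %vreg2 = SUBREG_TO_REG 1, %F1, sub_64   ; %F1 is a physical register
//   %vreg3 = COPY %vreg2
//   ...    = XXPERMDI %vreg3, %vreg3, 2
//
// lookThruCopyLike(%vreg3, VecIdx) chains back through the COPY and the
// SUBREG_TO_REG, returns %F1, and sets MentionsPhysVR on the swap entry
// for the XXPERMDI because the ultimate source is physical.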
528 unsigned PPCVSXSwapRemoval::lookThruCopyLike(unsigned SrcReg,
529  unsigned VecIdx) {
530  MachineInstr *MI = MRI->getVRegDef(SrcReg);
531  if (!MI->isCopyLike())
532  return SrcReg;
533 
534  unsigned CopySrcReg;
535  if (MI->isCopy())
536  CopySrcReg = MI->getOperand(1).getReg();
537  else {
538  assert(MI->isSubregToReg() && "bad opcode for lookThruCopyLike");
539  CopySrcReg = MI->getOperand(2).getReg();
540  }
541 
542  if (!TargetRegisterInfo::isVirtualRegister(CopySrcReg)) {
543  SwapVector[VecIdx].MentionsPhysVR = 1;
544  return CopySrcReg;
545  }
546 
547  return lookThruCopyLike(CopySrcReg, VecIdx);
548 }
549 
550 // Generate equivalence classes for related computations (webs) by
551 // def-use relationships of virtual registers. Mention of a physical
552 // register terminates the generation of equivalence classes as this
553 // indicates a use of a parameter, definition of a return value, use
554 // of a value returned from a call, or definition of a parameter to a
555 // call. Computations with physical register mentions are flagged
556 // as such so their containing webs will not be optimized.
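// For example (illustrative): if swap entry 3 defines %vreg5 via an
// LXVD2X and swap entry 7 uses %vreg5 in an XXPERMDI, the unionSets()
// call below places entries 3 and 7 in the same equivalence class (web);
// getLeaderValue() later yields one representative entry for that web.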
557 void PPCVSXSwapRemoval::formWebs() {
558 
559  DEBUG(dbgs() << "\n*** Forming webs for swap removal ***\n\n");
560 
561  for (unsigned EntryIdx = 0; EntryIdx < SwapVector.size(); ++EntryIdx) {
562 
563  MachineInstr *MI = SwapVector[EntryIdx].VSEMI;
564 
565  DEBUG(dbgs() << "\n" << SwapVector[EntryIdx].VSEId << " ");
566  DEBUG(MI->dump());
567 
568  // It's sufficient to walk vector uses and join them to their unique
569  // definitions. In addition, check full vector register operands
570  // for physical regs. We exclude partial-vector register operands
571  // because we can handle them if copied to a full vector.
572  for (const MachineOperand &MO : MI->operands()) {
573  if (!MO.isReg())
574  continue;
575 
576  unsigned Reg = MO.getReg();
577  if (!isVecReg(Reg) && !isScalarVecReg(Reg))
578  continue;
579 
580  if (!TargetRegisterInfo::isVirtualRegister(Reg)) {
581  if (!(MI->isCopy() && isScalarVecReg(Reg)))
582  SwapVector[EntryIdx].MentionsPhysVR = 1;
583  continue;
584  }
585 
586  if (!MO.isUse())
587  continue;
588 
589  MachineInstr* DefMI = MRI->getVRegDef(Reg);
590  assert(SwapMap.find(DefMI) != SwapMap.end() &&
591  "Inconsistency: def of vector reg not found in swap map!");
592  int DefIdx = SwapMap[DefMI];
593  (void)EC->unionSets(SwapVector[DefIdx].VSEId,
594  SwapVector[EntryIdx].VSEId);
595 
596  DEBUG(dbgs() << format("Unioning %d with %d\n", SwapVector[DefIdx].VSEId,
597  SwapVector[EntryIdx].VSEId));
598  DEBUG(dbgs() << " Def: ");
599  DEBUG(DefMI->dump());
600  }
601  }
602 }
603 
604 // Walk the swap vector entries looking for conditions that prevent their
605 // containing computations from being optimized. When such conditions are
606 // found, mark the representative of the computation's equivalence class
607 // as rejected.
608 void PPCVSXSwapRemoval::recordUnoptimizableWebs() {
609 
610  DEBUG(dbgs() << "\n*** Rejecting webs for swap removal ***\n\n");
611 
612  for (unsigned EntryIdx = 0; EntryIdx < SwapVector.size(); ++EntryIdx) {
613  int Repr = EC->getLeaderValue(SwapVector[EntryIdx].VSEId);
614 
615  // If representative is already rejected, don't waste further time.
616  if (SwapVector[Repr].WebRejected)
617  continue;
618 
619  // Reject webs containing mentions of physical or partial registers, or
620  // containing operations that we don't know how to handle in a lane-
621  // permuted region.
622  if (SwapVector[EntryIdx].MentionsPhysVR ||
623  SwapVector[EntryIdx].MentionsPartialVR ||
624  !(SwapVector[EntryIdx].IsSwappable || SwapVector[EntryIdx].IsSwap)) {
625 
626  SwapVector[Repr].WebRejected = 1;
627 
628  DEBUG(dbgs() <<
629  format("Web %d rejected for physreg, partial reg, or not swap[pable]\n",
630  Repr));
631  DEBUG(dbgs() << " in " << EntryIdx << ": ");
632  DEBUG(SwapVector[EntryIdx].VSEMI->dump());
633  DEBUG(dbgs() << "\n");
634  }
635 
636  // Reject webs that contain swapping loads that feed something other
637  // than a swap instruction.
638  else if (SwapVector[EntryIdx].IsLoad && SwapVector[EntryIdx].IsSwap) {
639  MachineInstr *MI = SwapVector[EntryIdx].VSEMI;
640  unsigned DefReg = MI->getOperand(0).getReg();
641 
642  // We skip debug instructions in the analysis. (Note that debug
643  // location information is still maintained by this optimization
644  // because it remains on the LXVD2X and STXVD2X instructions after
645  // the XXPERMDIs are removed.)
646  for (MachineInstr &UseMI : MRI->use_nodbg_instructions(DefReg)) {
647  int UseIdx = SwapMap[&UseMI];
648 
649  if (!SwapVector[UseIdx].IsSwap || SwapVector[UseIdx].IsLoad ||
650  SwapVector[UseIdx].IsStore) {
651 
652  SwapVector[Repr].WebRejected = 1;
653 
654  DEBUG(dbgs() <<
655  format("Web %d rejected for load not feeding swap\n", Repr));
656  DEBUG(dbgs() << " def " << EntryIdx << ": ");
657  DEBUG(MI->dump());
658  DEBUG(dbgs() << " use " << UseIdx << ": ");
659  DEBUG(UseMI.dump());
660  DEBUG(dbgs() << "\n");
661  }
662  }
663 
664  // Reject webs that contain swapping stores that are fed by something
665  // other than a swap instruction.
666  } else if (SwapVector[EntryIdx].IsStore && SwapVector[EntryIdx].IsSwap) {
667  MachineInstr *MI = SwapVector[EntryIdx].VSEMI;
668  unsigned UseReg = MI->getOperand(0).getReg();
669  MachineInstr *DefMI = MRI->getVRegDef(UseReg);
670  int DefIdx = SwapMap[DefMI];
671 
672  if (!SwapVector[DefIdx].IsSwap || SwapVector[DefIdx].IsLoad ||
673  SwapVector[DefIdx].IsStore) {
674 
675  SwapVector[Repr].WebRejected = 1;
676 
677  DEBUG(dbgs() <<
678  format("Web %d rejected for store not fed by swap\n", Repr));
679  DEBUG(dbgs() << " def " << DefIdx << ": ");
680  DEBUG(DefMI->dump());
681  DEBUG(dbgs() << " use " << EntryIdx << ": ");
682  DEBUG(MI->dump());
683  DEBUG(dbgs() << "\n");
684  }
685  }
686  }
687 
688  DEBUG(dbgs() << "Swap vector after web analysis:\n\n");
689  dumpSwapVector();
690 }
691 
692 // Walk the swap vector entries looking for swaps fed by permuting loads
693 // and swaps that feed permuting stores. If the containing computation
694 // has not been marked rejected, mark each such swap for removal.
695 // (Removal is delayed in case optimization has disturbed the pattern,
696 // such that multiple loads feed the same swap, etc.)
697 void PPCVSXSwapRemoval::markSwapsForRemoval() {
698 
699  DEBUG(dbgs() << "\n*** Marking swaps for removal ***\n\n");
700 
701  for (unsigned EntryIdx = 0; EntryIdx < SwapVector.size(); ++EntryIdx) {
702 
703  if (SwapVector[EntryIdx].IsLoad && SwapVector[EntryIdx].IsSwap) {
704  int Repr = EC->getLeaderValue(SwapVector[EntryIdx].VSEId);
705 
706  if (!SwapVector[Repr].WebRejected) {
707  MachineInstr *MI = SwapVector[EntryIdx].VSEMI;
708  unsigned DefReg = MI->getOperand(0).getReg();
709 
710  for (MachineInstr &UseMI : MRI->use_nodbg_instructions(DefReg)) {
711  int UseIdx = SwapMap[&UseMI];
712  SwapVector[UseIdx].WillRemove = 1;
713 
714  DEBUG(dbgs() << "Marking swap fed by load for removal: ");
715  DEBUG(UseMI.dump());
716  }
717  }
718 
719  } else if (SwapVector[EntryIdx].IsStore && SwapVector[EntryIdx].IsSwap) {
720  int Repr = EC->getLeaderValue(SwapVector[EntryIdx].VSEId);
721 
722  if (!SwapVector[Repr].WebRejected) {
723  MachineInstr *MI = SwapVector[EntryIdx].VSEMI;
724  unsigned UseReg = MI->getOperand(0).getReg();
725  MachineInstr *DefMI = MRI->getVRegDef(UseReg);
726  int DefIdx = SwapMap[DefMI];
727  SwapVector[DefIdx].WillRemove = 1;
728 
729  DEBUG(dbgs() << "Marking swap feeding store for removal: ");
730  DEBUG(DefMI->dump());
731  }
732 
733  } else if (SwapVector[EntryIdx].IsSwappable &&
734  SwapVector[EntryIdx].SpecialHandling != 0) {
735  int Repr = EC->getLeaderValue(SwapVector[EntryIdx].VSEId);
736 
737  if (!SwapVector[Repr].WebRejected)
738  handleSpecialSwappables(EntryIdx);
739  }
740  }
741 }
742 
743 // The identified swap entry requires special handling to allow its
744 // containing computation to be optimized. Perform that handling
745 // here.
746 // FIXME: Additional opportunities will be phased in with subsequent
747 // patches.
748 void PPCVSXSwapRemoval::handleSpecialSwappables(int EntryIdx) {
749  switch (SwapVector[EntryIdx].SpecialHandling) {
750 
751  default:
752  assert(false && "Unexpected special handling type");
753  break;
754 
755  // For splats based on an index into a vector, add N/2 modulo N
756  // to the index, where N is the number of vector elements.
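 // For example (illustrative): with VSPLTW, N = 4, so a source element
 // index of 1 becomes (1 + 2) % 4 = 3, selecting the same word once the
 // doubleword halves of the register are swapped.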
757  case SHValues::SH_SPLAT: {
758  MachineInstr *MI = SwapVector[EntryIdx].VSEMI;
759  unsigned NElts;
760 
761  DEBUG(dbgs() << "Changing splat: ");
762  DEBUG(MI->dump());
763 
764  switch (MI->getOpcode()) {
765  default:
766  assert(false && "Unexpected splat opcode");
767  case PPC::VSPLTB: NElts = 16; break;
768  case PPC::VSPLTH: NElts = 8; break;
769  case PPC::VSPLTW: NElts = 4; break;
770  }
771 
772  unsigned EltNo = MI->getOperand(1).getImm();
773  EltNo = (EltNo + NElts / 2) % NElts;
774  MI->getOperand(1).setImm(EltNo);
775 
776  DEBUG(dbgs() << " Into: ");
777  DEBUG(MI->dump());
778  break;
779  }
780 
781  // For an XXPERMDI that isn't handled otherwise, we need to
782  // reverse the order of the operands. If the selector operand
783  // has a value of 0 or 3, we need to change it to 3 or 0,
784  // respectively. Otherwise we should leave it alone. (This
785  // is equivalent to reversing the two bits of the selector
786  // operand and complementing the result.)
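 // For example (illustrative): a selector of 0 becomes 3 and 3 becomes 0,
 // while 1 and 2 are unchanged (e.g. 1 = 0b01 -> reversed 0b10 ->
 // complemented 0b01 = 1), matching the reversed operand order below.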
787  case SHValues::SH_XXPERMDI: {
788  MachineInstr *MI = SwapVector[EntryIdx].VSEMI;
789 
790  DEBUG(dbgs() << "Changing XXPERMDI: ");
791  DEBUG(MI->dump());
792 
793  unsigned Selector = MI->getOperand(3).getImm();
794  if (Selector == 0 || Selector == 3)
795  Selector = 3 - Selector;
796  MI->getOperand(3).setImm(Selector);
797 
798  unsigned Reg1 = MI->getOperand(1).getReg();
799  unsigned Reg2 = MI->getOperand(2).getReg();
800  MI->getOperand(1).setReg(Reg2);
801  MI->getOperand(2).setReg(Reg1);
802 
803  DEBUG(dbgs() << " Into: ");
804  DEBUG(MI->dump());
805  break;
806  }
807 
808  // For a copy from a scalar floating-point register to a vector
809  // register, removing swaps will leave the copied value in the
810  // wrong lane. Insert a swap following the copy to fix this.
811  case SHValues::SH_COPYSCALAR: {
812  MachineInstr *MI = SwapVector[EntryIdx].VSEMI;
813 
814  DEBUG(dbgs() << "Changing SUBREG_TO_REG: ");
815  DEBUG(MI->dump());
816 
817  unsigned DstReg = MI->getOperand(0).getReg();
818  const TargetRegisterClass *DstRC = MRI->getRegClass(DstReg);
819  unsigned NewVReg = MRI->createVirtualRegister(DstRC);
820 
821  MI->getOperand(0).setReg(NewVReg);
822  DEBUG(dbgs() << " Into: ");
823  DEBUG(MI->dump());
824 
825  MachineBasicBlock::iterator InsertPoint = MI->getNextNode();
826 
827  // Note that an XXPERMDI requires a VSRC, so if the SUBREG_TO_REG
828  // is copying to a VRRC, we need to be careful to avoid a register
829  // assignment problem. In this case we must copy from VRRC to VSRC
830  // prior to the swap, and from VSRC to VRRC following the swap.
831  // Coalescing will usually remove all this mess.
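 // For example (illustrative; register names are placeholders), for a
 // VRRC destination the code below emits:
 //   %vsTmp1 = COPY %newVReg                 ; copy the VRRC value to a VSRC
 //   %vsTmp2 = XXPERMDI %vsTmp1, %vsTmp1, 2  ; the compensating swap
 //   %dstReg = COPY %vsTmp2                  ; back into the original VRRC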
832 
833  if (DstRC == &PPC::VRRCRegClass) {
834  unsigned VSRCTmp1 = MRI->createVirtualRegister(&PPC::VSRCRegClass);
835  unsigned VSRCTmp2 = MRI->createVirtualRegister(&PPC::VSRCRegClass);
836 
837  BuildMI(*MI->getParent(), InsertPoint, MI->getDebugLoc(),
838  TII->get(PPC::COPY), VSRCTmp1)
839  .addReg(NewVReg);
840  DEBUG(MI->getNextNode()->dump());
841 
842  BuildMI(*MI->getParent(), InsertPoint, MI->getDebugLoc(),
843  TII->get(PPC::XXPERMDI), VSRCTmp2)
844  .addReg(VSRCTmp1)
845  .addReg(VSRCTmp1)
846  .addImm(2);
847  DEBUG(MI->getNextNode()->getNextNode()->dump());
848 
849  BuildMI(*MI->getParent(), InsertPoint, MI->getDebugLoc(),
850  TII->get(PPC::COPY), DstReg)
851  .addReg(VSRCTmp2);
852  DEBUG(MI->getNextNode()->getNextNode()->getNextNode()->dump());
853 
854  } else {
855 
856  BuildMI(*MI->getParent(), InsertPoint, MI->getDebugLoc(),
857  TII->get(PPC::XXPERMDI), DstReg)
858  .addReg(NewVReg)
859  .addReg(NewVReg)
860  .addImm(2);
861 
862  DEBUG(MI->getNextNode()->dump());
863  }
864  break;
865  }
866  }
867 }
868 
869 // Walk the swap vector and replace each entry marked for removal with
870 // a copy operation.
871 bool PPCVSXSwapRemoval::removeSwaps() {
872 
873  DEBUG(dbgs() << "\n*** Removing swaps ***\n\n");
874 
875  bool Changed = false;
876 
877  for (unsigned EntryIdx = 0; EntryIdx < SwapVector.size(); ++EntryIdx) {
878  if (SwapVector[EntryIdx].WillRemove) {
879  Changed = true;
880  MachineInstr *MI = SwapVector[EntryIdx].VSEMI;
881  MachineBasicBlock *MBB = MI->getParent();
882  BuildMI(*MBB, MI, MI->getDebugLoc(),
883  TII->get(TargetOpcode::COPY), MI->getOperand(0).getReg())
884  .addOperand(MI->getOperand(1));
885 
886  DEBUG(dbgs() << format("Replaced %d with copy: ",
887  SwapVector[EntryIdx].VSEId));
888  DEBUG(MI->dump());
889 
890  MI->eraseFromParent();
891  }
892  }
893 
894  return Changed;
895 }
896 
897 // For debug purposes, dump the contents of the swap vector.
898 void PPCVSXSwapRemoval::dumpSwapVector() {
899 
900  for (unsigned EntryIdx = 0; EntryIdx < SwapVector.size(); ++EntryIdx) {
901 
902  MachineInstr *MI = SwapVector[EntryIdx].VSEMI;
903  int ID = SwapVector[EntryIdx].VSEId;
904 
905  DEBUG(dbgs() << format("%6d", ID));
906  DEBUG(dbgs() << format("%6d", EC->getLeaderValue(ID)));
907  DEBUG(dbgs() << format(" BB#%3d", MI->getParent()->getNumber()));
908  DEBUG(dbgs() << format(" %14s ", TII->getName(MI->getOpcode())));
909 
910  if (SwapVector[EntryIdx].IsLoad)
911  DEBUG(dbgs() << "load ");
912  if (SwapVector[EntryIdx].IsStore)
913  DEBUG(dbgs() << "store ");
914  if (SwapVector[EntryIdx].IsSwap)
915  DEBUG(dbgs() << "swap ");
916  if (SwapVector[EntryIdx].MentionsPhysVR)
917  DEBUG(dbgs() << "physreg ");
918  if (SwapVector[EntryIdx].MentionsPartialVR)
919  DEBUG(dbgs() << "partialreg ");
920 
921  if (SwapVector[EntryIdx].IsSwappable) {
922  DEBUG(dbgs() << "swappable ");
923  switch(SwapVector[EntryIdx].SpecialHandling) {
924  default:
925  DEBUG(dbgs() << "special:**unknown**");
926  break;
927  case SH_NONE:
928  break;
929  case SH_EXTRACT:
930  DEBUG(dbgs() << "special:extract ");
931  break;
932  case SH_INSERT:
933  DEBUG(dbgs() << "special:insert ");
934  break;
935  case SH_NOSWAP_LD:
936  DEBUG(dbgs() << "special:load ");
937  break;
938  case SH_NOSWAP_ST:
939  DEBUG(dbgs() << "special:store ");
940  break;
941  case SH_SPLAT:
942  DEBUG(dbgs() << "special:splat ");
943  break;
944  case SH_XXPERMDI:
945  DEBUG(dbgs() << "special:xxpermdi ");
946  break;
947  case SH_COPYSCALAR:
948  DEBUG(dbgs() << "special:copyscalar ");
949  break;
950  }
951  }
952 
953  if (SwapVector[EntryIdx].WebRejected)
954  DEBUG(dbgs() << "rejected ");
955  if (SwapVector[EntryIdx].WillRemove)
956  DEBUG(dbgs() << "remove ");
957 
958  DEBUG(dbgs() << "\n");
959 
960  // For no-asserts builds.
961  (void)MI;
962  (void)ID;
963  }
964 
965  DEBUG(dbgs() << "\n");
966 }
967 
968 } // end anonymous namespace
969 
970 INITIALIZE_PASS_BEGIN(PPCVSXSwapRemoval, DEBUG_TYPE,
971  "PowerPC VSX Swap Removal", false, false)
972 INITIALIZE_PASS_END(PPCVSXSwapRemoval, DEBUG_TYPE,
973  "PowerPC VSX Swap Removal", false, false)
974 
975 char PPCVSXSwapRemoval::ID = 0;
976 FunctionPass*
977 llvm::createPPCVSXSwapRemovalPass() { return new PPCVSXSwapRemoval(); }
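// A hedged sketch (not part of this file) of how the PowerPC target can
// schedule this pass; the actual hook and any guarding options live in
// PPCTargetMachine.cpp and may differ from what is shown here.
//
//   void PPCPassConfig::addMachineSSAOptimization() {
//     TargetPassConfig::addMachineSSAOptimization();
//     // Remove the doubleword swaps introduced for little-endian VSX;
//     // the pass itself bails out early when the subtarget lacks VSX.
//     addPass(createPPCVSXSwapRemovalPass());
//   }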