LLVM 4.0.0
AArch64VectorByElementOpt.cpp
//=- AArch64VectorByElementOpt.cpp - AArch64 vector by element inst opt pass =//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file contains a pass that performs optimization for vector by element
// SIMD instructions.
//
// Certain SIMD instructions with a vector element operand are not efficient.
// Rewrite them into SIMD instructions with vector operands. This rewrite
// is driven by the latency of the instructions.
//
// Example:
//     fmla v0.4s, v1.4s, v2.s[1]
// is rewritten into
//     dup v3.4s, v2.s[1]
//     fmla v0.4s, v1.4s, v3.4s
//
//===----------------------------------------------------------------------===//
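
// Implementation note: the rewrite below is applied only when the subtarget's
// scheduling model reports
//     latency(indexed instruction) > latency(DUP) + latency(vector op),
// and an equivalent DUP already present in the basic block is reused when
// possible instead of creating a new one.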

#include "AArch64InstrInfo.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/TargetSchedule.h"

using namespace llvm;

#define DEBUG_TYPE "aarch64-vectorbyelement-opt"

STATISTIC(NumModifiedInstr,
          "Number of vector by element instructions modified");

#define AARCH64_VECTOR_BY_ELEMENT_OPT_NAME                                     \
  "AArch64 vector by element instruction optimization pass"

namespace {

struct AArch64VectorByElementOpt : public MachineFunctionPass {
  static char ID;
  AArch64VectorByElementOpt() : MachineFunctionPass(ID) {
    initializeAArch64VectorByElementOptPass(*PassRegistry::getPassRegistry());
  }

  const TargetInstrInfo *TII;
  MachineRegisterInfo *MRI;
  TargetSchedModel SchedModel;
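
  // Note: TII, MRI, and SchedModel are initialized once per machine function
  // in runOnMachineFunction before any rewriting is attempted.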

  /// Based only on the latency of the instructions, determine if it is cost
  /// efficient to replace the instruction InstDesc by the two instructions
  /// InstDescRep1 and InstDescRep2.
  /// Return true if replacement is recommended.
  bool
  shouldReplaceInstruction(MachineFunction *MF, const MCInstrDesc *InstDesc,
                           const MCInstrDesc *InstDescRep1,
                           const MCInstrDesc *InstDescRep2,
                           std::map<unsigned, bool> &VecInstElemTable) const;

  /// Determine if we need to exit the vector by element instruction
  /// optimization pass early. This makes sure that targets with no need
  /// for this optimization do not spend any compile time on this pass.
  /// This check is done by comparing the latency of an indexed FMLA
  /// instruction to the latency of the DUP plus the latency of a vector
  /// FMLA instruction. We do not check other related instructions, such
  /// as FMLS, as we assume that if the situation shows up for one
  /// instruction, it is likely to show up for the related ones.
  /// Return true if early exit of the pass is recommended.
  bool earlyExitVectElement(MachineFunction *MF);
72 
73  /// Check whether an equivalent DUP instruction has already been
74  /// created or not.
75  /// Return true when the dup instruction already exists. In this case,
76  /// DestReg will point to the destination of the already created DUP.
77  bool reuseDUP(MachineInstr &MI, unsigned DupOpcode, unsigned SrcReg,
78  unsigned LaneNumber, unsigned *DestReg) const;
79 
80  /// Certain SIMD instructions with vector element operand are not efficient.
81  /// Rewrite them into SIMD instructions with vector operands. This rewrite
82  /// is driven by the latency of the instructions.
83  /// Return true if the SIMD instruction is modified.
84  bool optimizeVectElement(MachineInstr &MI,
85  std::map<unsigned, bool> *VecInstElemTable) const;
86 
87  bool runOnMachineFunction(MachineFunction &Fn) override;
88 
89  StringRef getPassName() const override {
91  }
92 };
char AArch64VectorByElementOpt::ID = 0;

} // namespace

INITIALIZE_PASS(AArch64VectorByElementOpt, "aarch64-vectorbyelement-opt",
                AARCH64_VECTOR_BY_ELEMENT_OPT_NAME, false, false)

/// Based only on the latency of the instructions, determine if it is cost
/// efficient to replace the instruction InstDesc by the two instructions
/// InstDescRep1 and InstDescRep2. Note that it is assumed in this function
/// that an instruction of type InstDesc is always replaced by the same two
/// instructions, as the results are cached here.
/// Return true if replacement is recommended.
bool AArch64VectorByElementOpt::shouldReplaceInstruction(
    MachineFunction *MF, const MCInstrDesc *InstDesc,
    const MCInstrDesc *InstDescRep1, const MCInstrDesc *InstDescRep2,
    std::map<unsigned, bool> &VecInstElemTable) const {
  // Check if the replacement decision is already available in the cached
  // table; if so, return it.
  if (!VecInstElemTable.empty() &&
      VecInstElemTable.find(InstDesc->getOpcode()) != VecInstElemTable.end())
    return VecInstElemTable[InstDesc->getOpcode()];

  unsigned SCIdx = InstDesc->getSchedClass();
  unsigned SCIdxRep1 = InstDescRep1->getSchedClass();
  unsigned SCIdxRep2 = InstDescRep2->getSchedClass();
  const MCSchedClassDesc *SCDesc =
      SchedModel.getMCSchedModel()->getSchedClassDesc(SCIdx);
  const MCSchedClassDesc *SCDescRep1 =
      SchedModel.getMCSchedModel()->getSchedClassDesc(SCIdxRep1);
  const MCSchedClassDesc *SCDescRep2 =
      SchedModel.getMCSchedModel()->getSchedClassDesc(SCIdxRep2);

  // If a subtarget does not define resources for any of the instructions
  // of interest, then return false for no replacement.
  if (!SCDesc->isValid() || SCDesc->isVariant() || !SCDescRep1->isValid() ||
      SCDescRep1->isVariant() || !SCDescRep2->isValid() ||
      SCDescRep2->isVariant()) {
    VecInstElemTable[InstDesc->getOpcode()] = false;
    return false;
  }

  if (SchedModel.computeInstrLatency(InstDesc->getOpcode()) >
      SchedModel.computeInstrLatency(InstDescRep1->getOpcode()) +
          SchedModel.computeInstrLatency(InstDescRep2->getOpcode())) {
    VecInstElemTable[InstDesc->getOpcode()] = true;
    return true;
  }
  VecInstElemTable[InstDesc->getOpcode()] = false;
  return false;
}
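
// As an illustration with made-up latencies (not taken from any real
// scheduling model): if a subtarget reported an indexed FMLA latency of 9
// cycles, a DUP latency of 3 cycles, and a vector FMLA latency of 5 cycles,
// then 9 > 3 + 5 holds and the rewrite is recommended. The comparison is
// strictly greater-than, so a tie keeps the original single instruction.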

/// Determine if we need to exit the vector by element instruction
/// optimization pass early. This makes sure that targets with no need
/// for this optimization do not spend any compile time on this pass.
/// This check is done by comparing the latency of an indexed FMLA
/// instruction to the latency of the DUP plus the latency of a vector
/// FMLA instruction. We do not check other related instructions, such
/// as FMLS, as we assume that if the situation shows up for one
/// instruction, it is likely to show up for the related ones.
/// Return true if early exit of the pass is recommended.
bool AArch64VectorByElementOpt::earlyExitVectElement(MachineFunction *MF) {
  std::map<unsigned, bool> VecInstElemTable;
  const MCInstrDesc *IndexMulMCID = &TII->get(AArch64::FMLAv4i32_indexed);
  const MCInstrDesc *DupMCID = &TII->get(AArch64::DUPv4i32lane);
  const MCInstrDesc *MulMCID = &TII->get(AArch64::FMULv4f32);

  if (!shouldReplaceInstruction(MF, IndexMulMCID, DupMCID, MulMCID,
                                VecInstElemTable))
    return true;
  return false;
}
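
// If even this representative FMLA case is not profitable to rewrite on the
// current subtarget, the pass bails out before scanning any instructions.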

/// Check whether an equivalent DUP instruction has already been
/// created or not.
/// Return true when the DUP instruction already exists. In this case,
/// DestReg will point to the destination of the already created DUP.
bool AArch64VectorByElementOpt::reuseDUP(MachineInstr &MI, unsigned DupOpcode,
                                         unsigned SrcReg, unsigned LaneNumber,
                                         unsigned *DestReg) const {
  // Walk backwards from MI to the start of its basic block, looking for a
  // DUP of the same source register and lane number.
  for (MachineBasicBlock::iterator MII = MI, MIE = MI.getParent()->begin();
       MII != MIE;) {
    MII--;
    MachineInstr *CurrentMI = &*MII;

    if (CurrentMI->getOpcode() == DupOpcode &&
        CurrentMI->getNumOperands() == 3 &&
        CurrentMI->getOperand(1).getReg() == SrcReg &&
        CurrentMI->getOperand(2).getImm() == LaneNumber) {
      *DestReg = CurrentMI->getOperand(0).getReg();
      return true;
    }
  }

  return false;
}
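
// For example, assuming both indexed fmla instructions below qualify for the
// rewrite, the dup created for the first one is reused for the second:
//     fmla v0.4s, v1.4s, v2.s[1]
//     fmla v3.4s, v4.4s, v2.s[1]
// becomes
//     dup  v5.4s, v2.s[1]
//     fmla v0.4s, v1.4s, v5.4s
//     fmla v3.4s, v4.4s, v5.4s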

/// Certain SIMD instructions with a vector element operand are not efficient.
/// Rewrite them into SIMD instructions with vector operands. This rewrite
/// is driven by the latency of the instructions.
/// The instructions of concern are, for the time being, fmla, fmls, fmul,
/// and fmulx, and hence they are hardcoded.
///
/// Example:
///     fmla v0.4s, v1.4s, v2.s[1]
/// is rewritten into
///     dup v3.4s, v2.s[1]       // dup not necessary if redundant
///     fmla v0.4s, v1.4s, v3.4s
/// Return true if the SIMD instruction is modified.
bool AArch64VectorByElementOpt::optimizeVectElement(
    MachineInstr &MI, std::map<unsigned, bool> *VecInstElemTable) const {
  const MCInstrDesc *MulMCID, *DupMCID;
  const TargetRegisterClass *RC = &AArch64::FPR128RegClass;

  switch (MI.getOpcode()) {
  default:
    return false;

  // 4X32 instructions
  case AArch64::FMLAv4i32_indexed:
    DupMCID = &TII->get(AArch64::DUPv4i32lane);
    MulMCID = &TII->get(AArch64::FMLAv4f32);
    break;
  case AArch64::FMLSv4i32_indexed:
    DupMCID = &TII->get(AArch64::DUPv4i32lane);
    MulMCID = &TII->get(AArch64::FMLSv4f32);
    break;
  case AArch64::FMULXv4i32_indexed:
    DupMCID = &TII->get(AArch64::DUPv4i32lane);
    MulMCID = &TII->get(AArch64::FMULXv4f32);
    break;
  case AArch64::FMULv4i32_indexed:
    DupMCID = &TII->get(AArch64::DUPv4i32lane);
    MulMCID = &TII->get(AArch64::FMULv4f32);
    break;

  // 2X64 instructions
  case AArch64::FMLAv2i64_indexed:
    DupMCID = &TII->get(AArch64::DUPv2i64lane);
    MulMCID = &TII->get(AArch64::FMLAv2f64);
    break;
  case AArch64::FMLSv2i64_indexed:
    DupMCID = &TII->get(AArch64::DUPv2i64lane);
    MulMCID = &TII->get(AArch64::FMLSv2f64);
    break;
  case AArch64::FMULXv2i64_indexed:
    DupMCID = &TII->get(AArch64::DUPv2i64lane);
    MulMCID = &TII->get(AArch64::FMULXv2f64);
    break;
  case AArch64::FMULv2i64_indexed:
    DupMCID = &TII->get(AArch64::DUPv2i64lane);
    MulMCID = &TII->get(AArch64::FMULv2f64);
    break;

  // 2X32 instructions
  case AArch64::FMLAv2i32_indexed:
    RC = &AArch64::FPR64RegClass;
    DupMCID = &TII->get(AArch64::DUPv2i32lane);
    MulMCID = &TII->get(AArch64::FMLAv2f32);
    break;
  case AArch64::FMLSv2i32_indexed:
    RC = &AArch64::FPR64RegClass;
    DupMCID = &TII->get(AArch64::DUPv2i32lane);
    MulMCID = &TII->get(AArch64::FMLSv2f32);
    break;
  case AArch64::FMULXv2i32_indexed:
    RC = &AArch64::FPR64RegClass;
    DupMCID = &TII->get(AArch64::DUPv2i32lane);
    MulMCID = &TII->get(AArch64::FMULXv2f32);
    break;
  case AArch64::FMULv2i32_indexed:
    RC = &AArch64::FPR64RegClass;
    DupMCID = &TII->get(AArch64::DUPv2i32lane);
    MulMCID = &TII->get(AArch64::FMULv2f32);
    break;
  }

  if (!shouldReplaceInstruction(MI.getParent()->getParent(),
                                &TII->get(MI.getOpcode()), DupMCID, MulMCID,
                                *VecInstElemTable))
    return false;

  const DebugLoc &DL = MI.getDebugLoc();
  MachineBasicBlock &MBB = *MI.getParent();
  MachineRegisterInfo &MRI = MBB.getParent()->getRegInfo();

  // Get the operands of the current SIMD arithmetic instruction.
  unsigned MulDest = MI.getOperand(0).getReg();
  unsigned SrcReg0 = MI.getOperand(1).getReg();
  unsigned Src0IsKill = getKillRegState(MI.getOperand(1).isKill());
  unsigned SrcReg1 = MI.getOperand(2).getReg();
  unsigned Src1IsKill = getKillRegState(MI.getOperand(2).isKill());
  unsigned DupDest;

  // Instructions of interest have either 4 or 5 operands.
  if (MI.getNumOperands() == 5) {
    // Accumulating forms (fmla/fmls): dest, accumulator, multiplicand,
    // indexed register, and lane number.
    unsigned SrcReg2 = MI.getOperand(3).getReg();
    unsigned Src2IsKill = getKillRegState(MI.getOperand(3).isKill());
    unsigned LaneNumber = MI.getOperand(4).getImm();

    // Create a new DUP instruction. Note that if an equivalent DUP
    // instruction has already been created before, then use that one
    // instead of creating a new one.
    if (!reuseDUP(MI, DupMCID->getOpcode(), SrcReg2, LaneNumber, &DupDest)) {
      DupDest = MRI.createVirtualRegister(RC);
      BuildMI(MBB, MI, DL, *DupMCID, DupDest)
          .addReg(SrcReg2, Src2IsKill)
          .addImm(LaneNumber);
    }
    BuildMI(MBB, MI, DL, *MulMCID, MulDest)
        .addReg(SrcReg0, Src0IsKill)
        .addReg(SrcReg1, Src1IsKill)
        .addReg(DupDest, Src2IsKill);
  } else if (MI.getNumOperands() == 4) {
    // Non-accumulating forms (fmul/fmulx): dest, multiplicand, indexed
    // register, and lane number.
    unsigned LaneNumber = MI.getOperand(3).getImm();
    if (!reuseDUP(MI, DupMCID->getOpcode(), SrcReg1, LaneNumber, &DupDest)) {
      DupDest = MRI.createVirtualRegister(RC);
      BuildMI(MBB, MI, DL, *DupMCID, DupDest)
          .addReg(SrcReg1, Src1IsKill)
          .addImm(LaneNumber);
    }
    BuildMI(MBB, MI, DL, *MulMCID, MulDest)
        .addReg(SrcReg0, Src0IsKill)
        .addReg(DupDest, Src1IsKill);
  } else {
    return false;
  }

  ++NumModifiedInstr;
  return true;
}
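
// Note that the replaced instruction is not erased here: the new dup and
// vector instruction are inserted before MI, and the caller is responsible
// for deleting MI once iteration over the basic block is complete.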

bool AArch64VectorByElementOpt::runOnMachineFunction(MachineFunction &MF) {
  if (skipFunction(*MF.getFunction()))
    return false;

  TII = MF.getSubtarget().getInstrInfo();
  MRI = &MF.getRegInfo();
  const TargetSubtargetInfo &ST = MF.getSubtarget();
  const AArch64InstrInfo *AAII =
      static_cast<const AArch64InstrInfo *>(ST.getInstrInfo());
  if (!AAII)
    return false;
  SchedModel.init(ST.getSchedModel(), &ST, AAII);
  if (!SchedModel.hasInstrSchedModel())
    return false;

  // A simple check to exit this pass early for targets that do not need it.
  if (earlyExitVectElement(&MF))
    return false;

  bool Changed = false;
  std::map<unsigned, bool> VecInstElemTable;
  SmallVector<MachineInstr *, 8> RemoveMIs;

  for (MachineBasicBlock &MBB : MF) {
    for (MachineBasicBlock::iterator MII = MBB.begin(), MIE = MBB.end();
         MII != MIE;) {
      MachineInstr &MI = *MII;
      if (optimizeVectElement(MI, &VecInstElemTable)) {
        // Add MI to the list of instructions to be removed, given that it
        // has been replaced.
        RemoveMIs.push_back(&MI);
        Changed = true;
      }
      ++MII;
    }
  }

  for (MachineInstr *MI : RemoveMIs)
    MI->eraseFromParent();

  return Changed;
}

/// createAArch64VectorByElementOptPass - returns an instance of the
/// vector by element optimization pass.
FunctionPass *llvm::createAArch64VectorByElementOptPass() {
  return new AArch64VectorByElementOpt();
}
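
// A minimal sketch of how this pass would be scheduled, assuming the usual
// TargetPassConfig setup in AArch64TargetMachine.cpp. The hook shown is an
// assumption, not the exact insertion point used by the target: the pass
// creates virtual registers, so it must run somewhere before register
// allocation.
//
//   void AArch64PassConfig::addPreRegAlloc() {
//     // ... other pre-RA passes ...
//     addPass(createAArch64VectorByElementOptPass());
//   }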