LLVM  9.0.0svn
X86EvexToVex.cpp
Go to the documentation of this file.
1 //===- X86EvexToVex.cpp ---------------------------------------------------===//
2 // Compress EVEX instructions to VEX encoding when possible to reduce code size
3 //
4 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
5 // See https://llvm.org/LICENSE.txt for license information.
6 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
7 //
8 //===----------------------------------------------------------------------===//
9 //
10 /// \file
11 /// This file defines the pass that goes over all AVX-512 instructions which
12 /// are encoded using the EVEX prefix and if possible replaces them by their
13 /// corresponding VEX encoding which is usually shorter by 2 bytes.
14 /// EVEX instructions may be encoded via the VEX prefix when the AVX-512
15 /// instruction has a corresponding AVX/AVX2 opcode and when it does not
16 /// use the xmm or the mask registers or xmm/ymm registers with indexes
17 /// higher than 15.
18 /// The pass applies code reduction on the generated code for AVX-512 instrs.
19 //
20 //===----------------------------------------------------------------------===//
21 
#include "X86.h"
#include "X86InstrInfo.h"
#include "X86Subtarget.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/CodeGen/MachineBasicBlock.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineFunctionPass.h"
#include "llvm/CodeGen/MachineInstr.h"
#include "llvm/CodeGen/MachineOperand.h"
#include "llvm/MC/MCInstrDesc.h"
#include "llvm/Pass.h"
#include <algorithm>
#include <atomic>
#include <cassert>
#include <cstdint>
36 
37 using namespace llvm;
38 
// Including the generated EVEX2VEX tables.
struct X86EvexToVexCompressTableEntry {
  uint16_t EvexOpcode;
  uint16_t VexOpcode;

  /// Order entries by their EVEX opcode so the tables can be binary-searched.
  bool operator<(const X86EvexToVexCompressTableEntry &RHS) const {
    return EvexOpcode < RHS.EvexOpcode;
  }

  /// Heterogeneous comparison used by std::lower_bound to probe a table
  /// with a bare opcode value without constructing a temporary entry.
  friend bool operator<(const X86EvexToVexCompressTableEntry &TE,
                        unsigned Opc) {
    return TE.EvexOpcode < Opc;
  }
};
53 #include "X86GenEVEX2VEXTables.inc"
54 
55 #define EVEX2VEX_DESC "Compressing EVEX instrs to VEX encoding when possible"
56 #define EVEX2VEX_NAME "x86-evex-to-vex-compress"
57 
58 #define DEBUG_TYPE EVEX2VEX_NAME
59 
60 namespace {
61 
62 class EvexToVexInstPass : public MachineFunctionPass {
63 
64  /// For EVEX instructions that can be encoded using VEX encoding, replace
65  /// them by the VEX encoding in order to reduce size.
66  bool CompressEvexToVexImpl(MachineInstr &MI) const;
67 
68 public:
69  static char ID;
70 
71  EvexToVexInstPass() : MachineFunctionPass(ID) {
73  }
74 
75  StringRef getPassName() const override { return EVEX2VEX_DESC; }
76 
77  /// Loop over all of the basic blocks, replacing EVEX instructions
78  /// by equivalent VEX instructions when possible for reducing code size.
79  bool runOnMachineFunction(MachineFunction &MF) override;
80 
81  // This pass runs after regalloc and doesn't support VReg operands.
82  MachineFunctionProperties getRequiredProperties() const override {
85  }
86 
87 private:
88  /// Machine instruction info used throughout the class.
89  const X86InstrInfo *TII;
90 };
91 
92 } // end anonymous namespace
93 
// Pass identification; the address of ID uniquely identifies the pass.
char EvexToVexInstPass::ID = 0;
95 
96 bool EvexToVexInstPass::runOnMachineFunction(MachineFunction &MF) {
97  TII = MF.getSubtarget<X86Subtarget>().getInstrInfo();
98 
100  if (!ST.hasAVX512())
101  return false;
102 
103  bool Changed = false;
104 
105  /// Go over all basic blocks in function and replace
106  /// EVEX encoded instrs by VEX encoding when possible.
107  for (MachineBasicBlock &MBB : MF) {
108 
109  // Traverse the basic block.
110  for (MachineInstr &MI : MBB)
111  Changed |= CompressEvexToVexImpl(MI);
112  }
113 
114  return Changed;
115 }
116 
117 static bool usesExtendedRegister(const MachineInstr &MI) {
118  auto isHiRegIdx = [](unsigned Reg) {
119  // Check for XMM register with indexes between 16 - 31.
120  if (Reg >= X86::XMM16 && Reg <= X86::XMM31)
121  return true;
122 
123  // Check for YMM register with indexes between 16 - 31.
124  if (Reg >= X86::YMM16 && Reg <= X86::YMM31)
125  return true;
126 
127  return false;
128  };
129 
130  // Check that operands are not ZMM regs or
131  // XMM/YMM regs with hi indexes between 16 - 31.
132  for (const MachineOperand &MO : MI.explicit_operands()) {
133  if (!MO.isReg())
134  continue;
135 
136  unsigned Reg = MO.getReg();
137 
138  assert(!(Reg >= X86::ZMM0 && Reg <= X86::ZMM31) &&
139  "ZMM instructions should not be in the EVEX->VEX tables");
140 
141  if (isHiRegIdx(Reg))
142  return true;
143  }
144 
145  return false;
146 }
147 
148 // Do any custom cleanup needed to finalize the conversion.
149 static bool performCustomAdjustments(MachineInstr &MI, unsigned NewOpc) {
150  (void)NewOpc;
151  unsigned Opc = MI.getOpcode();
152  switch (Opc) {
153  case X86::VALIGNDZ128rri:
154  case X86::VALIGNDZ128rmi:
155  case X86::VALIGNQZ128rri:
156  case X86::VALIGNQZ128rmi: {
157  assert((NewOpc == X86::VPALIGNRrri || NewOpc == X86::VPALIGNRrmi) &&
158  "Unexpected new opcode!");
159  unsigned Scale = (Opc == X86::VALIGNQZ128rri ||
160  Opc == X86::VALIGNQZ128rmi) ? 8 : 4;
162  Imm.setImm(Imm.getImm() * Scale);
163  break;
164  }
165  case X86::VSHUFF32X4Z256rmi:
166  case X86::VSHUFF32X4Z256rri:
167  case X86::VSHUFF64X2Z256rmi:
168  case X86::VSHUFF64X2Z256rri:
169  case X86::VSHUFI32X4Z256rmi:
170  case X86::VSHUFI32X4Z256rri:
171  case X86::VSHUFI64X2Z256rmi:
172  case X86::VSHUFI64X2Z256rri: {
173  assert((NewOpc == X86::VPERM2F128rr || NewOpc == X86::VPERM2I128rr ||
174  NewOpc == X86::VPERM2F128rm || NewOpc == X86::VPERM2I128rm) &&
175  "Unexpected new opcode!");
177  int64_t ImmVal = Imm.getImm();
178  // Set bit 5, move bit 1 to bit 4, copy bit 0.
179  Imm.setImm(0x20 | ((ImmVal & 2) << 3) | (ImmVal & 1));
180  break;
181  }
182  case X86::VRNDSCALEPDZ128rri:
183  case X86::VRNDSCALEPDZ128rmi:
184  case X86::VRNDSCALEPSZ128rri:
185  case X86::VRNDSCALEPSZ128rmi:
186  case X86::VRNDSCALEPDZ256rri:
187  case X86::VRNDSCALEPDZ256rmi:
188  case X86::VRNDSCALEPSZ256rri:
189  case X86::VRNDSCALEPSZ256rmi:
190  case X86::VRNDSCALESDZr:
191  case X86::VRNDSCALESDZm:
192  case X86::VRNDSCALESSZr:
193  case X86::VRNDSCALESSZm:
194  case X86::VRNDSCALESDZr_Int:
195  case X86::VRNDSCALESDZm_Int:
196  case X86::VRNDSCALESSZr_Int:
197  case X86::VRNDSCALESSZm_Int:
198  const MachineOperand &Imm = MI.getOperand(MI.getNumExplicitOperands()-1);
199  int64_t ImmVal = Imm.getImm();
200  // Ensure that only bits 3:0 of the immediate are used.
201  if ((ImmVal & 0xf) != ImmVal)
202  return false;
203  break;
204  }
205 
206  return true;
207 }
208 
209 
210 // For EVEX instructions that can be encoded using VEX encoding
211 // replace them by the VEX encoding in order to reduce size.
212 bool EvexToVexInstPass::CompressEvexToVexImpl(MachineInstr &MI) const {
213  // VEX format.
214  // # of bytes: 0,2,3 1 1 0,1 0,1,2,4 0,1
215  // [Prefixes] [VEX] OPCODE ModR/M [SIB] [DISP] [IMM]
216  //
217  // EVEX format.
218  // # of bytes: 4 1 1 1 4 / 1 1
219  // [Prefixes] EVEX Opcode ModR/M [SIB] [Disp32] / [Disp8*N] [Immediate]
220 
221  const MCInstrDesc &Desc = MI.getDesc();
222 
223  // Check for EVEX instructions only.
224  if ((Desc.TSFlags & X86II::EncodingMask) != X86II::EVEX)
225  return false;
226 
227  // Check for EVEX instructions with mask or broadcast as in these cases
228  // the EVEX prefix is needed in order to carry this information
229  // thus preventing the transformation to VEX encoding.
230  if (Desc.TSFlags & (X86II::EVEX_K | X86II::EVEX_B))
231  return false;
232 
233  // Check for EVEX instructions with L2 set. These instructions are 512-bits
234  // and can't be converted to VEX.
235  if (Desc.TSFlags & X86II::EVEX_L2)
236  return false;
237 
238 #ifndef NDEBUG
239  // Make sure the tables are sorted.
240  static std::atomic<bool> TableChecked(false);
241  if (!TableChecked.load(std::memory_order_relaxed)) {
242  assert(std::is_sorted(std::begin(X86EvexToVex128CompressTable),
243  std::end(X86EvexToVex128CompressTable)) &&
244  "X86EvexToVex128CompressTable is not sorted!");
245  assert(std::is_sorted(std::begin(X86EvexToVex256CompressTable),
246  std::end(X86EvexToVex256CompressTable)) &&
247  "X86EvexToVex256CompressTable is not sorted!");
248  TableChecked.store(true, std::memory_order_relaxed);
249  }
250 #endif
251 
252  // Use the VEX.L bit to select the 128 or 256-bit table.
254  (Desc.TSFlags & X86II::VEX_L) ? makeArrayRef(X86EvexToVex256CompressTable)
255  : makeArrayRef(X86EvexToVex128CompressTable);
256 
257  auto I = std::lower_bound(Table.begin(), Table.end(), MI.getOpcode());
258  if (I == Table.end() || I->EvexOpcode != MI.getOpcode())
259  return false;
260 
261  unsigned NewOpc = I->VexOpcode;
262 
263  if (usesExtendedRegister(MI))
264  return false;
265 
266  if (!performCustomAdjustments(MI, NewOpc))
267  return false;
268 
269  MI.setDesc(TII->get(NewOpc));
271  return true;
272 }
273 
274 INITIALIZE_PASS(EvexToVexInstPass, EVEX2VEX_NAME, EVEX2VEX_DESC, false, false)
275 
277  return new EvexToVexInstPass();
278 }
const_iterator end(StringRef path)
Get end iterator over path.
Definition: Path.cpp:258
static PassRegistry * getPassRegistry()
getPassRegistry - Access the global registry object, which is automatically initialized at applicatio...
const_iterator begin(StringRef path, Style style=Style::native)
Get begin iterator over path.
Definition: Path.cpp:249
This class represents lattice values for constants.
Definition: AllocatorList.h:23
iterator_range< mop_iterator > explicit_operands()
Definition: MachineInstr.h:464
iterator begin() const
Definition: ArrayRef.h:136
Describe properties that are true of each instruction in the target description file.
Definition: MCInstrDesc.h:163
unsigned Reg
FunctionPass * createX86EvexToVexInsts()
This pass replaces EVEX-encoded AVX-512 instructions with their VEX encoding when possible in order to reduce code size.
bool operator<(const X86EvexToVexCompressTableEntry &RHS) const
MachineFunctionPass - This class adapts the FunctionPass interface to allow convenient creation of pa...
const HexagonInstrInfo * TII
ArrayRef< T > makeArrayRef(const T &OneElt)
Construct an ArrayRef from a single element.
Definition: ArrayRef.h:450
unsigned getOpcode() const
Returns the opcode of this MachineInstr.
Definition: MachineInstr.h:408
const MCInstrDesc & getDesc() const
Returns the target instruction descriptor of this MachineInstr.
Definition: MachineInstr.h:405
void initializeEvexToVexInstPassPass(PassRegistry &)
ArrayRef - Represent a constant reference to an array (0 or more elements consecutively in memory)...
Definition: APInt.h:32
auto lower_bound(R &&Range, ForwardIt I) -> decltype(adl_begin(Range))
Provide wrappers to std::lower_bound which take ranges instead of having to pass begin/end explicitly...
Definition: STLExtras.h:1281
const TargetSubtargetInfo & getSubtarget() const
getSubtarget - Return the subtarget for which this machine code is being compiled.
void setImm(int64_t immVal)
FunctionPass class - This class is used to implement most global optimizations.
Definition: Pass.h:284
unsigned getNumExplicitOperands() const
Returns the number of non-implicit operands.
#define INITIALIZE_PASS(passName, arg, name, cfg, analysis)
Definition: PassSupport.h:33
static bool performCustomAdjustments(MachineInstr &MI, unsigned NewOpc)
void setDesc(const MCInstrDesc &tid)
Replace the instruction descriptor (thus opcode) of the current instruction with a new one...
MachineOperand class - Representation of each machine instruction operand.
#define EVEX2VEX_DESC
iterator end() const
Definition: ArrayRef.h:137
int64_t getImm() const
MachineFunctionProperties & set(Property P)
static bool usesExtendedRegister(const MachineInstr &MI)
Representation of each machine instruction.
Definition: MachineInstr.h:63
#define EVEX2VEX_NAME
#define I(x, y, z)
Definition: MD5.cpp:58
assert(ImpDefSCC.getReg()==AMDGPU::SCC &&ImpDefSCC.isDef())
friend bool operator<(const X86EvexToVexCompressTableEntry &TE, unsigned Opc)
bool hasAVX512() const
Definition: X86Subtarget.h:561
IRTranslator LLVM IR MI
StringRef - Represent a constant reference to a string, i.e.
Definition: StringRef.h:48
void setAsmPrinterFlag(uint8_t Flag)
Set a flag for the AsmPrinter.
Definition: MachineInstr.h:279
const MachineOperand & getOperand(unsigned i) const
Definition: MachineInstr.h:413
Properties which a MachineFunction may have at a given point in time.