LLVM 20.0.0git
RISCVMakeCompressible.cpp
Go to the documentation of this file.
1//===-- RISCVMakeCompressible.cpp - Make more instructions compressible ---===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8//
9// This pass searches for instructions that are prevented from being compressed
10// by one of the following:
11//
12// 1. The use of a single uncompressed register.
13// 2. A base register + offset where the offset is too large to be compressed
14// and the base register may or may not be compressed.
15//
16//
17// For case 1, if a compressed register is available, then the uncompressed
18// register is copied to the compressed register and its uses are replaced.
19//
20// For example, storing zero uses the uncompressible zero register:
21// sw zero, 0(a0) # if zero
22// sw zero, 8(a0) # if zero
23// sw zero, 4(a0) # if zero
24// sw zero, 24(a0) # if zero
25//
26// If a compressed register (e.g. a1) is available, the above can be transformed
27// to the following to improve code size:
28// li a1, 0
29// c.sw a1, 0(a0)
30// c.sw a1, 8(a0)
31// c.sw a1, 4(a0)
32// c.sw a1, 24(a0)
33//
34//
35// For case 2, if a compressed register is available, then the original base
36// is copied and adjusted such that:
37//
38// new_base_register = base_register + adjustment
39// base_register + large_offset = new_base_register + small_offset
40//
41// For example, the following offsets are too large for c.sw:
42// lui a2, 983065
43// sw a1, -236(a2)
44// sw a1, -240(a2)
45// sw a1, -244(a2)
46// sw a1, -248(a2)
47// sw a1, -252(a2)
48// sw a0, -256(a2)
49//
50// If a compressed register is available (e.g. a3), a new base could be created
// such that the addresses can be accessed with a compressible offset, thus
52// improving code size:
53// lui a2, 983065
54// addi a3, a2, -256
55// c.sw a1, 20(a3)
56// c.sw a1, 16(a3)
57// c.sw a1, 12(a3)
58// c.sw a1, 8(a3)
59// c.sw a1, 4(a3)
60// c.sw a0, 0(a3)
61//
62//
63// This optimization is only applied if there are enough uses of the copied
64// register for code size to be reduced.
65//
66//===----------------------------------------------------------------------===//
67
#include "RISCV.h"
#include "RISCVSubtarget.h"
#include "llvm/CodeGen/Passes.h"
#include "llvm/CodeGen/RegisterScavenging.h"
#include "llvm/Support/Debug.h"
74
75using namespace llvm;
76
77#define DEBUG_TYPE "riscv-make-compressible"
78#define RISCV_COMPRESS_INSTRS_NAME "RISC-V Make Compressible"
79
80namespace {
81
82struct RISCVMakeCompressibleOpt : public MachineFunctionPass {
83 static char ID;
84
85 bool runOnMachineFunction(MachineFunction &Fn) override;
86
87 RISCVMakeCompressibleOpt() : MachineFunctionPass(ID) {}
88
89 StringRef getPassName() const override { return RISCV_COMPRESS_INSTRS_NAME; }
90};
91} // namespace
92
93char RISCVMakeCompressibleOpt::ID = 0;
94INITIALIZE_PASS(RISCVMakeCompressibleOpt, "riscv-make-compressible",
95 RISCV_COMPRESS_INSTRS_NAME, false, false)
96
97// Return log2(widthInBytes) of load/store done by Opcode.
98static unsigned log2LdstWidth(unsigned Opcode) {
99 switch (Opcode) {
100 default:
101 llvm_unreachable("Unexpected opcode");
102 case RISCV::LBU:
103 case RISCV::SB:
104 return 0;
105 case RISCV::LH:
106 case RISCV::LHU:
107 case RISCV::SH:
108 return 1;
109 case RISCV::LW:
110 case RISCV::SW:
111 case RISCV::FLW:
112 case RISCV::FSW:
113 return 2;
114 case RISCV::LD:
115 case RISCV::SD:
116 case RISCV::FLD:
117 case RISCV::FSD:
118 return 3;
119 }
120}
121
122// Return bit field size of immediate operand of Opcode.
123static unsigned offsetMask(unsigned Opcode) {
124 switch (Opcode) {
125 default:
126 llvm_unreachable("Unexpected opcode");
127 case RISCV::LBU:
128 case RISCV::SB:
129 return maskTrailingOnes<unsigned>(2U);
130 case RISCV::LH:
131 case RISCV::LHU:
132 case RISCV::SH:
133 return maskTrailingOnes<unsigned>(1U);
134 case RISCV::LW:
135 case RISCV::SW:
136 case RISCV::FLW:
137 case RISCV::FSW:
138 case RISCV::LD:
139 case RISCV::SD:
140 case RISCV::FLD:
141 case RISCV::FSD:
142 return maskTrailingOnes<unsigned>(5U);
143 }
144}
145
146// Return a mask for the offset bits of a non-stack-pointer based compressed
147// load/store.
148static uint8_t compressedLDSTOffsetMask(unsigned Opcode) {
149 return offsetMask(Opcode) << log2LdstWidth(Opcode);
150}
151
152// Return true if Offset fits within a compressed stack-pointer based
153// load/store.
154static bool compressibleSPOffset(int64_t Offset, unsigned Opcode) {
155 // Compressed sp-based loads and stores only work for 32/64 bits.
156 switch (log2LdstWidth(Opcode)) {
157 case 2:
158 return isShiftedUInt<6, 2>(Offset);
159 case 3:
160 return isShiftedUInt<6, 3>(Offset);
161 }
162 return false;
163}
164
165// Given an offset for a load/store, return the adjustment required to the base
166// register such that the address can be accessed with a compressible offset.
167// This will return 0 if the offset is already compressible.
168static int64_t getBaseAdjustForCompression(int64_t Offset, unsigned Opcode) {
169 // Return the excess bits that do not fit in a compressible offset.
170 return Offset & ~compressedLDSTOffsetMask(Opcode);
171}
172
173// Return true if Reg is in a compressed register class.
174static bool isCompressedReg(Register Reg) {
175 return RISCV::GPRCRegClass.contains(Reg) ||
176 RISCV::FPR32CRegClass.contains(Reg) ||
177 RISCV::FPR64CRegClass.contains(Reg);
178}
179
180// Return true if MI is a load for which there exists a compressed version.
181static bool isCompressibleLoad(const MachineInstr &MI) {
182 const RISCVSubtarget &STI = MI.getMF()->getSubtarget<RISCVSubtarget>();
183
184 switch (MI.getOpcode()) {
185 default:
186 return false;
187 case RISCV::LBU:
188 case RISCV::LH:
189 case RISCV::LHU:
190 return STI.hasStdExtZcb();
191 case RISCV::LW:
192 case RISCV::LD:
193 return STI.hasStdExtCOrZca();
194 case RISCV::FLW:
195 return !STI.is64Bit() && STI.hasStdExtCOrZcfOrZce();
196 case RISCV::FLD:
197 return STI.hasStdExtCOrZcd();
198 }
199}
200
201// Return true if MI is a store for which there exists a compressed version.
203 const RISCVSubtarget &STI = MI.getMF()->getSubtarget<RISCVSubtarget>();
204
205 switch (MI.getOpcode()) {
206 default:
207 return false;
208 case RISCV::SB:
209 case RISCV::SH:
210 return STI.hasStdExtZcb();
211 case RISCV::SW:
212 case RISCV::SD:
213 return STI.hasStdExtCOrZca();
214 case RISCV::FSW:
215 return !STI.is64Bit() && STI.hasStdExtCOrZcfOrZce();
216 case RISCV::FSD:
217 return STI.hasStdExtCOrZcd();
218 }
219}
220
221// Find a single register and/or large offset which, if compressible, would
222// allow the given instruction to be compressed.
223//
224// Possible return values:
225//
226// {Reg, 0} - Uncompressed Reg needs replacing with a compressed
227// register.
228// {Reg, N} - Reg needs replacing with a compressed register and
229// N needs adding to the new register. (Reg may be
230// compressed or uncompressed).
231// {RISCV::NoRegister, 0} - No suitable optimization found for this
232// instruction.
234 const unsigned Opcode = MI.getOpcode();
235
237 const MachineOperand &MOImm = MI.getOperand(2);
238 if (!MOImm.isImm())
239 return RegImmPair(RISCV::NoRegister, 0);
240
241 int64_t Offset = MOImm.getImm();
242 int64_t NewBaseAdjust = getBaseAdjustForCompression(Offset, Opcode);
243 Register Base = MI.getOperand(1).getReg();
244
245 // Memory accesses via the stack pointer do not have a requirement for
246 // either of the registers to be compressible and can take a larger offset.
247 if (RISCV::SPRegClass.contains(Base)) {
248 if (!compressibleSPOffset(Offset, Opcode) && NewBaseAdjust)
249 return RegImmPair(Base, NewBaseAdjust);
250 } else {
251 Register SrcDest = MI.getOperand(0).getReg();
252 bool SrcDestCompressed = isCompressedReg(SrcDest);
253 bool BaseCompressed = isCompressedReg(Base);
254
255 // If only Base and/or offset prevent compression, then return Base and
256 // any adjustment required to make the offset compressible.
257 if ((!BaseCompressed || NewBaseAdjust) && SrcDestCompressed)
258 return RegImmPair(Base, NewBaseAdjust);
259
260 // For loads, we can only change the base register since dest is defined
261 // rather than used.
262 //
263 // For stores, we can change SrcDest (and Base if SrcDest == Base) but
264 // cannot resolve an uncompressible offset in this case.
265 if (isCompressibleStore(MI)) {
266 if (!SrcDestCompressed && (BaseCompressed || SrcDest == Base) &&
267 !NewBaseAdjust)
268 return RegImmPair(SrcDest, NewBaseAdjust);
269 }
270 }
271 }
272 return RegImmPair(RISCV::NoRegister, 0);
273}
274
275// Check all uses after FirstMI of the given register, keeping a vector of
276// instructions that would be compressible if the given register (and offset if
277// applicable) were compressible.
278//
279// If there are enough uses for this optimization to improve code size and a
280// compressed register is available, return that compressed register.
282 RegImmPair RegImm,
284 MachineBasicBlock &MBB = *FirstMI.getParent();
285 const TargetRegisterInfo *TRI =
287
289 E = MBB.instr_end();
290 I != E; ++I) {
291 MachineInstr &MI = *I;
292
293 // Determine if this is an instruction which would benefit from using the
294 // new register.
296 if (CandidateRegImm.Reg == RegImm.Reg && CandidateRegImm.Imm == RegImm.Imm)
297 MIs.push_back(&MI);
298
299 // If RegImm.Reg is modified by this instruction, then we cannot optimize
300 // past this instruction. If the register is already compressed, then it may
301 // possible to optimize a large offset in the current instruction - this
302 // will have been detected by the preceeding call to
303 // getRegImmPairPreventingCompression.
304 if (MI.modifiesRegister(RegImm.Reg, TRI))
305 break;
306 }
307
308 // Adjusting the base costs one new uncompressed addi and therefore three uses
309 // are required for a code size reduction. If no base adjustment is required,
310 // then copying the register costs one new c.mv (or c.li Rd, 0 for "copying"
311 // the zero register) and therefore two uses are required for a code size
312 // reduction.
313 if (MIs.size() < 2 || (RegImm.Imm != 0 && MIs.size() < 3))
314 return RISCV::NoRegister;
315
316 // Find a compressible register which will be available from the first
317 // instruction we care about to the last.
318 const TargetRegisterClass *RCToScavenge;
319
320 // Work out the compressed register class from which to scavenge.
321 if (RISCV::GPRRegClass.contains(RegImm.Reg))
322 RCToScavenge = &RISCV::GPRCRegClass;
323 else if (RISCV::FPR32RegClass.contains(RegImm.Reg))
324 RCToScavenge = &RISCV::FPR32CRegClass;
325 else if (RISCV::FPR64RegClass.contains(RegImm.Reg))
326 RCToScavenge = &RISCV::FPR64CRegClass;
327 else
328 return RISCV::NoRegister;
329
330 RegScavenger RS;
332 RS.backward(std::next(MIs.back()->getIterator()));
333 return RS.scavengeRegisterBackwards(*RCToScavenge, FirstMI.getIterator(),
334 /*RestoreAfter=*/false, /*SPAdj=*/0,
335 /*AllowSpill=*/false);
336}
337
338// Update uses of the old register in the given instruction to the new register.
339static void updateOperands(MachineInstr &MI, RegImmPair OldRegImm,
340 Register NewReg) {
341 unsigned Opcode = MI.getOpcode();
342
343 // If this pass is extended to support more instructions, the check for
344 // definedness may need to be strengthened.
346 "Unsupported instruction for this optimization.");
347
348 int SkipN = 0;
349
350 // Skip the first (value) operand to a store instruction (except if the store
351 // offset is zero) in order to avoid an incorrect transformation.
352 // e.g. sd a0, 808(a0) to addi a2, a0, 768; sd a2, 40(a2)
353 if (isCompressibleStore(MI) && OldRegImm.Imm != 0)
354 SkipN = 1;
355
356 // Update registers
357 for (MachineOperand &MO : drop_begin(MI.operands(), SkipN))
358 if (MO.isReg() && MO.getReg() == OldRegImm.Reg) {
359 // Do not update operands that define the old register.
360 //
361 // The new register was scavenged for the range of instructions that are
362 // being updated, therefore it should not be defined within this range
363 // except possibly in the final instruction.
364 if (MO.isDef()) {
366 continue;
367 }
368 // Update reg
369 MO.setReg(NewReg);
370 }
371
372 // Update offset
373 MachineOperand &MOImm = MI.getOperand(2);
374 int64_t NewOffset = MOImm.getImm() & compressedLDSTOffsetMask(Opcode);
375 MOImm.setImm(NewOffset);
376}
377
378bool RISCVMakeCompressibleOpt::runOnMachineFunction(MachineFunction &Fn) {
379 // This is a size optimization.
380 if (skipFunction(Fn.getFunction()) || !Fn.getFunction().hasMinSize())
381 return false;
382
383 const RISCVSubtarget &STI = Fn.getSubtarget<RISCVSubtarget>();
384 const RISCVInstrInfo &TII = *STI.getInstrInfo();
385
386 // This optimization only makes sense if compressed instructions are emitted.
387 if (!STI.hasStdExtCOrZca())
388 return false;
389
390 for (MachineBasicBlock &MBB : Fn) {
391 LLVM_DEBUG(dbgs() << "MBB: " << MBB.getName() << "\n");
392 for (MachineInstr &MI : MBB) {
393 // Determine if this instruction would otherwise be compressed if not for
394 // an uncompressible register or offset.
396 if (!RegImm.Reg && RegImm.Imm == 0)
397 continue;
398
399 // Determine if there is a set of instructions for which replacing this
400 // register with a compressed register (and compressible offset if
401 // applicable) is possible and will allow compression.
403 Register NewReg = analyzeCompressibleUses(MI, RegImm, MIs);
404 if (!NewReg)
405 continue;
406
407 // Create the appropriate copy and/or offset.
408 if (RISCV::GPRRegClass.contains(RegImm.Reg)) {
409 assert(isInt<12>(RegImm.Imm));
410 BuildMI(MBB, MI, MI.getDebugLoc(), TII.get(RISCV::ADDI), NewReg)
411 .addReg(RegImm.Reg)
412 .addImm(RegImm.Imm);
413 } else {
414 // If we are looking at replacing an FPR register we don't expect to
415 // have any offset. The only compressible FP instructions with an offset
416 // are loads and stores, for which the offset applies to the GPR operand
417 // not the FPR operand.
418 assert(RegImm.Imm == 0);
419 unsigned Opcode = RISCV::FPR32RegClass.contains(RegImm.Reg)
420 ? RISCV::FSGNJ_S
421 : RISCV::FSGNJ_D;
422 BuildMI(MBB, MI, MI.getDebugLoc(), TII.get(Opcode), NewReg)
423 .addReg(RegImm.Reg)
424 .addReg(RegImm.Reg);
425 }
426
427 // Update the set of instructions to use the compressed register and
428 // compressible offset instead. These instructions should now be
429 // compressible.
430 // TODO: Update all uses if RegImm.Imm == 0? Not just those that are
431 // expected to become compressible.
432 for (MachineInstr *UpdateMI : MIs)
433 updateOperands(*UpdateMI, RegImm, NewReg);
434 }
435 }
436 return true;
437}
438
439/// Returns an instance of the Make Compressible Optimization pass.
441 return new RISCVMakeCompressibleOpt();
442}
MachineBasicBlock & MBB
#define LLVM_DEBUG(X)
Definition: Debug.h:101
const HexagonInstrInfo * TII
IRTranslator LLVM IR MI
#define I(x, y, z)
Definition: MD5.cpp:58
unsigned const TargetRegisterInfo * TRI
#define INITIALIZE_PASS(passName, arg, name, cfg, analysis)
Definition: PassSupport.h:38
static bool isCompressibleLoad(const MachineInstr &MI)
#define RISCV_COMPRESS_INSTRS_NAME
static unsigned offsetMask(unsigned Opcode)
static bool isCompressibleStore(const MachineInstr &MI)
static uint8_t compressedLDSTOffsetMask(unsigned Opcode)
static bool isCompressedReg(Register Reg)
static Register analyzeCompressibleUses(MachineInstr &FirstMI, RegImmPair RegImm, SmallVectorImpl< MachineInstr * > &MIs)
static int64_t getBaseAdjustForCompression(int64_t Offset, unsigned Opcode)
static void updateOperands(MachineInstr &MI, RegImmPair OldRegImm, Register NewReg)
static bool compressibleSPOffset(int64_t Offset, unsigned Opcode)
static RegImmPair getRegImmPairPreventingCompression(const MachineInstr &MI)
This file declares the machine register scavenger class.
assert(ImpDefSCC.getReg()==AMDGPU::SCC &&ImpDefSCC.isDef())
static bool contains(SmallPtrSetImpl< ConstantExpr * > &Cache, ConstantExpr *Expr, Constant *C)
Definition: Value.cpp:469
FunctionPass class - This class is used to implement most global optimizations.
Definition: Pass.h:310
bool hasMinSize() const
Optimize this function for minimum size (-Oz).
Definition: Function.h:702
Instructions::iterator instr_iterator
instr_iterator instr_end()
const MachineFunction * getParent() const
Return the MachineFunction containing this basic block.
StringRef getName() const
Return the name of the corresponding LLVM basic block, or an empty string.
MachineFunctionPass - This class adapts the FunctionPass interface to allow convenient creation of pa...
virtual bool runOnMachineFunction(MachineFunction &MF)=0
runOnMachineFunction - This method must be overloaded to perform the desired machine code transformat...
const TargetSubtargetInfo & getSubtarget() const
getSubtarget - Return the subtarget for which this machine code is being compiled.
Function & getFunction()
Return the LLVM function that this machine code represents.
const MachineInstrBuilder & addImm(int64_t Val) const
Add a new immediate operand.
const MachineInstrBuilder & addReg(Register RegNo, unsigned flags=0, unsigned SubReg=0) const
Add a new virtual register operand.
Representation of each machine instruction.
Definition: MachineInstr.h:69
const MachineBasicBlock * getParent() const
Definition: MachineInstr.h:346
MachineOperand class - Representation of each machine instruction operand.
void setImm(int64_t immVal)
int64_t getImm() const
bool isImm() const
isImm - Tests if this is a MO_Immediate operand.
virtual StringRef getPassName() const
getPassName - Return a nice clean name for a pass.
Definition: Pass.cpp:81
bool hasStdExtCOrZca() const
bool hasStdExtCOrZcfOrZce() const
const RISCVInstrInfo * getInstrInfo() const override
bool hasStdExtCOrZcd() const
void enterBasicBlockEnd(MachineBasicBlock &MBB)
Start tracking liveness from the end of basic block MBB.
void backward()
Update internal register state and move MBB iterator backwards.
Register scavengeRegisterBackwards(const TargetRegisterClass &RC, MachineBasicBlock::iterator To, bool RestoreAfter, int SPAdj, bool AllowSpill=true)
Make a register of the specific register class available from the current position backwards to the p...
Wrapper class representing virtual and physical registers.
Definition: Register.h:19
size_t size() const
Definition: SmallVector.h:91
This class consists of common code factored out of the SmallVector class to reduce code duplication b...
Definition: SmallVector.h:586
void push_back(const T &Elt)
Definition: SmallVector.h:426
This is a 'vector' (really, a variable-sized array), optimized for the case when the array is small.
Definition: SmallVector.h:1209
StringRef - Represent a constant reference to a string, i.e.
Definition: StringRef.h:50
TargetRegisterInfo base class - We assume that the target defines a static array of TargetRegisterDes...
virtual const TargetRegisterInfo * getRegisterInfo() const
getRegisterInfo - If register information is available, return it.
self_iterator getIterator()
Definition: ilist_node.h:132
#define llvm_unreachable(msg)
Marks that the current location is not supposed to be reachable.
unsigned ID
LLVM IR allows to use arbitrary numbers as calling convention identifiers.
Definition: CallingConv.h:24
This is an optimization pass for GlobalISel generic memory operations.
Definition: AddressRanges.h:18
auto drop_begin(T &&RangeOrContainer, size_t N=1)
Return a range covering RangeOrContainer with the first N elements excluded.
Definition: STLExtras.h:329
@ Offset
Definition: DWP.cpp:480
MachineInstrBuilder BuildMI(MachineFunction &MF, const MIMetadata &MIMD, const MCInstrDesc &MCID)
Builder interface. Specify how to create the initial instruction itself.
FunctionPass * createRISCVMakeCompressibleOptPass()
Returns an instance of the Make Compressible Optimization pass.
raw_ostream & dbgs()
dbgs() - This returns a reference to a raw_ostream for debugging messages.
Definition: Debug.cpp:163
Used to describe a register and immediate addition.