LLVM 23.0.0git
RISCVMergeBaseOffset.cpp
Go to the documentation of this file.
1//===----- RISCVMergeBaseOffset.cpp - Optimise address calculations ------===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8//
9// Merge the offset of address calculation into the offset field
10// of instructions in a global address lowering sequence.
11//
12//===----------------------------------------------------------------------===//
13
#include "RISCV.h"
#include "RISCVTargetMachine.h"
#include "llvm/CodeGen/MachineFunctionPass.h"
#include "llvm/CodeGen/Passes.h"
#include "llvm/MC/TargetRegistry.h"
#include "llvm/Support/Debug.h"
#include "llvm/Target/TargetOptions.h"
#include <optional>
22using namespace llvm;
23
24#define DEBUG_TYPE "riscv-merge-base-offset"
25#define RISCV_MERGE_BASE_OFFSET_NAME "RISC-V Merge Base Offset"
26namespace {
27
28class RISCVMergeBaseOffsetOpt : public MachineFunctionPass {
29 const RISCVSubtarget *ST = nullptr;
31
32public:
33 static char ID;
34 bool runOnMachineFunction(MachineFunction &Fn) override;
35 bool detectFoldable(MachineInstr &Hi, MachineInstr *&Lo);
36
37 bool detectAndFoldOffset(MachineInstr &Hi, MachineInstr &Lo);
38 bool foldOffset(MachineInstr &Hi, MachineInstr &Lo, MachineInstr &Tail,
39 int64_t Offset);
40 bool foldLargeOffset(MachineInstr &Hi, MachineInstr &Lo,
41 MachineInstr &TailAdd, Register GSReg);
42 bool foldShiftedOffset(MachineInstr &Hi, MachineInstr &Lo,
43 MachineInstr &TailShXAdd, Register GSReg);
44
45 bool foldIntoMemoryOps(MachineInstr &Hi, MachineInstr &Lo);
46
47 RISCVMergeBaseOffsetOpt() : MachineFunctionPass(ID) {}
48
49 MachineFunctionProperties getRequiredProperties() const override {
50 return MachineFunctionProperties().setIsSSA();
51 }
52
53 void getAnalysisUsage(AnalysisUsage &AU) const override {
54 AU.setPreservesCFG();
56 }
57
58 StringRef getPassName() const override {
60 }
61};
62} // end anonymous namespace
63
// Pass identification; the address of ID is the unique pass identity.
char RISCVMergeBaseOffsetOpt::ID = 0;
INITIALIZE_PASS(RISCVMergeBaseOffsetOpt, DEBUG_TYPE,
                RISCV_MERGE_BASE_OFFSET_NAME, false, false)
67
68// Detect either of the patterns:
69//
70// 1. (medlow pattern):
71// a. lui vreg1, %hi(s)
72// addi vreg2, vreg1, %lo(s)
73//
74// b. qc.e.li vreg1, s
75//
76// 2. (medany pattern):
77// .Lpcrel_hi1:
78// auipc vreg1, %pcrel_hi(s)
79// addi vreg2, vreg1, %pcrel_lo(.Lpcrel_hi1)
80//
81// The pattern is only accepted if:
82// 1) The first instruction has only one use, which is the ADDI.
83// 2) The address operands have the appropriate type, reflecting the
84// lowering of a global address or constant pool using medlow or medany.
85// 3) The offset value in the Global Address or Constant Pool is 0.
86bool RISCVMergeBaseOffsetOpt::detectFoldable(MachineInstr &Hi,
87 MachineInstr *&Lo) {
88 auto HiOpc = Hi.getOpcode();
89 if (HiOpc != RISCV::LUI && HiOpc != RISCV::AUIPC &&
90 HiOpc != RISCV::PseudoMovAddr && HiOpc != RISCV::QC_E_LI)
91 return false;
92
93 const MachineOperand &HiOp1 = Hi.getOperand(1);
94 unsigned ExpectedFlags = HiOpc == RISCV::AUIPC ? RISCVII::MO_PCREL_HI
95 : HiOpc == RISCV::QC_E_LI ? RISCVII::MO_None
97 if (HiOp1.getTargetFlags() != ExpectedFlags)
98 return false;
99
100 if (!(HiOp1.isGlobal() || HiOp1.isCPI() || HiOp1.isBlockAddress()) ||
101 HiOp1.getOffset() != 0)
102 return false;
103
104 if (HiOpc == RISCV::PseudoMovAddr || HiOpc == RISCV::QC_E_LI) {
105 // Most of the code should handle it correctly without modification by
106 // setting Lo and Hi both point to PseudoMovAddr/QC_E_LI
107 Lo = &Hi;
108 } else {
109 Register HiDestReg = Hi.getOperand(0).getReg();
110 if (!MRI->hasOneUse(HiDestReg))
111 return false;
112
113 Lo = &*MRI->use_instr_begin(HiDestReg);
114 if (Lo->getOpcode() != RISCV::ADDI)
115 return false;
116 }
117
118 if (HiOpc != RISCV::QC_E_LI) {
119 const MachineOperand &LoOp2 = Lo->getOperand(2);
120 if (HiOpc == RISCV::LUI || HiOpc == RISCV::PseudoMovAddr) {
121 if (LoOp2.getTargetFlags() != RISCVII::MO_LO ||
122 !(LoOp2.isGlobal() || LoOp2.isCPI() || LoOp2.isBlockAddress()) ||
123 LoOp2.getOffset() != 0)
124 return false;
125 } else {
126 assert(HiOpc == RISCV::AUIPC);
127 if (LoOp2.getTargetFlags() != RISCVII::MO_PCREL_LO ||
129 return false;
130 }
131 }
132
133 if (HiOp1.isGlobal()) {
134 LLVM_DEBUG(dbgs() << " Found lowered global address: "
135 << *HiOp1.getGlobal() << "\n");
136 } else if (HiOp1.isBlockAddress()) {
137 LLVM_DEBUG(dbgs() << " Found lowered basic address: "
138 << *HiOp1.getBlockAddress() << "\n");
139 } else if (HiOp1.isCPI()) {
140 LLVM_DEBUG(dbgs() << " Found lowered constant pool: " << HiOp1.getIndex()
141 << "\n");
142 }
143
144 return true;
145}
146
147// Update the offset in Hi and Lo instructions.
148// Delete the tail instruction and update all the uses to use the
149// output from Lo.
150bool RISCVMergeBaseOffsetOpt::foldOffset(MachineInstr &Hi, MachineInstr &Lo,
151 MachineInstr &Tail, int64_t Offset) {
152 assert(isInt<32>(Offset) && "Unexpected offset");
153
154 // If Hi is an AUIPC, don't fold the offset if it is outside the bounds of
155 // the global object. The object may be within 2GB of the PC, but addresses
156 // outside of the object might not be.
157 auto HiOpc = Hi.getOpcode();
158 if (HiOpc == RISCV::AUIPC && Hi.getOperand(1).isGlobal()) {
159 const GlobalValue *GV = Hi.getOperand(1).getGlobal();
160 Type *Ty = GV->getValueType();
161 if (!Ty->isSized() || Offset < 0 ||
162 (uint64_t)Offset > GV->getDataLayout().getTypeAllocSize(Ty))
163 return false;
164 }
165
166 // Put the offset back in Hi and the Lo
167 Hi.getOperand(1).setOffset(Offset);
168 if (Hi.getOpcode() != RISCV::AUIPC && Hi.getOpcode() != RISCV::QC_E_LI)
169 Lo.getOperand(2).setOffset(Offset);
170 // Delete the tail instruction.
171 Register LoOp0Reg = Lo.getOperand(0).getReg();
172 Register TailOp0Reg = Tail.getOperand(0).getReg();
173 MRI->constrainRegClass(LoOp0Reg, MRI->getRegClass(TailOp0Reg));
174 MRI->replaceRegWith(TailOp0Reg, LoOp0Reg);
175 Tail.eraseFromParent();
176 LLVM_DEBUG(dbgs() << " Merged offset " << Offset << " into base.\n"
177 << " " << Hi << " " << Lo;);
178 return true;
179}
180
181// Detect patterns for large offsets that are passed into an ADD instruction.
182// If the pattern is found, updates the offset in Hi and Lo instructions
183// and deletes TailAdd and the instructions that produced the offset.
184//
185// Base address lowering is of the form:
186// Hi: lui vreg1, %hi(s)
187// Lo: addi vreg2, vreg1, %lo(s)
188// / \
189// / \
190// / \
191// / The large offset can be of two forms: \
192// 1) Offset that has non zero bits in lower 2) Offset that has non zero
193// 12 bits and upper 20 bits bits in upper 20 bits only
194// OffseLUI: lui vreg3, 4
195// OffsetTail: addi voff, vreg3, 188 OffsetTail: lui voff, 128
196// \ /
197// \ /
198// \ /
199// \ /
200// TailAdd: add vreg4, vreg2, voff
201bool RISCVMergeBaseOffsetOpt::foldLargeOffset(MachineInstr &Hi,
202 MachineInstr &Lo,
203 MachineInstr &TailAdd,
204 Register GAReg) {
205 assert((TailAdd.getOpcode() == RISCV::ADD) && "Expected ADD instruction!");
206 Register Rs = TailAdd.getOperand(1).getReg();
207 Register Rt = TailAdd.getOperand(2).getReg();
208 Register Reg = Rs == GAReg ? Rt : Rs;
209
210 // Can't fold if the register has more than one use.
211 if (!Reg.isVirtual() || !MRI->hasOneUse(Reg))
212 return false;
213 // This can point to an ADDI(W) or a LUI:
214 MachineInstr &OffsetTail = *MRI->getVRegDef(Reg);
215 auto OffsetTailOpc = OffsetTail.getOpcode();
216 if (OffsetTailOpc == RISCV::ADDI || OffsetTailOpc == RISCV::ADDIW) {
217 // The offset value has non zero bits in both %hi and %lo parts.
218 // Detect an ADDI that feeds from a LUI instruction.
219 MachineOperand &AddiImmOp = OffsetTail.getOperand(2);
220 if (AddiImmOp.getTargetFlags() != RISCVII::MO_None)
221 return false;
222 Register AddiReg = OffsetTail.getOperand(1).getReg();
223 int64_t OffLo = AddiImmOp.getImm();
224
225 // Handle rs1 of ADDI is X0.
226 if (AddiReg == RISCV::X0) {
227 LLVM_DEBUG(dbgs() << " Offset Instrs: " << OffsetTail);
228 if (!foldOffset(Hi, Lo, TailAdd, OffLo))
229 return false;
230 OffsetTail.eraseFromParent();
231 return true;
232 }
233
234 MachineInstr &OffsetLui = *MRI->getVRegDef(AddiReg);
235 MachineOperand &LuiImmOp = OffsetLui.getOperand(1);
236 if (OffsetLui.getOpcode() != RISCV::LUI ||
237 LuiImmOp.getTargetFlags() != RISCVII::MO_None ||
238 !MRI->hasOneUse(OffsetLui.getOperand(0).getReg()))
239 return false;
240 int64_t Offset = SignExtend64<32>(LuiImmOp.getImm() << 12);
241 Offset += OffLo;
242 // RV32 ignores the upper 32 bits. ADDIW sign extends the result.
243 if (!ST->is64Bit() || OffsetTailOpc == RISCV::ADDIW)
245 // We can only fold simm32 offsets.
246 if (!isInt<32>(Offset))
247 return false;
248 LLVM_DEBUG(dbgs() << " Offset Instrs: " << OffsetTail
249 << " " << OffsetLui);
250 if (!foldOffset(Hi, Lo, TailAdd, Offset))
251 return false;
252 OffsetTail.eraseFromParent();
253 OffsetLui.eraseFromParent();
254 return true;
255 } else if (OffsetTailOpc == RISCV::LUI) {
256 // The offset value has all zero bits in the lower 12 bits. Only LUI
257 // exists.
258 LLVM_DEBUG(dbgs() << " Offset Instr: " << OffsetTail);
259 int64_t Offset = SignExtend64<32>(OffsetTail.getOperand(1).getImm() << 12);
260 if (!foldOffset(Hi, Lo, TailAdd, Offset))
261 return false;
262 OffsetTail.eraseFromParent();
263 return true;
264 }
265 return false;
266}
267
268// Detect patterns for offsets that are passed into a SHXADD instruction.
269// The offset has 1, 2, or 3 trailing zeros and fits in simm13, simm14, simm15.
270// The constant is created with addi voff, x0, C, and shXadd is used to
271// fill insert the trailing zeros and do the addition.
272// If the pattern is found, updates the offset in Hi and Lo instructions
273// and deletes TailShXAdd and the instructions that produced the offset.
274//
275// Hi: lui vreg1, %hi(s)
276// Lo: addi vreg2, vreg1, %lo(s)
277// OffsetTail: addi voff, x0, C
278// TailAdd: shXadd vreg4, voff, vreg2
279bool RISCVMergeBaseOffsetOpt::foldShiftedOffset(MachineInstr &Hi,
280 MachineInstr &Lo,
281 MachineInstr &TailShXAdd,
282 Register GAReg) {
283 assert((TailShXAdd.getOpcode() == RISCV::SH1ADD ||
284 TailShXAdd.getOpcode() == RISCV::SH2ADD ||
285 TailShXAdd.getOpcode() == RISCV::SH3ADD) &&
286 "Expected SHXADD instruction!");
287
288 if (GAReg != TailShXAdd.getOperand(2).getReg())
289 return false;
290
291 // The first source is the shifted operand.
292 Register Rs1 = TailShXAdd.getOperand(1).getReg();
293
294 // Can't fold if the register has more than one use.
295 if (!Rs1.isVirtual() || !MRI->hasOneUse(Rs1))
296 return false;
297 // This can point to an ADDI X0, C.
298 MachineInstr &OffsetTail = *MRI->getVRegDef(Rs1);
299 if (OffsetTail.getOpcode() != RISCV::ADDI)
300 return false;
301 if (!OffsetTail.getOperand(1).isReg() ||
302 OffsetTail.getOperand(1).getReg() != RISCV::X0 ||
303 !OffsetTail.getOperand(2).isImm())
304 return false;
305
306 int64_t Offset = OffsetTail.getOperand(2).getImm();
307 assert(isInt<12>(Offset) && "Unexpected offset");
308
309 unsigned ShAmt;
310 switch (TailShXAdd.getOpcode()) {
311 default: llvm_unreachable("Unexpected opcode");
312 case RISCV::SH1ADD: ShAmt = 1; break;
313 case RISCV::SH2ADD: ShAmt = 2; break;
314 case RISCV::SH3ADD: ShAmt = 3; break;
315 }
316
317 Offset = (uint64_t)Offset << ShAmt;
318
319 LLVM_DEBUG(dbgs() << " Offset Instr: " << OffsetTail);
320 if (!foldOffset(Hi, Lo, TailShXAdd, Offset))
321 return false;
322 OffsetTail.eraseFromParent();
323 return true;
324}
325
// Try to fold an offset computed by the single arithmetic user of Lo's
// result into the Hi/Lo address materialization, erasing the arithmetic.
bool RISCVMergeBaseOffsetOpt::detectAndFoldOffset(MachineInstr &Hi,
                                                  MachineInstr &Lo) {
  Register DestReg = Lo.getOperand(0).getReg();

  // Look for arithmetic instructions we can get an offset from.
  // We might be able to remove the arithmetic instructions by folding the
  // offset into the LUI+ADDI.
  if (!MRI->hasOneUse(DestReg))
    return false;

  // Lo has only one use.
  MachineInstr &Tail = *MRI->use_instr_begin(DestReg);
  switch (Tail.getOpcode()) {
  default:
    LLVM_DEBUG(dbgs() << "Don't know how to get offset from this instr:"
                      << Tail);
    break;
  case RISCV::ADDI:
  case RISCV::QC_E_ADDI:
  case RISCV::QC_E_ADDAI: {
    // Offset is simply an immediate operand.
    // NOTE(review): assumes operand 2 holds the immediate for the QC_E_*
    // forms as well — confirm against the Xqci instruction definitions.
    int64_t Offset = Tail.getOperand(2).getImm();
    if (Tail.getOpcode() == RISCV::ADDI) {
      // We might have two ADDIs in a row.
      Register TailDestReg = Tail.getOperand(0).getReg();
      if (MRI->hasOneUse(TailDestReg)) {
        MachineInstr &TailTail = *MRI->use_instr_begin(TailDestReg);
        if (TailTail.getOpcode() == RISCV::ADDI) {
          // Fold the combined immediate into TailTail first; foldOffset
          // rewires TailTail's users, after which the middle ADDI (Tail)
          // is dead and safe to erase.
          Offset += TailTail.getOperand(2).getImm();
          LLVM_DEBUG(dbgs() << "  Offset Instrs: " << Tail << TailTail);
          if (!foldOffset(Hi, Lo, TailTail, Offset))
            return false;
          Tail.eraseFromParent();
          return true;
        }
      }
    }

    LLVM_DEBUG(dbgs() << "  Offset Instr: " << Tail);
    return foldOffset(Hi, Lo, Tail, Offset);
  }
  case RISCV::ADD:
    // The offset is too large to fit in the immediate field of ADDI.
    // This can be in two forms:
    // 1) LUI hi_Offset followed by:
    //    ADDI lo_offset
    //    This happens in case the offset has non zero bits in
    //    both hi 20 and lo 12 bits.
    // 2) LUI (offset20)
    //    This happens in case the lower 12 bits of the offset are zeros.
    return foldLargeOffset(Hi, Lo, Tail, DestReg);
  case RISCV::SH1ADD:
  case RISCV::SH2ADD:
  case RISCV::SH3ADD:
    // The offset is too large to fit in the immediate field of ADDI.
    // It may be encoded as (SH2ADD (ADDI X0, C), DestReg) or
    // (SH3ADD (ADDI X0, C), DestReg).
    return foldShiftedOffset(Hi, Lo, Tail, DestReg);
  }

  return false;
}
388
389bool RISCVMergeBaseOffsetOpt::foldIntoMemoryOps(MachineInstr &Hi,
390 MachineInstr &Lo) {
391 Register DestReg = Lo.getOperand(0).getReg();
392
393 // If all the uses are memory ops with the same offset, we can transform:
394 //
395 // 1. (medlow pattern):
396 // a. Hi: lui vreg1, %hi(foo) ---> lui vreg1, %hi(foo+8)
397 // Lo: addi vreg2, vreg1, %lo(foo) ---> lw vreg3, lo(foo+8)(vreg1)
398 // Tail: lw vreg3, 8(vreg2)
399 //
400 // b. Hi: qc.e.li vreg1, foo ---> qc.e.li vreg1, foo+8
401 // Tail: lw vreg2, 8(vreg1) ---> lw vreg2, 0(vreg1)
402 //
403 // 2. (medany pattern):
404 // Hi: 1:auipc vreg1, %pcrel_hi(s) ---> auipc vreg1, %pcrel_hi(foo+8)
405 // Lo: addi vreg2, vreg1, %pcrel_lo(1b) ---> lw vreg3, %pcrel_lo(1b)(vreg1)
406 // Tail: lw vreg3, 8(vreg2)
407
408 std::optional<int64_t> CommonOffset;
409 DenseMap<const MachineInstr *, SmallVector<unsigned>>
410 InlineAsmMemoryOpIndexesMap;
411 for (const MachineInstr &UseMI : MRI->use_instructions(DestReg)) {
412 switch (UseMI.getOpcode()) {
413 default:
414 LLVM_DEBUG(dbgs() << "Not a load or store instruction: " << UseMI);
415 return false;
416 case RISCV::LB:
417 case RISCV::LH:
418 case RISCV::LH_INX:
419 case RISCV::LW:
420 case RISCV::LW_INX:
421 case RISCV::LBU:
422 case RISCV::LHU:
423 case RISCV::LWU:
424 case RISCV::LD:
425 case RISCV::LD_RV32:
426 case RISCV::FLH:
427 case RISCV::FLW:
428 case RISCV::FLD:
429 case RISCV::SB:
430 case RISCV::SH:
431 case RISCV::SH_INX:
432 case RISCV::SW:
433 case RISCV::SW_INX:
434 case RISCV::SD:
435 case RISCV::SD_RV32:
436 case RISCV::FSH:
437 case RISCV::FSW:
438 case RISCV::FSD: {
439 if (UseMI.getOperand(1).isFI())
440 return false;
441 // Register defined by Lo should not be the value register.
442 if (DestReg == UseMI.getOperand(0).getReg())
443 return false;
444 assert(DestReg == UseMI.getOperand(1).getReg() &&
445 "Expected base address use");
446 // All load/store instructions must use the same offset.
447 int64_t Offset = UseMI.getOperand(2).getImm();
448 if (CommonOffset && Offset != CommonOffset)
449 return false;
450 CommonOffset = Offset;
451 break;
452 }
453 case RISCV::INLINEASM:
454 case RISCV::INLINEASM_BR: {
455 SmallVector<unsigned> InlineAsmMemoryOpIndexes;
456 unsigned NumOps = 0;
457 for (unsigned I = InlineAsm::MIOp_FirstOperand;
458 I < UseMI.getNumOperands(); I += 1 + NumOps) {
459 const MachineOperand &FlagsMO = UseMI.getOperand(I);
460 // Should be an imm.
461 if (!FlagsMO.isImm())
462 continue;
463
464 const InlineAsm::Flag Flags(FlagsMO.getImm());
465 NumOps = Flags.getNumOperandRegisters();
466
467 // Memory constraints have two operands.
468 if (NumOps != 2 || !Flags.isMemKind()) {
469 // If the register is used by something other than a memory
470 // constraint, we should not fold.
471 for (unsigned J = 0; J < NumOps; ++J) {
472 const MachineOperand &MO = UseMI.getOperand(I + 1 + J);
473 if (MO.isReg() && MO.getReg() == DestReg)
474 return false;
475 }
476 continue;
477 }
478
479 // We can't do this for constraint A because AMO instructions don't have
480 // an immediate offset field.
481 if (Flags.getMemoryConstraintID() == InlineAsm::ConstraintCode::A)
482 return false;
483
484 const MachineOperand &AddrMO = UseMI.getOperand(I + 1);
485 if (!AddrMO.isReg() || AddrMO.getReg() != DestReg)
486 continue;
487
488 const MachineOperand &OffsetMO = UseMI.getOperand(I + 2);
489 if (!OffsetMO.isImm())
490 continue;
491
492 // All inline asm memory operands must use the same offset.
493 int64_t Offset = OffsetMO.getImm();
494 if (CommonOffset && Offset != CommonOffset)
495 return false;
496 CommonOffset = Offset;
497 InlineAsmMemoryOpIndexes.push_back(I + 1);
498 }
499 InlineAsmMemoryOpIndexesMap.insert(
500 std::make_pair(&UseMI, InlineAsmMemoryOpIndexes));
501 break;
502 }
503 }
504 }
505
506 // We found a common offset.
507 // Update the offsets in global address lowering.
508 // We may have already folded some arithmetic so we need to add to any
509 // existing offset.
510 int64_t NewOffset = Hi.getOperand(1).getOffset() + *CommonOffset;
511 // RV32 ignores the upper 32 bits.
512 if (!ST->is64Bit())
513 NewOffset = SignExtend64<32>(NewOffset);
514 // We can only fold simm32 offsets.
515 if (!isInt<32>(NewOffset))
516 return false;
517
518 Hi.getOperand(1).setOffset(NewOffset);
519 MachineOperand &ImmOp =
520 Hi.getOpcode() == RISCV::QC_E_LI ? Lo.getOperand(1) : Lo.getOperand(2);
521 auto HiOpc = Hi.getOpcode();
522 // Expand PseudoMovAddr into LUI
523 if (HiOpc == RISCV::PseudoMovAddr) {
524 auto *TII = ST->getInstrInfo();
525 Hi.setDesc(TII->get(RISCV::LUI));
526 Hi.removeOperand(2);
527 }
528
529 if (HiOpc != RISCV::AUIPC)
530 ImmOp.setOffset(NewOffset);
531
532 // Update the immediate in the load/store instructions to add the offset.
533 for (MachineInstr &UseMI :
534 llvm::make_early_inc_range(MRI->use_instructions(DestReg))) {
535 if (UseMI.getOpcode() == RISCV::INLINEASM ||
536 UseMI.getOpcode() == RISCV::INLINEASM_BR) {
537 auto &InlineAsmMemoryOpIndexes = InlineAsmMemoryOpIndexesMap[&UseMI];
538 for (unsigned I : InlineAsmMemoryOpIndexes) {
539 MachineOperand &MO = UseMI.getOperand(I + 1);
540 switch (ImmOp.getType()) {
542 MO.ChangeToGA(ImmOp.getGlobal(), ImmOp.getOffset(),
543 ImmOp.getTargetFlags());
544 break;
546 MO.ChangeToMCSymbol(ImmOp.getMCSymbol(), ImmOp.getTargetFlags());
547 MO.setOffset(ImmOp.getOffset());
548 break;
550 MO.ChangeToBA(ImmOp.getBlockAddress(), ImmOp.getOffset(),
551 ImmOp.getTargetFlags());
552 break;
553 default:
554 report_fatal_error("unsupported machine operand type");
555 break;
556 }
557 }
558 } else {
559 if (Hi.getOpcode() == RISCV::QC_E_LI) {
560 UseMI.getOperand(2).ChangeToImmediate(0);
561 } else {
562 UseMI.removeOperand(2);
563 UseMI.addOperand(ImmOp);
564 }
565 }
566 }
567
568 // Prevent Lo (originally PseudoMovAddr, which is also pointed by Hi) from
569 // being erased
570 if (&Lo == &Hi)
571 return true;
572
573 MRI->replaceRegWith(Lo.getOperand(0).getReg(), Hi.getOperand(0).getReg());
574 Lo.eraseFromParent();
575 return true;
576}
577
578bool RISCVMergeBaseOffsetOpt::runOnMachineFunction(MachineFunction &Fn) {
579 if (skipFunction(Fn.getFunction()))
580 return false;
581
582 ST = &Fn.getSubtarget<RISCVSubtarget>();
583
584 bool MadeChange = false;
585 MRI = &Fn.getRegInfo();
586 for (MachineBasicBlock &MBB : Fn) {
587 LLVM_DEBUG(dbgs() << "MBB: " << MBB.getName() << "\n");
588 for (MachineInstr &Hi : MBB) {
589 MachineInstr *Lo = nullptr;
590 if (!detectFoldable(Hi, Lo))
591 continue;
592 MadeChange |= detectAndFoldOffset(Hi, *Lo);
593 MadeChange |= foldIntoMemoryOps(Hi, *Lo);
594 }
595 }
596
597 return MadeChange;
598}
599
600/// Returns an instance of the Merge Base Offset Optimization pass.
602 return new RISCVMergeBaseOffsetOpt();
603}
unsigned const MachineRegisterInfo * MRI
MachineInstrBuilder & UseMI
assert(UImm &&(UImm !=~static_cast< T >(0)) &&"Invalid immediate!")
MachineBasicBlock & MBB
#define DEBUG_TYPE
const HexagonInstrInfo * TII
const size_t AbstractManglingParser< Derived, Alloc >::NumOps
#define I(x, y, z)
Definition MD5.cpp:57
Register Reg
Promote Memory to Register
Definition Mem2Reg.cpp:110
if(PassOpts->AAPipeline)
#define INITIALIZE_PASS(passName, arg, name, cfg, analysis)
Definition PassSupport.h:56
#define RISCV_MERGE_BASE_OFFSET_NAME
#define LLVM_DEBUG(...)
Definition Debug.h:114
Represent the analysis usage information of a pass.
LLVM_ABI void setPreservesCFG()
This function should be called by the pass, iff they do not:
Definition Pass.cpp:270
LLVM_ABI TypeSize getTypeAllocSize(Type *Ty) const
Returns the offset in bytes between successive objects of the specified type, including alignment pad...
std::pair< iterator, bool > insert(const std::pair< KeyT, ValueT > &KV)
Definition DenseMap.h:241
FunctionPass class - This class is used to implement most global optimizations.
Definition Pass.h:314
LLVM_ABI const DataLayout & getDataLayout() const
Get the data layout of the module this global belongs to.
Definition Globals.cpp:132
Type * getValueType() const
LLVM_ABI StringRef getName() const
Return the name of the corresponding LLVM basic block, or an empty string.
MachineFunctionPass - This class adapts the FunctionPass interface to allow convenient creation of pa...
void getAnalysisUsage(AnalysisUsage &AU) const override
getAnalysisUsage - Subclasses that override getAnalysisUsage must call this.
Properties which a MachineFunction may have at a given point in time.
const TargetSubtargetInfo & getSubtarget() const
getSubtarget - Return the subtarget for which this machine code is being compiled.
MachineRegisterInfo & getRegInfo()
getRegInfo - Return information about the registers currently in use.
Function & getFunction()
Return the LLVM function that this machine code represents.
Register getReg(unsigned Idx) const
Get the register for the operand index.
Representation of each machine instruction.
unsigned getOpcode() const
Returns the opcode of this MachineInstr.
LLVM_ABI void eraseFromParent()
Unlink 'this' from the containing basic block and delete it.
const MachineOperand & getOperand(unsigned i) const
MachineOperand class - Representation of each machine instruction operand.
const GlobalValue * getGlobal() const
int64_t getImm() const
bool isReg() const
isReg - Tests if this is a MO_Register operand.
bool isCPI() const
isCPI - Tests if this is a MO_ConstantPoolIndex operand.
LLVM_ABI void ChangeToMCSymbol(MCSymbol *Sym, unsigned TargetFlags=0)
ChangeToMCSymbol - Replace this operand with a new MC symbol operand.
bool isImm() const
isImm - Tests if this is a MO_Immediate operand.
LLVM_ABI void ChangeToGA(const GlobalValue *GV, int64_t Offset, unsigned TargetFlags=0)
ChangeToGA - Replace this operand with a new global address operand.
LLVM_ABI void ChangeToBA(const BlockAddress *BA, int64_t Offset, unsigned TargetFlags=0)
ChangeToBA - Replace this operand with a new block address operand.
const BlockAddress * getBlockAddress() const
void setOffset(int64_t Offset)
unsigned getTargetFlags() const
bool isGlobal() const
isGlobal - Tests if this is a MO_GlobalAddress operand.
MachineOperandType getType() const
getType - Returns the MachineOperandType for this operand.
bool isBlockAddress() const
isBlockAddress - Tests if this is a MO_BlockAddress operand.
Register getReg() const
getReg - Returns the register number.
MCSymbol * getMCSymbol() const
@ MO_MCSymbol
MCSymbol reference (for debug/eh info)
@ MO_GlobalAddress
Address of a global value.
@ MO_BlockAddress
Address of a basic block.
int64_t getOffset() const
Return the offset from the symbol in this operand.
MachineRegisterInfo - Keep track of information for virtual and physical registers,...
const RISCVInstrInfo * getInstrInfo() const override
Wrapper class representing virtual and physical registers.
Definition Register.h:20
constexpr bool isVirtual() const
Return true if the specified register number is in the virtual register namespace.
Definition Register.h:79
void push_back(const T &Elt)
StringRef - Represent a constant reference to a string, i.e.
Definition StringRef.h:55
bool isSized(SmallPtrSetImpl< Type * > *Visited=nullptr) const
Return true if it makes sense to take the size of this type.
Definition Type.h:311
#define llvm_unreachable(msg)
Marks that the current location is not supposed to be reachable.
unsigned ID
LLVM IR allows to use arbitrary numbers as calling convention identifiers.
Definition CallingConv.h:24
@ Tail
Attemps to make calls as fast as possible while guaranteeing that tail call optimization can always b...
Definition CallingConv.h:76
This is an optimization pass for GlobalISel generic memory operations.
Definition Types.h:26
@ Offset
Definition DWP.cpp:532
constexpr bool isInt(int64_t x)
Checks if an integer fits into the given bit width.
Definition MathExtras.h:165
iterator_range< early_inc_iterator_impl< detail::IterOfRange< RangeT > > > make_early_inc_range(RangeT &&Range)
Make a range that does early increment to allow mutation of the underlying range without disrupting i...
Definition STLExtras.h:632
FunctionPass * createRISCVMergeBaseOffsetOptPass()
Returns an instance of the Merge Base Offset Optimization pass.
LLVM_ABI raw_ostream & dbgs()
dbgs() - This returns a reference to a raw_ostream for debugging messages.
Definition Debug.cpp:207
LLVM_ABI void report_fatal_error(Error Err, bool gen_crash_diag=true)
Definition Error.cpp:167
constexpr int64_t SignExtend64(uint64_t x)
Sign-extend the number in the bottom B bits of X to a 64-bit integer.
Definition MathExtras.h:572