RISCVMatInt.cpp
//===- RISCVMatInt.cpp - Immediate materialisation -------------*- C++ -*--===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#include "RISCVMatInt.h"
#include "MCTargetDesc/RISCVMCTargetDesc.h"
#include "llvm/ADT/APInt.h"
#include "llvm/MC/MCInstBuilder.h"
#include "llvm/Support/MathExtras.h"

using namespace llvm;

static int getInstSeqCost(RISCVMatInt::InstSeq &Res, bool HasRVC) {
  if (!HasRVC)
    return Res.size();

  int Cost = 0;
  for (auto Instr : Res) {
    // Assume instructions that aren't listed aren't compressible.
    bool Compressed = false;
    switch (Instr.getOpcode()) {
    case RISCV::QC_E_LI:
      // One 48-bit instruction takes the space of 1.5 regular instructions.
      Cost += 150;
      continue;
    case RISCV::SLLI:
    case RISCV::SRLI:
      Compressed = true;
      break;
    case RISCV::ADDI:
    case RISCV::ADDIW:
    case RISCV::LUI:
      Compressed = isInt<6>(Instr.getImm());
      break;
    }
    // Two RVC instructions take the same space as one RVI instruction, but
    // can take longer to execute than the single RVI instruction. Thus, we
    // consider that two RVC instructions are slightly more costly than one
    // RVI instruction. For longer sequences of RVC instructions the space
    // savings can be worth it, though. The costs below try to model that.
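    // For example, three compressible instructions cost 3 * 70 = 210, which is
    // cheaper than three uncompressed ones (300), even though a single pair of
    // compressed instructions (140) is modelled as slightly worse than one
    // uncompressed instruction (100).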
    if (!Compressed)
      Cost += 100; // Baseline cost of one RVI instruction: 100%.
    else
      Cost += 70; // 70% cost of baseline.
  }
  return Cost;
}

// Recursively generate a sequence for materializing an integer.
static void generateInstSeqImpl(int64_t Val, const MCSubtargetInfo &STI,
                                RISCVMatInt::InstSeq &Res) {
  bool IsRV64 = STI.hasFeature(RISCV::Feature64Bit);

  // Use BSETI for a single bit that can't be expressed by a single LUI or ADDI.
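  // (Within the simm32 range, 0x800 is the only single bit that needs this:
  // ADDI alone covers bits 0-10 and LUI alone covers bits 12-30, while values
  // with only bit 31 or a higher bit set already fail isInt<32>.)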
  if (STI.hasFeature(RISCV::FeatureStdExtZbs) && isPowerOf2_64(Val) &&
      (!isInt<32>(Val) || Val == 0x800)) {
    Res.emplace_back(RISCV::BSETI, Log2_64(Val));
    return;
  }

  if (!IsRV64 && STI.hasFeature(RISCV::FeatureVendorXqcili)) {
    bool FitsOneStandardInst = ((Val & 0xFFF) == 0) || isInt<12>(Val);

    // 20-bit signed immediates that don't fit into `ADDI` or `LUI` should use
    // `QC.LI` (a single 32-bit instruction).
    if (!FitsOneStandardInst && isInt<20>(Val)) {
      Res.emplace_back(RISCV::QC_LI, Val);
      return;
    }

    // 32-bit signed immediates that don't fit into `ADDI`, `LUI` or `QC.LI`
    // should use `QC.E.LI` (a single 48-bit instruction).
    if (!FitsOneStandardInst && isInt<32>(Val)) {
      Res.emplace_back(RISCV::QC_E_LI, Val);
      return;
    }
  }

  if (STI.hasFeature(RISCV::FeatureStdExtP)) {
    // Check if the immediate is a packed i8 or i10.
    int32_t Bit63To32 = Val >> 32;
    int32_t Bit31To0 = Val;
    int16_t Bit31To16 = Bit31To0 >> 16;
    int16_t Bit15To0 = Bit31To0;
    int8_t Bit15To8 = Bit15To0 >> 8;
    int8_t Bit7To0 = Bit15To0;
    if (Bit63To32 == Bit31To0) {
      if (IsRV64 && isInt<10>(Bit63To32)) {
        Res.emplace_back(RISCV::PLI_W, Bit63To32);
        return;
      }
      if (Bit31To16 == Bit15To0) {
        if (isInt<10>(Bit31To16)) {
          Res.emplace_back(RISCV::PLI_H, Bit31To16);
          return;
        }
        if (Bit15To8 == Bit7To0) {
          Res.emplace_back(RISCV::PLI_B, Bit15To8);
          return;
        }
      }
    }
  }

  if (isInt<32>(Val)) {
    // Depending on the active bits in the immediate Value v, the following
    // instruction sequences are emitted:
    //
    // v == 0                        : ADDI
    // v[0,12) != 0 && v[12,32) == 0 : ADDI
    // v[0,12) == 0 && v[12,32) != 0 : LUI
    // v[0,32) != 0                  : LUI+ADDI(W)
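    //
    // For example, Val = 0xFFF yields Hi20 = 1 and Lo12 = -1, i.e.
    // LUI 1; ADDI -1 (0x1000 - 1), which is why 0x800 is added before
    // extracting the upper 20 bits.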
    int64_t Hi20 = ((Val + 0x800) >> 12) & 0xFFFFF;
    int64_t Lo12 = SignExtend64<12>(Val);

    if (Hi20)
      Res.emplace_back(RISCV::LUI, Hi20);

    if (Lo12 || Hi20 == 0) {
      unsigned AddiOpc = RISCV::ADDI;
      if (IsRV64 && Hi20) {
        // Use ADDIW rather than ADDI only when necessary for correctness. As
        // noted in RISCVOptWInstrs, this helps reduce test differences vs
        // RV32 without being a pessimization.
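        // e.g. for Val = 0x7FFFFFFF, Hi20 = 0x80000 sign-extends to
        // 0xFFFFFFFF80000000 on RV64, so ADDI -1 would produce
        // 0xFFFFFFFF7FFFFFFF; ADDIW -1 sign-extends from bit 31 instead and
        // gives the intended 0x7FFFFFFF.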
        int64_t LuiRes = SignExtend64<32>(Hi20 << 12);
        if (!isInt<32>(LuiRes + Lo12))
          AddiOpc = RISCV::ADDIW;
      }
      Res.emplace_back(AddiOpc, Lo12);
    }
    return;
  }

  assert(IsRV64 && "Can't emit >32-bit imm for non-RV64 target");

  // In the worst case, for a full 64-bit constant, a sequence of 8 instructions
  // (i.e., LUI+ADDI+SLLI+ADDI+SLLI+ADDI+SLLI+ADDI) has to be emitted. Note
  // that the first two instructions (LUI+ADDI) can contribute up to 32 bits
  // while the following ADDI instructions contribute up to 12 bits each.
  //
  // At first glance, implementing this seems possible by simply emitting the
  // most significant 32 bits (LUI+ADDI(W)) followed by as many left shifts
  // (SLLI) and immediate additions (ADDI) as needed. However, because ADDI
  // performs a sign-extended addition, doing it like that would only be
  // possible when at most 11 bits of the ADDI instructions are used. Using
  // all 12 bits of the ADDI instructions, as done by GAS, actually requires
  // that the constant is processed starting with the least significant bit.
  //
  // In the following, constants are processed from LSB to MSB but instruction
  // emission is performed from MSB to LSB by recursively calling
  // generateInstSeq. In each recursion, first the lowest 12 bits are removed
  // from the constant and the optimal shift amount, which can be greater than
  // 12 bits if the constant is sparse, is determined. Then, the shifted
  // remaining constant is processed recursively and gets emitted as soon as it
  // fits into 32 bits. The emission of the shifts and additions is subsequently
  // performed when the recursion returns.
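  //
  // For example, generateInstSeqImpl on 0x100000001 peels off Lo12 = 1, shifts
  // the remaining 0x100000000 right by 32 to get 1, recursively emits ADDI 1,
  // and then appends SLLI 32 and ADDI 1 on the way back out. (The caller may
  // still replace this with a shorter Zbs sequence such as ADDI 1; BSETI 32.)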

  int64_t Lo12 = SignExtend64<12>(Val);
  Val = (uint64_t)Val - (uint64_t)Lo12;

  int ShiftAmount = 0;
  bool Unsigned = false;

  // Val might now be valid for LUI without needing a shift.
  if (!isInt<32>(Val)) {
    ShiftAmount = llvm::countr_zero((uint64_t)Val);
    Val >>= ShiftAmount;

    // If the remaining bits don't fit in 12 bits, we might be able to reduce
    // the shift amount in order to use LUI which will zero the lower 12
    // bits.
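    // e.g. for Val = 0x12345000000 the shift amount is 24 and the shifted
    // value 0x12345 needs LUI+ADDI, but shifting by only 12 leaves 0x12345000,
    // which a single LUI can build, so LUI 0x12345; SLLI 12 suffices.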
    if (ShiftAmount > 12 && !isInt<12>(Val)) {
      if (isInt<32>((uint64_t)Val << 12)) {
        // Reduce the shift amount and add zeros to the LSBs so it will match
        // LUI.
        ShiftAmount -= 12;
        Val = (uint64_t)Val << 12;
      } else if (isUInt<32>((uint64_t)Val << 12) &&
                 STI.hasFeature(RISCV::FeatureStdExtZba)) {
        // Reduce the shift amount and add zeros to the LSBs so it will match
        // LUI, then shift left with SLLI.UW to clear the upper 32 set bits.
        ShiftAmount -= 12;
        Val = SignExtend64<32>((uint64_t)Val << 12);
        Unsigned = true;
      }
    }

    // Try to use SLLI_UW for Val when it is uint32 but not int32.
    if (isUInt<32>(Val) && !isInt<32>(Val) &&
        STI.hasFeature(RISCV::FeatureStdExtZba)) {
      // Use LUI+ADDI or LUI to compose, then clear the upper 32 bits with
      // SLLI_UW.
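      // e.g. for 0x8000000100000000 the value here is 0x80000001:
      // sign-extending lets LUI 0x80000; ADDI 1 build 0xFFFFFFFF80000001, and
      // the SLLI_UW 32 emitted below clears bits 63:32 while shifting.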
      Val = SignExtend64<32>((uint64_t)Val);
      Unsigned = true;
    }
  }

  generateInstSeqImpl(Val, STI, Res);

  // Skip the shift if we were able to use LUI directly.
  if (ShiftAmount) {
    unsigned Opc = Unsigned ? RISCV::SLLI_UW : RISCV::SLLI;
    Res.emplace_back(Opc, ShiftAmount);
  }

  if (Lo12)
    Res.emplace_back(RISCV::ADDI, Lo12);
}

static unsigned extractRotateInfo(int64_t Val) {
  // For the case 0b111..1..xxxxxx1..1.. (leading and trailing ones with fewer
  // than 12 other bits in between):
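  // e.g. Val = 0xFF0FFFFFFFFFFFFF: LeadingOnes = 8 and TrailingOnes = 52, so
  // the rotate amount returned is 64 - 52 = 12.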
  unsigned LeadingOnes = llvm::countl_one((uint64_t)Val);
  unsigned TrailingOnes = llvm::countr_one((uint64_t)Val);
  if (TrailingOnes > 0 && TrailingOnes < 64 &&
      (LeadingOnes + TrailingOnes) > (64 - 12))
    return 64 - TrailingOnes;

  // For the case 0bxxx1..1..1...xxx (a run of ones crossing the 32-bit
  // boundary):
  unsigned UpperTrailingOnes = llvm::countr_one(Hi_32(Val));
  unsigned LowerLeadingOnes = llvm::countl_one(Lo_32(Val));
  if (UpperTrailingOnes < 32 &&
      (UpperTrailingOnes + LowerLeadingOnes) > (64 - 12))
    return 32 - UpperTrailingOnes;

  return 0;
}

static void generateInstSeqLeadingZeros(int64_t Val, const MCSubtargetInfo &STI,
                                        RISCVMatInt::InstSeq &Res) {
  assert(Val > 0 && "Expected positive val");

  unsigned LeadingZeros = llvm::countl_zero((uint64_t)Val);
  uint64_t ShiftedVal = (uint64_t)Val << LeadingZeros;
  // Fill in the bits that will be shifted out with 1s. An example where this
  // helps is trailing one masks with 32 or more ones. This will generate
  // ADDI -1 and an SRLI.
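  // e.g. Val = 0xFFFFFFFFFF (40 trailing ones): ShiftedVal becomes all ones,
  // so the sequence is ADDI -1 followed by SRLI 24.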
  ShiftedVal |= maskTrailingOnes<uint64_t>(LeadingZeros);

  RISCVMatInt::InstSeq TmpSeq;
  generateInstSeqImpl(ShiftedVal, STI, TmpSeq);

  // Keep the new sequence if it is an improvement or the original is empty.
  if ((TmpSeq.size() + 1) < Res.size() ||
      (Res.empty() && TmpSeq.size() < 8)) {
    TmpSeq.emplace_back(RISCV::SRLI, LeadingZeros);
    Res = TmpSeq;
  }

  // Some cases can benefit from filling the lower bits with zeros instead.
  ShiftedVal &= maskTrailingZeros<uint64_t>(LeadingZeros);
  TmpSeq.clear();
  generateInstSeqImpl(ShiftedVal, STI, TmpSeq);

  // Keep the new sequence if it is an improvement or the original is empty.
  if ((TmpSeq.size() + 1) < Res.size() ||
      (Res.empty() && TmpSeq.size() < 8)) {
    TmpSeq.emplace_back(RISCV::SRLI, LeadingZeros);
    Res = TmpSeq;
  }

  // If we have exactly 32 leading zeros and Zba, we can try using zext.w at
  // the end of the sequence.
  if (LeadingZeros == 32 && STI.hasFeature(RISCV::FeatureStdExtZba)) {
    // Bit 31 is set, so sign extend to fill the upper bits with 1s.
    uint64_t LeadingOnesVal = SignExtend64<32>(Val);
    TmpSeq.clear();
    generateInstSeqImpl(LeadingOnesVal, STI, TmpSeq);

    // Keep the new sequence if it is an improvement.
    if ((TmpSeq.size() + 1) < Res.size() ||
        (Res.empty() && TmpSeq.size() < 8)) {
      TmpSeq.emplace_back(RISCV::ADD_UW, 0);
      Res = TmpSeq;
    }
  }
}

namespace llvm::RISCVMatInt {
InstSeq generateInstSeq(int64_t Val, const MCSubtargetInfo &STI) {
  RISCVMatInt::InstSeq Res;
  generateInstSeqImpl(Val, STI, Res);

  // If the low 12 bits are non-zero, the first expansion may end with an ADDI
  // or ADDIW. If there are trailing zeros, try generating a sign-extended
  // constant with no trailing zeros and use a final SLLI to restore them.
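  // e.g. Val = 0x1500 first expands to LUI 1; ADDI 0x500, but shifting out the
  // 8 trailing zeros leaves 21, so ADDI 21; SLLI 8 is used instead (and, being
  // a simm6, it compresses to C.LI+C.SLLI when the LUI+ADDI fusion tuning is
  // off).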
  if ((Val & 0xfff) != 0 && (Val & 1) == 0 && Res.size() >= 2) {
    unsigned TrailingZeros = llvm::countr_zero((uint64_t)Val);
    int64_t ShiftedVal = Val >> TrailingZeros;
    // If we can use C.LI+C.SLLI instead of LUI+ADDI(W), prefer that since it's
    // more compressible. But only if LUI+ADDI(W) isn't fusable.
    // NOTE: We don't check for the C extension to minimize differences in
    // generated code.
    bool IsShiftedCompressible =
        isInt<6>(ShiftedVal) && !STI.hasFeature(RISCV::TuneLUIADDIFusion);
    RISCVMatInt::InstSeq TmpSeq;
    generateInstSeqImpl(ShiftedVal, STI, TmpSeq);

    // Keep the new sequence if it is an improvement.
    if ((TmpSeq.size() + 1) < Res.size() || IsShiftedCompressible) {
      TmpSeq.emplace_back(RISCV::SLLI, TrailingZeros);
      Res = TmpSeq;
    }
  }

  // If we have a 1 or 2 instruction sequence this is the best we can do. This
  // will always be true for RV32 and will often be true for RV64.
  if (Res.size() <= 2)
    return Res;

  assert(STI.hasFeature(RISCV::Feature64Bit) &&
         "Expected RV32 to only need 2 instructions");

  // If the lower 13 bits are something like 0x17ff, try to add 1 to change the
  // lower 13 bits to 0x1800. We can restore this with an ADDI of -1 at the end
  // of the sequence. Call generateInstSeqImpl on the new constant which may
  // subtract 0xfffffffffffff800 to create another ADDI. This will leave a
  // constant with more than 12 trailing zeros for the next recursive step.
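  // e.g. a value ending in 0x17ff gets Imm12 = -1 and an adjusted value ending
  // in 0x1800; its expansion then subtracts -0x800, leaving at least 13
  // trailing zeros for the recursion to shift over.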
  if ((Val & 0xfff) != 0 && (Val & 0x1800) == 0x1000) {
    int64_t Imm12 = -(0x800 - (Val & 0xfff));
    int64_t AdjustedVal = Val - Imm12;
    RISCVMatInt::InstSeq TmpSeq;
    generateInstSeqImpl(AdjustedVal, STI, TmpSeq);

    // Keep the new sequence if it is an improvement.
    if ((TmpSeq.size() + 1) < Res.size()) {
      TmpSeq.emplace_back(RISCV::ADDI, Imm12);
      Res = TmpSeq;
    }
  }

  // If the constant is positive we might be able to generate a shifted constant
  // with no leading zeros and use a final SRLI to restore them.
  if (Val > 0 && Res.size() > 2) {
    generateInstSeqLeadingZeros(Val, STI, Res);
  }

  // If the constant is negative, try inverting it and using our leading zero
  // optimizations. Use an XORI to invert the final value.
  if (Val < 0 && Res.size() > 3) {
    uint64_t InvertedVal = ~(uint64_t)Val;
    RISCVMatInt::InstSeq TmpSeq;
    generateInstSeqLeadingZeros(InvertedVal, STI, TmpSeq);

    // Keep it if we found a sequence that is smaller after inverting.
    if (!TmpSeq.empty() && (TmpSeq.size() + 1) < Res.size()) {
      TmpSeq.emplace_back(RISCV::XORI, -1);
      Res = TmpSeq;
    }
  }

  // If the low and high halves are the same, use PACK. The PACK instruction
  // packs the XLEN/2-bit lower halves of rs1 and rs2 into rd, with rs1 in the
  // lower half and rs2 in the upper half.
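  // e.g. Val = 0x1234567812345678 with Zbkb can become
  // LUI 0x12345; ADDI 0x678; PACK, provided that beats the generic expansion.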
  if (Res.size() > 2 && STI.hasFeature(RISCV::FeatureStdExtZbkb)) {
    int64_t LoVal = SignExtend64<32>(Val);
    int64_t HiVal = SignExtend64<32>(Val >> 32);
    if (LoVal == HiVal) {
      RISCVMatInt::InstSeq TmpSeq;
      generateInstSeqImpl(LoVal, STI, TmpSeq);
      if ((TmpSeq.size() + 1) < Res.size()) {
        TmpSeq.emplace_back(RISCV::PACK, 0);
        Res = TmpSeq;
      }
    }
  }

  // Perform optimization with BSETI in the Zbs extension.
  if (Res.size() > 2 && STI.hasFeature(RISCV::FeatureStdExtZbs)) {
    // Create a simm32 value for LUI+ADDI(W) by forcing the upper 33 bits to
    // zero. XOR that with the original value to get which bits should be set
    // by BSETI.
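    // e.g. Val = 0x1000001234 (bit 36 plus a small low part) can become
    // LUI 1; ADDI 0x234; BSETI 36.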
    uint64_t Lo = Val & 0x7fffffff;
    uint64_t Hi = Val ^ Lo;
    assert(Hi != 0);
    RISCVMatInt::InstSeq TmpSeq;

    if (Lo != 0)
      generateInstSeqImpl(Lo, STI, TmpSeq);

    if (TmpSeq.size() + llvm::popcount(Hi) < Res.size()) {
      do {
        TmpSeq.emplace_back(RISCV::BSETI, llvm::countr_zero(Hi));
        Hi &= (Hi - 1); // Clear lowest set bit.
      } while (Hi != 0);
      Res = TmpSeq;
    }

    // Fold LI 1 + SLLI into BSETI.
    if (Res[0].getOpcode() == RISCV::ADDI && Res[0].getImm() == 1 &&
        Res[1].getOpcode() == RISCV::SLLI) {
      Res.erase(Res.begin());                                 // Remove ADDI.
      Res.front() = Inst(RISCV::BSETI, Res.front().getImm()); // Patch SLLI.
    }
  }

  // Perform optimization with BCLRI in the Zbs extension.
  if (Res.size() > 2 && STI.hasFeature(RISCV::FeatureStdExtZbs)) {
    // Create a simm32 value for LUI+ADDI(W) by forcing the upper 33 bits to
    // one. XOR that with the original value to get which bits should be
    // cleared by BCLRI.
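    // e.g. Val = 0xFFFFFFEFFFFFFFFF (all ones except bit 36) can become
    // ADDI -1; BCLRI 36.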
    uint64_t Lo = Val | 0xffffffff80000000;
    uint64_t Hi = Val ^ Lo;
    assert(Hi != 0);

    RISCVMatInt::InstSeq TmpSeq;
    generateInstSeqImpl(Lo, STI, TmpSeq);

    if (TmpSeq.size() + llvm::popcount(Hi) < Res.size()) {
      do {
        TmpSeq.emplace_back(RISCV::BCLRI, llvm::countr_zero(Hi));
        Hi &= (Hi - 1); // Clear lowest set bit.
      } while (Hi != 0);
      Res = TmpSeq;
    }
  }

  // Perform optimization with SH*ADD in the Zba extension.
  if (Res.size() > 2 && STI.hasFeature(RISCV::FeatureStdExtZba)) {
    int64_t Div = 0;
    unsigned Opc = 0;
    RISCVMatInt::InstSeq TmpSeq;
    // Select the opcode and divisor.
    if ((Val % 3) == 0 && isInt<32>(Val / 3)) {
      Div = 3;
      Opc = RISCV::SH1ADD;
    } else if ((Val % 5) == 0 && isInt<32>(Val / 5)) {
      Div = 5;
      Opc = RISCV::SH2ADD;
    } else if ((Val % 9) == 0 && isInt<32>(Val / 9)) {
      Div = 9;
      Opc = RISCV::SH3ADD;
    }
    // Build the new instruction sequence.
    if (Div > 0) {
      generateInstSeqImpl(Val / Div, STI, TmpSeq);
      if ((TmpSeq.size() + 1) < Res.size()) {
        TmpSeq.emplace_back(Opc, 0);
        Res = TmpSeq;
      }
    } else {
      // Try to use LUI+SH*ADD+ADDI.
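      // e.g. Val = 0x47FFF7005: Hi52 = 0x47FFF7000 = 9 * 0x7FFFF000 and
      // Lo12 = 5, giving the candidate LUI 0x7FFFF; SH3ADD; ADDI 5 if that is
      // shorter than the sequence found so far.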
      int64_t Hi52 = ((uint64_t)Val + 0x800ull) & ~0xfffull;
      int64_t Lo12 = SignExtend64<12>(Val);
      Div = 0;
      if (isInt<32>(Hi52 / 3) && (Hi52 % 3) == 0) {
        Div = 3;
        Opc = RISCV::SH1ADD;
      } else if (isInt<32>(Hi52 / 5) && (Hi52 % 5) == 0) {
        Div = 5;
        Opc = RISCV::SH2ADD;
      } else if (isInt<32>(Hi52 / 9) && (Hi52 % 9) == 0) {
        Div = 9;
        Opc = RISCV::SH3ADD;
      }
      // Build the new instruction sequence.
      if (Div > 0) {
        // A Val with zero Lo12 (which implies Val equals Hi52) should have
        // already been turned into LUI+SH*ADD by the previous optimization.
        assert(Lo12 != 0 &&
               "unexpected instruction sequence for immediate materialisation");
        assert(TmpSeq.empty() && "Expected empty TmpSeq");
        generateInstSeqImpl(Hi52 / Div, STI, TmpSeq);
        if ((TmpSeq.size() + 2) < Res.size()) {
          TmpSeq.emplace_back(Opc, 0);
          TmpSeq.emplace_back(RISCV::ADDI, Lo12);
          Res = TmpSeq;
        }
      }
    }
  }

  // Perform optimization with RORI in the Zbb extension and TH.SRRI in the
  // XTheadBb extension.
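  // e.g. Val = 0xFF0FFFFFFFFFFFFF rotates to the simm12 -16, so the whole
  // constant can be built as ADDI -16 followed by RORI (or TH.SRRI) 12.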
  if (Res.size() > 2 && (STI.hasFeature(RISCV::FeatureStdExtZbb) ||
                         STI.hasFeature(RISCV::FeatureVendorXTHeadBb))) {
    if (unsigned Rotate = extractRotateInfo(Val)) {
      RISCVMatInt::InstSeq TmpSeq;
      uint64_t NegImm12 = llvm::rotl<uint64_t>(Val, Rotate);
      assert(isInt<12>(NegImm12));
      TmpSeq.emplace_back(RISCV::ADDI, NegImm12);
      TmpSeq.emplace_back(STI.hasFeature(RISCV::FeatureStdExtZbb)
                              ? RISCV::RORI
                              : RISCV::TH_SRRI,
                          Rotate);
      Res = TmpSeq;
    }
  }
  return Res;
}

void generateMCInstSeq(int64_t Val, const MCSubtargetInfo &STI,
                       MCRegister DestReg, SmallVectorImpl<MCInst> &Insts) {
  RISCVMatInt::InstSeq Seq = RISCVMatInt::generateInstSeq(Val, STI);

  MCRegister SrcReg = RISCV::X0;
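  // e.g. materializing 0x12345678 with a5 as the destination produces:
  //   lui  a5, 0x12345
  //   addi a5, a5, 0x678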
  for (RISCVMatInt::Inst &Inst : Seq) {
    switch (Inst.getOpndKind()) {
    case RISCVMatInt::Imm:
      Insts.push_back(MCInstBuilder(Inst.getOpcode())
                          .addReg(DestReg)
                          .addImm(Inst.getImm()));
      break;
    case RISCVMatInt::RegX0:
      Insts.push_back(MCInstBuilder(Inst.getOpcode())
                          .addReg(DestReg)
                          .addReg(SrcReg)
                          .addReg(RISCV::X0));
      break;
    case RISCVMatInt::RegReg:
      Insts.push_back(MCInstBuilder(Inst.getOpcode())
                          .addReg(DestReg)
                          .addReg(SrcReg)
                          .addReg(SrcReg));
      break;
    case RISCVMatInt::RegImm:
      Insts.push_back(MCInstBuilder(Inst.getOpcode())
                          .addReg(DestReg)
                          .addReg(SrcReg)
                          .addImm(Inst.getImm()));
      break;
    }

    // Only the first instruction has X0 as its source.
    SrcReg = DestReg;
  }
}

InstSeq generateTwoRegInstSeq(int64_t Val, const MCSubtargetInfo &STI,
                              unsigned &ShiftAmt, unsigned &AddOpc) {
  int64_t LoVal = SignExtend64<32>(Val);
  if (LoVal == 0)
    return RISCVMatInt::InstSeq();

  // Subtract the LoVal to emulate the effect of the final ADD.
  uint64_t Tmp = (uint64_t)Val - (uint64_t)LoVal;
  assert(Tmp != 0);

  // Use trailing zero counts to figure out how far we need to shift LoVal to
  // line up with the remaining constant.
  // TODO: This algorithm assumes all non-zero bits in the low 32 bits of the
  // final constant come from LoVal.
  unsigned TzLo = llvm::countr_zero((uint64_t)LoVal);
  unsigned TzHi = llvm::countr_zero(Tmp);
  assert(TzLo < 32 && TzHi >= 32);
  ShiftAmt = TzHi - TzLo;
  AddOpc = RISCV::ADD;

  if (Tmp == ((uint64_t)LoVal << ShiftAmt))
    return RISCVMatInt::generateInstSeq(LoVal, STI);

  // If we have Zba, we can use (ADD_UW X, (SLLI X, 32)).
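  // e.g. Val = 0x8765432187654321: both 32-bit halves are 0x87654321, so LoVal
  // (0xFFFFFFFF87654321) is materialized once and the caller combines it as
  // ADD_UW of the value with itself shifted left by 32.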
  if (STI.hasFeature(RISCV::FeatureStdExtZba) && Lo_32(Val) == Hi_32(Val)) {
    ShiftAmt = 32;
    AddOpc = RISCV::ADD_UW;
    return RISCVMatInt::generateInstSeq(LoVal, STI);
  }

  return RISCVMatInt::InstSeq();
}

int getIntMatCost(const APInt &Val, unsigned Size, const MCSubtargetInfo &STI,
                  bool CompressionCost, bool FreeZeroes) {
  bool IsRV64 = STI.hasFeature(RISCV::Feature64Bit);
  bool HasRVC = CompressionCost && STI.hasFeature(RISCV::FeatureStdExtZca);
  int PlatRegSize = IsRV64 ? 64 : 32;

  // Split the constant into platform register sized chunks, and calculate the
  // cost of each chunk.
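  // e.g. a 128-bit constant on RV64 is costed as two 64-bit chunks; with
  // FreeZeroes set, all-zero chunks are skipped.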
  int Cost = 0;
  for (unsigned ShiftVal = 0; ShiftVal < Size; ShiftVal += PlatRegSize) {
    APInt Chunk = Val.ashr(ShiftVal).sextOrTrunc(PlatRegSize);
    if (FreeZeroes && Chunk.getSExtValue() == 0)
      continue;
    InstSeq MatSeq = generateInstSeq(Chunk.getSExtValue(), STI);
    Cost += getInstSeqCost(MatSeq, HasRVC);
  }
  return std::max(FreeZeroes ? 0 : 1, Cost);
}

OpndKind Inst::getOpndKind() const {
  switch (Opc) {
  default:
    llvm_unreachable("Unexpected opcode!");
  case RISCV::LUI:
  case RISCV::QC_LI:
  case RISCV::QC_E_LI:
  case RISCV::PLI_B:
  case RISCV::PLI_H:
  case RISCV::PLI_W:
    return RISCVMatInt::Imm;
  case RISCV::ADD_UW:
    return RISCVMatInt::RegX0;
  case RISCV::SH1ADD:
  case RISCV::SH2ADD:
  case RISCV::SH3ADD:
  case RISCV::PACK:
    return RISCVMatInt::RegReg;
  case RISCV::ADDI:
  case RISCV::ADDIW:
  case RISCV::XORI:
  case RISCV::SLLI:
  case RISCV::SRLI:
  case RISCV::SLLI_UW:
  case RISCV::RORI:
  case RISCV::BSETI:
  case RISCV::BCLRI:
  case RISCV::TH_SRRI:
    return RISCVMatInt::RegImm;
  }
}

} // namespace llvm::RISCVMatInt