LLVM 23.0.0git
RISCVLegalizerInfo.cpp
1//===-- RISCVLegalizerInfo.cpp ----------------------------------*- C++ -*-===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8/// \file
9/// This file implements the targeting of the MachineLegalizer class for RISC-V.
10/// \todo This should be generated by TableGen.
11//===----------------------------------------------------------------------===//
12
13#include "RISCVLegalizerInfo.h"
14#include "MCTargetDesc/RISCVMatInt.h"
15#include "RISCVMachineFunctionInfo.h"
16#include "RISCVSubtarget.h"
29#include "llvm/IR/Intrinsics.h"
30#include "llvm/IR/IntrinsicsRISCV.h"
31#include "llvm/IR/Type.h"
32
33using namespace llvm;
34using namespace LegalityPredicates;
35using namespace LegalizeMutations;
36
37static LegalityPredicate
38typeIsLegalIntOrFPVec(unsigned TypeIdx,
39 std::initializer_list<LLT> IntOrFPVecTys,
40 const RISCVSubtarget &ST) {
41 LegalityPredicate P = [=, &ST](const LegalityQuery &Query) {
42 return ST.hasVInstructions() &&
43 (Query.Types[TypeIdx].getScalarSizeInBits() != 64 ||
44 ST.hasVInstructionsI64()) &&
45 (Query.Types[TypeIdx].getElementCount().getKnownMinValue() != 1 ||
46 ST.getELen() == 64);
47 };
48
49 return all(typeInSet(TypeIdx, IntOrFPVecTys), P);
50}
51
52static LegalityPredicate
53typeIsLegalBoolVec(unsigned TypeIdx, std::initializer_list<LLT> BoolVecTys,
54 const RISCVSubtarget &ST) {
55 LegalityPredicate P = [=, &ST](const LegalityQuery &Query) {
56 return ST.hasVInstructions() &&
57 (Query.Types[TypeIdx].getElementCount().getKnownMinValue() != 1 ||
58 ST.getELen() == 64);
59 };
60 return all(typeInSet(TypeIdx, BoolVecTys), P);
61}
62
63static LegalityPredicate typeIsLegalPtrVec(unsigned TypeIdx,
64 std::initializer_list<LLT> PtrVecTys,
65 const RISCVSubtarget &ST) {
66 LegalityPredicate P = [=, &ST](const LegalityQuery &Query) {
67 return ST.hasVInstructions() &&
68 (Query.Types[TypeIdx].getElementCount().getKnownMinValue() != 1 ||
69 ST.getELen() == 64) &&
70 (Query.Types[TypeIdx].getElementCount().getKnownMinValue() != 16 ||
71 Query.Types[TypeIdx].getScalarSizeInBits() == 32);
72 };
73 return all(typeInSet(TypeIdx, PtrVecTys), P);
74}
75
76RISCVLegalizerInfo::RISCVLegalizerInfo(const RISCVSubtarget &ST)
77 : STI(ST), XLen(STI.getXLen()), sXLen(LLT::scalar(XLen)) {
78 const LLT sDoubleXLen = LLT::scalar(2 * XLen);
79 const LLT p0 = LLT::pointer(0, XLen);
80 const LLT s1 = LLT::scalar(1);
81 const LLT s8 = LLT::scalar(8);
82 const LLT s16 = LLT::scalar(16);
83 const LLT s32 = LLT::scalar(32);
84 const LLT s64 = LLT::scalar(64);
85 const LLT s128 = LLT::scalar(128);
86
87 const LLT nxv1s1 = LLT::scalable_vector(1, s1);
88 const LLT nxv2s1 = LLT::scalable_vector(2, s1);
89 const LLT nxv4s1 = LLT::scalable_vector(4, s1);
90 const LLT nxv8s1 = LLT::scalable_vector(8, s1);
91 const LLT nxv16s1 = LLT::scalable_vector(16, s1);
92 const LLT nxv32s1 = LLT::scalable_vector(32, s1);
93 const LLT nxv64s1 = LLT::scalable_vector(64, s1);
94
95 const LLT nxv1s8 = LLT::scalable_vector(1, s8);
96 const LLT nxv2s8 = LLT::scalable_vector(2, s8);
97 const LLT nxv4s8 = LLT::scalable_vector(4, s8);
98 const LLT nxv8s8 = LLT::scalable_vector(8, s8);
99 const LLT nxv16s8 = LLT::scalable_vector(16, s8);
100 const LLT nxv32s8 = LLT::scalable_vector(32, s8);
101 const LLT nxv64s8 = LLT::scalable_vector(64, s8);
102
103 const LLT nxv1s16 = LLT::scalable_vector(1, s16);
104 const LLT nxv2s16 = LLT::scalable_vector(2, s16);
105 const LLT nxv4s16 = LLT::scalable_vector(4, s16);
106 const LLT nxv8s16 = LLT::scalable_vector(8, s16);
107 const LLT nxv16s16 = LLT::scalable_vector(16, s16);
108 const LLT nxv32s16 = LLT::scalable_vector(32, s16);
109
110 const LLT nxv1s32 = LLT::scalable_vector(1, s32);
111 const LLT nxv2s32 = LLT::scalable_vector(2, s32);
112 const LLT nxv4s32 = LLT::scalable_vector(4, s32);
113 const LLT nxv8s32 = LLT::scalable_vector(8, s32);
114 const LLT nxv16s32 = LLT::scalable_vector(16, s32);
115
116 const LLT nxv1s64 = LLT::scalable_vector(1, s64);
117 const LLT nxv2s64 = LLT::scalable_vector(2, s64);
118 const LLT nxv4s64 = LLT::scalable_vector(4, s64);
119 const LLT nxv8s64 = LLT::scalable_vector(8, s64);
120
121 const LLT nxv1p0 = LLT::scalable_vector(1, p0);
122 const LLT nxv2p0 = LLT::scalable_vector(2, p0);
123 const LLT nxv4p0 = LLT::scalable_vector(4, p0);
124 const LLT nxv8p0 = LLT::scalable_vector(8, p0);
125 const LLT nxv16p0 = LLT::scalable_vector(16, p0);
126
127 using namespace TargetOpcode;
128
129 auto BoolVecTys = {nxv1s1, nxv2s1, nxv4s1, nxv8s1, nxv16s1, nxv32s1, nxv64s1};
130
131 auto IntOrFPVecTys = {nxv1s8, nxv2s8, nxv4s8, nxv8s8, nxv16s8, nxv32s8,
132 nxv64s8, nxv1s16, nxv2s16, nxv4s16, nxv8s16, nxv16s16,
133 nxv32s16, nxv1s32, nxv2s32, nxv4s32, nxv8s32, nxv16s32,
134 nxv1s64, nxv2s64, nxv4s64, nxv8s64};
135
136 auto PtrVecTys = {nxv1p0, nxv2p0, nxv4p0, nxv8p0, nxv16p0};
137
138 getActionDefinitionsBuilder({G_ADD, G_SUB})
139 .legalFor({sXLen})
140 .legalIf(typeIsLegalIntOrFPVec(0, IntOrFPVecTys, ST))
141 .customFor(ST.is64Bit(), {s32})
142 .widenScalarToNextPow2(0)
143 .clampScalar(0, sXLen, sXLen);
144
145 getActionDefinitionsBuilder({G_AND, G_OR, G_XOR})
146 .legalFor({sXLen})
147 .legalIf(typeIsLegalIntOrFPVec(0, IntOrFPVecTys, ST))
148 .widenScalarToNextPow2(0)
149 .clampScalar(0, sXLen, sXLen);
150
151 getActionDefinitionsBuilder(
152 {G_UADDE, G_UADDO, G_USUBE, G_USUBO, G_READ_REGISTER, G_WRITE_REGISTER})
153 .lower();
154
155 getActionDefinitionsBuilder({G_SADDE, G_SADDO, G_SSUBE, G_SSUBO})
156 .minScalar(0, sXLen)
157 .lower();
158
159 // TODO: Use Vector Single-Width Saturating Instructions for vector types.
160 getActionDefinitionsBuilder(
161 {G_UADDSAT, G_SADDSAT, G_USUBSAT, G_SSUBSAT, G_SSHLSAT, G_USHLSAT})
162 .lower();
163
164 getActionDefinitionsBuilder({G_SHL, G_ASHR, G_LSHR})
165 .legalFor({{sXLen, sXLen}})
166 .customFor(ST.is64Bit(), {{s32, s32}})
167 .widenScalarToNextPow2(0)
168 .clampScalar(1, sXLen, sXLen)
169 .clampScalar(0, sXLen, sXLen);
170
171 getActionDefinitionsBuilder({G_ZEXT, G_SEXT, G_ANYEXT})
172 .legalFor({{s32, s16}})
173 .legalFor(ST.is64Bit(), {{s64, s16}, {s64, s32}})
174 .legalIf(all(typeIsLegalIntOrFPVec(0, IntOrFPVecTys, ST),
175 typeIsLegalIntOrFPVec(1, IntOrFPVecTys, ST)))
176 .customIf(typeIsLegalBoolVec(1, BoolVecTys, ST))
177 .maxScalar(0, sXLen);
178
179 getActionDefinitionsBuilder(G_SEXT_INREG)
180 .customFor({sXLen})
181 .clampScalar(0, sXLen, sXLen)
182 .lower();
183
184 // Merge/Unmerge
185 for (unsigned Op : {G_MERGE_VALUES, G_UNMERGE_VALUES}) {
186 auto &MergeUnmergeActions = getActionDefinitionsBuilder(Op);
187 unsigned BigTyIdx = Op == G_MERGE_VALUES ? 0 : 1;
188 unsigned LitTyIdx = Op == G_MERGE_VALUES ? 1 : 0;
189 if (XLen == 32 && ST.hasStdExtD()) {
190 MergeUnmergeActions.legalIf(
191 all(typeIs(BigTyIdx, s64), typeIs(LitTyIdx, s32)));
192 }
193 MergeUnmergeActions.widenScalarToNextPow2(LitTyIdx, XLen)
194 .widenScalarToNextPow2(BigTyIdx, XLen)
195 .clampScalar(LitTyIdx, sXLen, sXLen)
196 .clampScalar(BigTyIdx, sXLen, sXLen);
197 }
198
199 getActionDefinitionsBuilder({G_FSHL, G_FSHR}).lower();
200
201 getActionDefinitionsBuilder({G_ROTR, G_ROTL})
202 .legalFor(ST.hasStdExtZbb() || ST.hasStdExtZbkb(), {{sXLen, sXLen}})
203 .customFor(ST.is64Bit() && (ST.hasStdExtZbb() || ST.hasStdExtZbkb()),
204 {{s32, s32}})
205 .lower();
206
207 getActionDefinitionsBuilder(G_BITREVERSE).maxScalar(0, sXLen).lower();
208
209 getActionDefinitionsBuilder(G_BITCAST).legalIf(
210 all(LegalityPredicates::any(typeIsLegalIntOrFPVec(0, IntOrFPVecTys, ST),
211 typeIsLegalBoolVec(0, BoolVecTys, ST)),
212 LegalityPredicates::any(typeIsLegalIntOrFPVec(1, IntOrFPVecTys, ST),
213 typeIsLegalBoolVec(1, BoolVecTys, ST))));
214
215 auto &BSWAPActions = getActionDefinitionsBuilder(G_BSWAP);
216 if (ST.hasStdExtZbb() || ST.hasStdExtZbkb())
217 BSWAPActions.legalFor({sXLen}).clampScalar(0, sXLen, sXLen);
218 else
219 BSWAPActions.maxScalar(0, sXLen).lower();
220
221 auto &CountZerosActions = getActionDefinitionsBuilder({G_CTLZ, G_CTTZ});
222 auto &CountZerosUndefActions =
223 getActionDefinitionsBuilder({G_CTLZ_ZERO_UNDEF, G_CTTZ_ZERO_UNDEF});
224 if (ST.hasStdExtZbb()) {
225 CountZerosActions.legalFor({{sXLen, sXLen}})
226 .customFor({{s32, s32}})
227 .clampScalar(0, s32, sXLen)
228 .widenScalarToNextPow2(0)
229 .scalarSameSizeAs(1, 0);
230 } else {
231 CountZerosActions.maxScalar(0, sXLen).scalarSameSizeAs(1, 0).lower();
232 CountZerosUndefActions.maxScalar(0, sXLen).scalarSameSizeAs(1, 0);
233 }
234 CountZerosUndefActions.lower();
235
236 auto &CountSignActions = getActionDefinitionsBuilder(G_CTLS);
237 if (ST.hasStdExtP()) {
238 CountSignActions.legalFor({{sXLen, sXLen}})
239 .customFor({{s32, s32}})
240 .clampScalar(0, s32, sXLen)
241 .widenScalarToNextPow2(0)
242 .scalarSameSizeAs(1, 0);
243 } else {
244 CountSignActions.maxScalar(0, sXLen).scalarSameSizeAs(1, 0).lower();
245 }
246
247 auto &CTPOPActions = getActionDefinitionsBuilder(G_CTPOP);
248 if (ST.hasStdExtZbb()) {
249 CTPOPActions.legalFor({{sXLen, sXLen}})
250 .clampScalar(0, sXLen, sXLen)
251 .scalarSameSizeAs(1, 0);
252 } else {
253 CTPOPActions.widenScalarToNextPow2(0, /*Min*/ 8)
254 .clampScalar(0, s8, sXLen)
255 .scalarSameSizeAs(1, 0)
256 .lower();
257 }
258
259 getActionDefinitionsBuilder(G_CONSTANT)
260 .legalFor({p0})
261 .legalFor(!ST.is64Bit(), {s32})
262 .customFor(ST.is64Bit(), {s64})
263 .widenScalarToNextPow2(0)
264 .clampScalar(0, sXLen, sXLen);
265
266 // TODO: transform illegal vector types into legal vector type
267 getActionDefinitionsBuilder(G_FREEZE)
268 .legalFor({s16, s32, p0})
269 .legalFor(ST.is64Bit(), {s64})
270 .legalIf(typeIsLegalBoolVec(0, BoolVecTys, ST))
271 .legalIf(typeIsLegalIntOrFPVec(0, IntOrFPVecTys, ST))
272 .widenScalarToNextPow2(0)
273 .clampScalar(0, s16, sXLen);
274
275 // TODO: transform illegal vector types into legal vector type
276 // TODO: Merge with G_FREEZE?
277 getActionDefinitionsBuilder(
278 {G_IMPLICIT_DEF, G_CONSTANT_FOLD_BARRIER})
279 .legalFor({s32, sXLen, p0})
280 .legalIf(typeIsLegalBoolVec(0, BoolVecTys, ST))
281 .legalIf(typeIsLegalIntOrFPVec(0, IntOrFPVecTys, ST))
282 .widenScalarToNextPow2(0)
283 .clampScalar(0, s32, sXLen);
284
285 getActionDefinitionsBuilder(G_ICMP)
286 .legalFor({{sXLen, sXLen}, {sXLen, p0}})
287 .legalIf(all(typeIsLegalBoolVec(0, BoolVecTys, ST),
288 typeIsLegalIntOrFPVec(1, IntOrFPVecTys, ST)))
289 .widenScalarOrEltToNextPow2OrMinSize(1, 8)
290 .clampScalar(1, sXLen, sXLen)
291 .clampScalar(0, sXLen, sXLen);
292
293 getActionDefinitionsBuilder(G_SELECT)
294 .legalFor({{s32, sXLen}, {p0, sXLen}})
295 .legalIf(all(typeIsLegalIntOrFPVec(0, IntOrFPVecTys, ST),
296 typeIsLegalBoolVec(1, BoolVecTys, ST)))
297 .legalFor(XLen == 64 || ST.hasStdExtD(), {{s64, sXLen}})
298 .widenScalarToNextPow2(0)
299 .clampScalar(0, s32, (XLen == 64 || ST.hasStdExtD()) ? s64 : s32)
300 .clampScalar(1, sXLen, sXLen);
301
302 auto &LoadActions = getActionDefinitionsBuilder(G_LOAD);
303 auto &StoreActions = getActionDefinitionsBuilder(G_STORE);
304 auto &ExtLoadActions = getActionDefinitionsBuilder({G_SEXTLOAD, G_ZEXTLOAD});
305
306 // Return the alignment needed for scalar memory ops. If unaligned scalar mem
307 // is supported, we only require byte alignment. Otherwise, we need the memory
308 // op to be natively aligned.
309 auto getScalarMemAlign = [&ST](unsigned Size) {
310 return ST.enableUnalignedScalarMem() ? 8 : Size;
311 };
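// For illustration, the helper above yields e.g. getScalarMemAlign(32) == 8
// when unaligned scalar memory is enabled and 32 otherwise, so the mem-desc
// entries below only demand natural alignment on strict-alignment cores.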
312
313 LoadActions.legalForTypesWithMemDesc(
314 {{s16, p0, s8, getScalarMemAlign(8)},
315 {s32, p0, s8, getScalarMemAlign(8)},
316 {s16, p0, s16, getScalarMemAlign(16)},
317 {s32, p0, s16, getScalarMemAlign(16)},
318 {s32, p0, s32, getScalarMemAlign(32)},
319 {p0, p0, sXLen, getScalarMemAlign(XLen)}});
320 StoreActions.legalForTypesWithMemDesc(
321 {{s16, p0, s8, getScalarMemAlign(8)},
322 {s32, p0, s8, getScalarMemAlign(8)},
323 {s16, p0, s16, getScalarMemAlign(16)},
324 {s32, p0, s16, getScalarMemAlign(16)},
325 {s32, p0, s32, getScalarMemAlign(32)},
326 {p0, p0, sXLen, getScalarMemAlign(XLen)}});
327 ExtLoadActions.legalForTypesWithMemDesc(
328 {{sXLen, p0, s8, getScalarMemAlign(8)},
329 {sXLen, p0, s16, getScalarMemAlign(16)}});
330 if (XLen == 64) {
331 LoadActions.legalForTypesWithMemDesc(
332 {{s64, p0, s8, getScalarMemAlign(8)},
333 {s64, p0, s16, getScalarMemAlign(16)},
334 {s64, p0, s32, getScalarMemAlign(32)},
335 {s64, p0, s64, getScalarMemAlign(64)}});
336 StoreActions.legalForTypesWithMemDesc(
337 {{s64, p0, s8, getScalarMemAlign(8)},
338 {s64, p0, s16, getScalarMemAlign(16)},
339 {s64, p0, s32, getScalarMemAlign(32)},
340 {s64, p0, s64, getScalarMemAlign(64)}});
341 ExtLoadActions.legalForTypesWithMemDesc(
342 {{s64, p0, s32, getScalarMemAlign(32)}});
343 } else if (ST.hasStdExtD()) {
344 LoadActions.legalForTypesWithMemDesc(
345 {{s64, p0, s64, getScalarMemAlign(64)}});
346 StoreActions.legalForTypesWithMemDesc(
347 {{s64, p0, s64, getScalarMemAlign(64)}});
348 }
349
350 // Vector loads/stores.
351 if (ST.hasVInstructions()) {
352 LoadActions.legalForTypesWithMemDesc({{nxv2s8, p0, nxv2s8, 8},
353 {nxv4s8, p0, nxv4s8, 8},
354 {nxv8s8, p0, nxv8s8, 8},
355 {nxv16s8, p0, nxv16s8, 8},
356 {nxv32s8, p0, nxv32s8, 8},
357 {nxv64s8, p0, nxv64s8, 8},
358 {nxv2s16, p0, nxv2s16, 16},
359 {nxv4s16, p0, nxv4s16, 16},
360 {nxv8s16, p0, nxv8s16, 16},
361 {nxv16s16, p0, nxv16s16, 16},
362 {nxv32s16, p0, nxv32s16, 16},
363 {nxv2s32, p0, nxv2s32, 32},
364 {nxv4s32, p0, nxv4s32, 32},
365 {nxv8s32, p0, nxv8s32, 32},
366 {nxv16s32, p0, nxv16s32, 32}});
367 StoreActions.legalForTypesWithMemDesc({{nxv2s8, p0, nxv2s8, 8},
368 {nxv4s8, p0, nxv4s8, 8},
369 {nxv8s8, p0, nxv8s8, 8},
370 {nxv16s8, p0, nxv16s8, 8},
371 {nxv32s8, p0, nxv32s8, 8},
372 {nxv64s8, p0, nxv64s8, 8},
373 {nxv2s16, p0, nxv2s16, 16},
374 {nxv4s16, p0, nxv4s16, 16},
375 {nxv8s16, p0, nxv8s16, 16},
376 {nxv16s16, p0, nxv16s16, 16},
377 {nxv32s16, p0, nxv32s16, 16},
378 {nxv2s32, p0, nxv2s32, 32},
379 {nxv4s32, p0, nxv4s32, 32},
380 {nxv8s32, p0, nxv8s32, 32},
381 {nxv16s32, p0, nxv16s32, 32}});
382
383 if (ST.getELen() == 64) {
384 LoadActions.legalForTypesWithMemDesc({{nxv1s8, p0, nxv1s8, 8},
385 {nxv1s16, p0, nxv1s16, 16},
386 {nxv1s32, p0, nxv1s32, 32}});
387 StoreActions.legalForTypesWithMemDesc({{nxv1s8, p0, nxv1s8, 8},
388 {nxv1s16, p0, nxv1s16, 16},
389 {nxv1s32, p0, nxv1s32, 32}});
390 }
391
392 if (ST.hasVInstructionsI64()) {
393 LoadActions.legalForTypesWithMemDesc({{nxv1s64, p0, nxv1s64, 64},
394 {nxv2s64, p0, nxv2s64, 64},
395 {nxv4s64, p0, nxv4s64, 64},
396 {nxv8s64, p0, nxv8s64, 64}});
397 StoreActions.legalForTypesWithMemDesc({{nxv1s64, p0, nxv1s64, 64},
398 {nxv2s64, p0, nxv2s64, 64},
399 {nxv4s64, p0, nxv4s64, 64},
400 {nxv8s64, p0, nxv8s64, 64}});
401 }
402
403 // We take the custom lowering path if we have scalable vector types with
404 // non-standard alignments.
405 LoadActions.customIf(typeIsLegalIntOrFPVec(0, IntOrFPVecTys, ST));
406 StoreActions.customIf(typeIsLegalIntOrFPVec(0, IntOrFPVecTys, ST));
407
408 // Pointers require that XLen sized elements are legal.
409 if (XLen <= ST.getELen()) {
410 LoadActions.customIf(typeIsLegalPtrVec(0, PtrVecTys, ST));
411 StoreActions.customIf(typeIsLegalPtrVec(0, PtrVecTys, ST));
412 }
413 }
414
415 LoadActions.widenScalarToNextPow2(0, /* MinSize = */ 8)
416 .lowerIfMemSizeNotByteSizePow2()
417 .clampScalar(0, s16, sXLen)
418 .lower();
419 StoreActions
420 .clampScalar(0, s16, sXLen)
421 .lowerIfMemSizeNotByteSizePow2()
422 .lower();
423
424 ExtLoadActions.widenScalarToNextPow2(0).clampScalar(0, sXLen, sXLen).lower();
425
426 getActionDefinitionsBuilder({G_PTR_ADD, G_PTRMASK}).legalFor({{p0, sXLen}});
427
428 getActionDefinitionsBuilder(G_PTRTOINT)
429 .legalFor({{sXLen, p0}})
430 .clampScalar(0, sXLen, sXLen);
431
432 getActionDefinitionsBuilder(G_INTTOPTR)
433 .legalFor({{p0, sXLen}})
434 .clampScalar(1, sXLen, sXLen);
435
436 getActionDefinitionsBuilder(G_BRCOND).legalFor({sXLen}).minScalar(0, sXLen);
437
438 getActionDefinitionsBuilder(G_BRJT).customFor({{p0, sXLen}});
439
440 getActionDefinitionsBuilder(G_BRINDIRECT).legalFor({p0});
441
442 getActionDefinitionsBuilder(G_PHI)
443 .legalFor({p0, s32, sXLen})
444 .widenScalarToNextPow2(0)
445 .clampScalar(0, s32, sXLen);
446
447 getActionDefinitionsBuilder({G_GLOBAL_VALUE, G_JUMP_TABLE, G_CONSTANT_POOL})
448 .legalFor({p0});
449
450 if (ST.hasStdExtZmmul()) {
451 getActionDefinitionsBuilder(G_MUL)
452 .legalFor({sXLen})
453 .widenScalarToNextPow2(0)
454 .clampScalar(0, sXLen, sXLen);
455
456 // clang-format off
457 getActionDefinitionsBuilder({G_SMULH, G_UMULH})
458 .legalFor({sXLen})
459 .lower();
460 // clang-format on
461
462 getActionDefinitionsBuilder({G_SMULO, G_UMULO}).minScalar(0, sXLen).lower();
463 } else {
464 getActionDefinitionsBuilder(G_MUL)
465 .libcallFor({sXLen, sDoubleXLen})
466 .widenScalarToNextPow2(0)
467 .clampScalar(0, sXLen, sDoubleXLen);
468
469 getActionDefinitionsBuilder({G_SMULH, G_UMULH}).lowerFor({sXLen});
470
471 getActionDefinitionsBuilder({G_SMULO, G_UMULO})
472 .minScalar(0, sXLen)
473 // Widen sXLen to sDoubleXLen so we can use a single libcall to get
474 // the low bits for the mul result and high bits to do the overflow
475 // check.
476 .widenScalarIf(typeIs(0, sXLen),
477 LegalizeMutations::changeTo(0, sDoubleXLen))
478 .lower();
479 }
480
481 if (ST.hasStdExtM()) {
482 getActionDefinitionsBuilder({G_SDIV, G_UDIV, G_UREM})
483 .legalFor({sXLen})
484 .customFor({s32})
485 .libcallFor({sDoubleXLen})
486 .clampScalar(0, s32, sDoubleXLen)
487 .widenScalarToNextPow2(0);
488 getActionDefinitionsBuilder(G_SREM)
489 .legalFor({sXLen})
490 .libcallFor({sDoubleXLen})
491 .clampScalar(0, sXLen, sDoubleXLen)
492 .widenScalarToNextPow2(0);
493 } else {
494 getActionDefinitionsBuilder({G_UDIV, G_SDIV, G_UREM, G_SREM})
495 .libcallFor({sXLen, sDoubleXLen})
496 .clampScalar(0, sXLen, sDoubleXLen)
497 .widenScalarToNextPow2(0);
498 }
499
500 // TODO: Use libcall for sDoubleXLen.
501 getActionDefinitionsBuilder({G_SDIVREM, G_UDIVREM}).lower();
502
503 getActionDefinitionsBuilder(G_ABS)
504 .customFor(ST.hasStdExtZbb(), {sXLen})
505 .minScalar(ST.hasStdExtZbb(), 0, sXLen)
506 .lower();
507
508 getActionDefinitionsBuilder({G_ABDS, G_ABDU})
509 .minScalar(ST.hasStdExtZbb(), 0, sXLen)
510 .lower();
511
512 getActionDefinitionsBuilder({G_UMAX, G_UMIN, G_SMAX, G_SMIN})
513 .legalFor(ST.hasStdExtZbb(), {sXLen})
514 .minScalar(ST.hasStdExtZbb(), 0, sXLen)
515 .lower();
516
517 getActionDefinitionsBuilder({G_SCMP, G_UCMP}).lower();
518
519 getActionDefinitionsBuilder(G_FRAME_INDEX).legalFor({p0});
520
521 getActionDefinitionsBuilder({G_MEMCPY, G_MEMMOVE, G_MEMSET}).libcall();
522
523 getActionDefinitionsBuilder({G_DYN_STACKALLOC, G_STACKSAVE, G_STACKRESTORE})
524 .lower();
525
526 // FP Operations
527
528 // FIXME: Support s128 for rv32 when libcall handling is able to use sret.
529 getActionDefinitionsBuilder({G_FADD, G_FSUB, G_FMUL, G_FDIV, G_FMA, G_FSQRT,
530 G_FMAXNUM, G_FMINNUM, G_FMAXIMUMNUM,
531 G_FMINIMUMNUM})
532 .legalFor(ST.hasStdExtF(), {s32})
533 .legalFor(ST.hasStdExtD(), {s64})
534 .legalFor(ST.hasStdExtZfh(), {s16})
535 .libcallFor({s32, s64})
536 .libcallFor(ST.is64Bit(), {s128});
537
538 getActionDefinitionsBuilder({G_FNEG, G_FABS})
539 .legalFor(ST.hasStdExtF(), {s32})
540 .legalFor(ST.hasStdExtD(), {s64})
541 .legalFor(ST.hasStdExtZfh(), {s16})
542 .lowerFor({s32, s64, s128});
543
544 getActionDefinitionsBuilder(G_FREM)
545 .libcallFor({s32, s64})
546 .libcallFor(ST.is64Bit(), {s128})
547 .minScalar(0, s32)
548 .scalarize(0);
549
550 getActionDefinitionsBuilder(G_FCOPYSIGN)
551 .legalFor(ST.hasStdExtF(), {{s32, s32}})
552 .legalFor(ST.hasStdExtD(), {{s64, s64}, {s32, s64}, {s64, s32}})
553 .legalFor(ST.hasStdExtZfh(), {{s16, s16}, {s16, s32}, {s32, s16}})
554 .legalFor(ST.hasStdExtZfh() && ST.hasStdExtD(), {{s16, s64}, {s64, s16}})
555 .lower();
556
557 // FIXME: Use Zfhmin.
558 getActionDefinitionsBuilder(G_FPTRUNC)
559 .legalFor(ST.hasStdExtD(), {{s32, s64}})
560 .legalFor(ST.hasStdExtZfh(), {{s16, s32}})
561 .legalFor(ST.hasStdExtZfh() && ST.hasStdExtD(), {{s16, s64}})
562 .libcallFor({{s32, s64}})
563 .libcallFor(ST.is64Bit(), {{s32, s128}, {s64, s128}});
564 getActionDefinitionsBuilder(G_FPEXT)
565 .legalFor(ST.hasStdExtD(), {{s64, s32}})
566 .legalFor(ST.hasStdExtZfh(), {{s32, s16}})
567 .legalFor(ST.hasStdExtZfh() && ST.hasStdExtD(), {{s64, s16}})
568 .libcallFor({{s64, s32}})
569 .libcallFor(ST.is64Bit(), {{s128, s32}, {s128, s64}});
570
571 getActionDefinitionsBuilder(G_FCMP)
572 .legalFor(ST.hasStdExtF(), {{sXLen, s32}})
573 .legalFor(ST.hasStdExtD(), {{sXLen, s64}})
574 .legalFor(ST.hasStdExtZfh(), {{sXLen, s16}})
575 .clampScalar(0, sXLen, sXLen)
576 .libcallFor({{sXLen, s32}, {sXLen, s64}})
577 .libcallFor(ST.is64Bit(), {{sXLen, s128}});
578
579 // TODO: Support vector version of G_IS_FPCLASS.
580 getActionDefinitionsBuilder(G_IS_FPCLASS)
581 .customFor(ST.hasStdExtF(), {{s1, s32}})
582 .customFor(ST.hasStdExtD(), {{s1, s64}})
583 .customFor(ST.hasStdExtZfh(), {{s1, s16}})
584 .lowerFor({{s1, s32}, {s1, s64}});
585
586 getActionDefinitionsBuilder(G_FCONSTANT)
587 .legalFor(ST.hasStdExtF(), {s32})
588 .legalFor(ST.hasStdExtD(), {s64})
589 .legalFor(ST.hasStdExtZfh(), {s16})
590 .customFor(!ST.is64Bit(), {s32})
591 .customFor(ST.is64Bit(), {s32, s64})
592 .lowerFor({s64, s128});
593
594 getActionDefinitionsBuilder({G_FPTOSI, G_FPTOUI})
595 .legalFor(ST.hasStdExtF(), {{sXLen, s32}})
596 .legalFor(ST.hasStdExtD(), {{sXLen, s64}})
597 .legalFor(ST.hasStdExtZfh(), {{sXLen, s16}})
598 .customFor(ST.is64Bit() && ST.hasStdExtF(), {{s32, s32}})
599 .customFor(ST.is64Bit() && ST.hasStdExtD(), {{s32, s64}})
600 .customFor(ST.is64Bit() && ST.hasStdExtZfh(), {{s32, s16}})
601 .widenScalarToNextPow2(0)
602 .minScalar(0, s32)
603 .libcallFor({{s32, s32}, {s64, s32}, {s32, s64}, {s64, s64}})
604 .libcallFor(ST.is64Bit(), {{s32, s128}, {s64, s128}}) // FIXME RV32.
605 .libcallFor(ST.is64Bit(), {{s128, s32}, {s128, s64}, {s128, s128}});
606
607 getActionDefinitionsBuilder({G_SITOFP, G_UITOFP})
608 .legalFor(ST.hasStdExtF(), {{s32, sXLen}})
609 .legalFor(ST.hasStdExtD(), {{s64, sXLen}})
610 .legalFor(ST.hasStdExtZfh(), {{s16, sXLen}})
611 .widenScalarToNextPow2(1)
612 // Promote to XLen if the operation is legal.
613 .widenScalarIf(
614 [=, &ST](const LegalityQuery &Query) {
615 return Query.Types[0].isScalar() && Query.Types[1].isScalar() &&
616 (Query.Types[1].getSizeInBits() < ST.getXLen()) &&
617 ((ST.hasStdExtF() && Query.Types[0].getSizeInBits() == 32) ||
618 (ST.hasStdExtD() && Query.Types[0].getSizeInBits() == 64) ||
619 (ST.hasStdExtZfh() &&
620 Query.Types[0].getSizeInBits() == 16));
621 },
622 LegalizeMutations::changeTo(1, sXLen))
623 // Otherwise only promote to s32 since we have si libcalls.
624 .minScalar(1, s32)
625 .libcallFor({{s32, s32}, {s64, s32}, {s32, s64}, {s64, s64}})
626 .libcallFor(ST.is64Bit(), {{s128, s32}, {s128, s64}}) // FIXME RV32.
627 .libcallFor(ST.is64Bit(), {{s32, s128}, {s64, s128}, {s128, s128}});
628
629 // FIXME: We can do custom inline expansion like SelectionDAG.
630 getActionDefinitionsBuilder({G_FCEIL, G_FFLOOR, G_FRINT, G_FNEARBYINT,
631 G_INTRINSIC_TRUNC, G_INTRINSIC_ROUND,
632 G_INTRINSIC_ROUNDEVEN})
633 .legalFor(ST.hasStdExtZfa(), {s32})
634 .legalFor(ST.hasStdExtZfa() && ST.hasStdExtD(), {s64})
635 .legalFor(ST.hasStdExtZfa() && ST.hasStdExtZfh(), {s16})
636 .libcallFor({s32, s64})
637 .libcallFor(ST.is64Bit(), {s128});
638
639 getActionDefinitionsBuilder({G_FMAXIMUM, G_FMINIMUM})
640 .legalFor(ST.hasStdExtZfa(), {s32})
641 .legalFor(ST.hasStdExtZfa() && ST.hasStdExtD(), {s64})
642 .legalFor(ST.hasStdExtZfa() && ST.hasStdExtZfh(), {s16});
643
644 getActionDefinitionsBuilder({G_FCOS, G_FSIN, G_FTAN, G_FPOW, G_FLOG, G_FLOG2,
645 G_FLOG10, G_FEXP, G_FEXP2, G_FEXP10, G_FACOS,
646 G_FASIN, G_FATAN, G_FATAN2, G_FCOSH, G_FSINH,
647 G_FTANH, G_FMODF})
648 .libcallFor({s32, s64})
649 .libcallFor(ST.is64Bit(), {s128});
650 getActionDefinitionsBuilder({G_FPOWI, G_FLDEXP})
651 .libcallFor({{s32, s32}, {s64, s32}})
652 .libcallFor(ST.is64Bit(), {{s128, s32}});
653
654 getActionDefinitionsBuilder(G_FCANONICALIZE)
655 .legalFor(ST.hasStdExtF(), {s32})
656 .legalFor(ST.hasStdExtD(), {s64})
657 .legalFor(ST.hasStdExtZfh(), {s16});
658
659 getActionDefinitionsBuilder(G_VASTART).customFor({p0});
660
661 // va_list must be a pointer, but most sized types are pretty easy to handle
662 // as the destination.
663 getActionDefinitionsBuilder(G_VAARG)
664 // TODO: Implement narrowScalar and widenScalar for G_VAARG for types
665 // other than sXLen.
666 .clampScalar(0, sXLen, sXLen)
667 .lowerForCartesianProduct({sXLen, p0}, {p0});
668
669 getActionDefinitionsBuilder(G_VSCALE)
670 .clampScalar(0, sXLen, sXLen)
671 .customFor({sXLen});
672
673 auto &SplatActions =
674 getActionDefinitionsBuilder(G_SPLAT_VECTOR)
675 .legalIf(all(typeIsLegalIntOrFPVec(0, IntOrFPVecTys, ST),
676 typeIs(1, sXLen)))
677 .customIf(all(typeIsLegalBoolVec(0, BoolVecTys, ST), typeIs(1, s1)));
678 // Handle case of s64 element vectors on RV32. If the subtarget does not have
679 // f64, then try to lower it to G_SPLAT_VECTOR_SPLIT_64_VL. If the subtarget
680 // does have f64, then we don't know whether the type is an f64 or an i64,
681 // so mark the G_SPLAT_VECTOR as legal and decide later what to do with it,
682 // depending on how the instructions it consumes are legalized. They are not
683 // legalized yet since legalization is in reverse postorder, so we cannot
684 // make the decision at this moment.
685 if (XLen == 32) {
686 if (ST.hasVInstructionsF64() && ST.hasStdExtD())
687 SplatActions.legalIf(all(
688 typeInSet(0, {nxv1s64, nxv2s64, nxv4s64, nxv8s64}), typeIs(1, s64)));
689 else if (ST.hasVInstructionsI64())
690 SplatActions.customIf(all(
691 typeInSet(0, {nxv1s64, nxv2s64, nxv4s64, nxv8s64}), typeIs(1, s64)));
692 }
693
694 SplatActions.clampScalar(1, sXLen, sXLen);
695
696 LegalityPredicate ExtractSubvecBitcastPred = [=](const LegalityQuery &Query) {
697 LLT DstTy = Query.Types[0];
698 LLT SrcTy = Query.Types[1];
699 return DstTy.getElementType() == LLT::scalar(1) &&
700 DstTy.getElementCount().getKnownMinValue() >= 8 &&
701 SrcTy.getElementCount().getKnownMinValue() >= 8;
702 };
703 getActionDefinitionsBuilder(G_EXTRACT_SUBVECTOR)
704 // We don't have the ability to slide mask vectors down indexed by their
705 // i1 elements; the smallest we can do is i8. Often we are able to bitcast
706 // to equivalent i8 vectors.
707 .bitcastIf(
708 all(typeIsLegalBoolVec(0, BoolVecTys, ST),
709 typeIsLegalBoolVec(1, BoolVecTys, ST), ExtractSubvecBitcastPred),
710 [=](const LegalityQuery &Query) {
711 LLT CastTy = LLT::vector(
712 Query.Types[0].getElementCount().divideCoefficientBy(8), 8);
713 return std::pair(0, CastTy);
714 })
715 .customIf(LegalityPredicates::any(
716 all(typeIsLegalBoolVec(0, BoolVecTys, ST),
717 typeIsLegalBoolVec(1, BoolVecTys, ST)),
718 all(typeIsLegalIntOrFPVec(0, IntOrFPVecTys, ST),
719 typeIsLegalIntOrFPVec(1, IntOrFPVecTys, ST))));
720
721 getActionDefinitionsBuilder(G_INSERT_SUBVECTOR)
722 .customIf(all(typeIsLegalBoolVec(0, BoolVecTys, ST),
723 typeIsLegalBoolVec(1, BoolVecTys, ST)))
724 .customIf(all(typeIsLegalIntOrFPVec(0, IntOrFPVecTys, ST),
725 typeIsLegalIntOrFPVec(1, IntOrFPVecTys, ST)));
726
727 getActionDefinitionsBuilder(G_ATOMIC_CMPXCHG_WITH_SUCCESS)
728 .lowerIf(all(typeInSet(0, {s8, s16, s32, s64}), typeIs(2, p0)));
729
730 getActionDefinitionsBuilder({G_ATOMIC_CMPXCHG, G_ATOMICRMW_ADD})
731 .legalFor(ST.hasStdExtA(), {{sXLen, p0}})
732 .libcallFor(!ST.hasStdExtA(), {{s8, p0}, {s16, p0}, {s32, p0}, {s64, p0}})
733 .clampScalar(0, sXLen, sXLen);
734
735 getActionDefinitionsBuilder(G_ATOMICRMW_SUB)
736 .libcallFor(!ST.hasStdExtA(), {{s8, p0}, {s16, p0}, {s32, p0}, {s64, p0}})
737 .clampScalar(0, sXLen, sXLen)
738 .lower();
739
740 LegalityPredicate InsertVectorEltPred = [=](const LegalityQuery &Query) {
741 LLT VecTy = Query.Types[0];
742 LLT EltTy = Query.Types[1];
743 return VecTy.getElementType() == EltTy;
744 };
745
746 getActionDefinitionsBuilder(G_INSERT_VECTOR_ELT)
747 .legalIf(all(typeIsLegalIntOrFPVec(0, IntOrFPVecTys, ST),
748 InsertVectorEltPred, typeIs(2, sXLen)))
749 .legalIf(all(typeIsLegalBoolVec(0, BoolVecTys, ST), InsertVectorEltPred,
750 typeIs(2, sXLen)));
751
752 getLegacyLegalizerInfo().computeTables();
753 verify(*ST.getInstrInfo());
754}
755
756bool RISCVLegalizerInfo::legalizeIntrinsic(LegalizerHelper &Helper,
757 MachineInstr &MI) const {
758 Intrinsic::ID IntrinsicID = cast<GIntrinsic>(MI).getIntrinsicID();
759
760 if (const RISCVVIntrinsicsTable::RISCVVIntrinsicInfo *II =
761 RISCVVIntrinsicsTable::getRISCVVIntrinsicInfo(IntrinsicID)) {
762 if (II->hasScalarOperand() && !II->IsFPIntrinsic) {
763 MachineIRBuilder &MIRBuilder = Helper.MIRBuilder;
764 MachineRegisterInfo &MRI = *MIRBuilder.getMRI();
765
766 auto OldScalar = MI.getOperand(II->ScalarOperand + 2).getReg();
767 // Legalize integer vx form intrinsic.
768 if (MRI.getType(OldScalar).isScalar()) {
769 if (MRI.getType(OldScalar).getSizeInBits() < sXLen.getSizeInBits()) {
770 Helper.Observer.changingInstr(MI);
771 Helper.widenScalarSrc(MI, sXLen, II->ScalarOperand + 2,
772 TargetOpcode::G_ANYEXT);
773 Helper.Observer.changedInstr(MI);
774 } else if (MRI.getType(OldScalar).getSizeInBits() >
775 sXLen.getSizeInBits()) {
776 // TODO: i64 in riscv32.
777 return false;
778 }
779 }
780 }
781 return true;
782 }
783
784 switch (IntrinsicID) {
785 default:
786 return false;
787 case Intrinsic::vacopy: {
788 // vacopy arguments must be legal because of the intrinsic signature.
789 // No need to check here.
790
791 MachineIRBuilder &MIRBuilder = Helper.MIRBuilder;
792 MachineRegisterInfo &MRI = *MIRBuilder.getMRI();
793 MachineFunction &MF = *MI.getMF();
794 const DataLayout &DL = MIRBuilder.getDataLayout();
795 LLVMContext &Ctx = MF.getFunction().getContext();
796
797 Register DstLst = MI.getOperand(1).getReg();
798 LLT PtrTy = MRI.getType(DstLst);
799
800 // Load the source va_list
801 Align Alignment = DL.getABITypeAlign(getTypeForLLT(PtrTy, Ctx));
802 MachineMemOperand *LoadMMO = MF.getMachineMemOperand(
803 MachinePointerInfo(), MachineMemOperand::MOLoad, PtrTy, Alignment);
804 auto Tmp = MIRBuilder.buildLoad(PtrTy, MI.getOperand(2), *LoadMMO);
805
806 // Store the result in the destination va_list
807 MachineMemOperand *StoreMMO = MF.getMachineMemOperand(
808 MachinePointerInfo(), MachineMemOperand::MOStore, PtrTy, Alignment);
809 MIRBuilder.buildStore(Tmp, DstLst, *StoreMMO);
810
811 MI.eraseFromParent();
812 return true;
813 }
814 case Intrinsic::riscv_vsetvli:
815 case Intrinsic::riscv_vsetvlimax:
816 case Intrinsic::riscv_masked_atomicrmw_add:
817 case Intrinsic::riscv_masked_atomicrmw_sub:
818 case Intrinsic::riscv_masked_cmpxchg:
819 return true;
820 }
821}
822
823bool RISCVLegalizerInfo::legalizeVAStart(MachineInstr &MI,
824 MachineIRBuilder &MIRBuilder) const {
825 // Stores the address of the VarArgsFrameIndex slot into the memory location
826 assert(MI.getOpcode() == TargetOpcode::G_VASTART);
827 MachineFunction *MF = MI.getParent()->getParent();
828 RISCVMachineFunctionInfo *FuncInfo = MF->getInfo<RISCVMachineFunctionInfo>();
829 int FI = FuncInfo->getVarArgsFrameIndex();
830 LLT AddrTy = MIRBuilder.getMRI()->getType(MI.getOperand(0).getReg());
831 auto FINAddr = MIRBuilder.buildFrameIndex(AddrTy, FI);
832 assert(MI.hasOneMemOperand());
833 MIRBuilder.buildStore(FINAddr, MI.getOperand(0).getReg(),
834 *MI.memoperands()[0]);
835 MI.eraseFromParent();
836 return true;
837}
838
839bool RISCVLegalizerInfo::legalizeBRJT(MachineInstr &MI,
840 MachineIRBuilder &MIRBuilder) const {
841 MachineRegisterInfo &MRI = *MIRBuilder.getMRI();
842 auto &MF = *MI.getParent()->getParent();
843 const MachineJumpTableInfo *MJTI = MF.getJumpTableInfo();
844 unsigned EntrySize = MJTI->getEntrySize(MF.getDataLayout());
845
846 Register PtrReg = MI.getOperand(0).getReg();
847 LLT PtrTy = MRI.getType(PtrReg);
848 Register IndexReg = MI.getOperand(2).getReg();
849 LLT IndexTy = MRI.getType(IndexReg);
850
851 if (!isPowerOf2_32(EntrySize))
852 return false;
853
854 auto ShiftAmt = MIRBuilder.buildConstant(IndexTy, Log2_32(EntrySize));
855 IndexReg = MIRBuilder.buildShl(IndexTy, IndexReg, ShiftAmt).getReg(0);
856
857 auto Addr = MIRBuilder.buildPtrAdd(PtrTy, PtrReg, IndexReg);
858
859 MachineMemOperand *MMO = MF.getMachineMemOperand(
860 MachinePointerInfo::getJumpTable(MF), MachineMemOperand::MOLoad,
861 EntrySize, Align(MJTI->getEntryAlignment(MF.getDataLayout())));
862
863 Register TargetReg;
864 switch (MJTI->getEntryKind()) {
865 default:
866 return false;
867 case MachineJumpTableInfo::EK_LabelDifference32: {
868 // For PIC, the sequence is:
869 // BRIND(load(Jumptable + index) + RelocBase)
870 // RelocBase can be JumpTable, GOT or some sort of global base.
871 unsigned LoadOpc =
872 STI.is64Bit() ? TargetOpcode::G_SEXTLOAD : TargetOpcode::G_LOAD;
873 auto Load = MIRBuilder.buildLoadInstr(LoadOpc, IndexTy, Addr, *MMO);
874 TargetReg = MIRBuilder.buildPtrAdd(PtrTy, PtrReg, Load).getReg(0);
875 break;
876 }
877 case MachineJumpTableInfo::EK_Custom32: {
878 auto Load = MIRBuilder.buildLoadInstr(TargetOpcode::G_SEXTLOAD, IndexTy,
879 Addr, *MMO);
880 TargetReg = MIRBuilder.buildIntToPtr(PtrTy, Load).getReg(0);
881 break;
882 }
883 case MachineJumpTableInfo::EK_BlockAddress:
884 TargetReg = MIRBuilder.buildLoad(PtrTy, Addr, *MMO).getReg(0);
885 break;
886 }
887
888 MIRBuilder.buildBrIndirect(TargetReg);
889
890 MI.eraseFromParent();
891 return true;
892}
893
894bool RISCVLegalizerInfo::shouldBeInConstantPool(const APInt &APImm,
895 bool ShouldOptForSize) const {
896 assert(APImm.getBitWidth() == 32 || APImm.getBitWidth() == 64);
897 int64_t Imm = APImm.getSExtValue();
898 // All simm32 constants should be handled by isel.
899 // NOTE: The getMaxBuildIntsCost call below should return a value >= 2 making
900 // this check redundant, but small immediates are common so this check
901 // should have better compile time.
902 if (isInt<32>(Imm))
903 return false;
904
905 // We only need to cost the immediate, if constant pool lowering is enabled.
906 if (!STI.useConstantPoolForLargeInts())
907 return false;
908
909 RISCVMatInt::InstSeq Seq = RISCVMatInt::generateInstSeq(Imm, STI);
910 if (Seq.size() <= STI.getMaxBuildIntsCost())
911 return false;
912
913 // Optimizations below are disabled for opt size. If we're optimizing for
914 // size, use a constant pool.
915 if (ShouldOptForSize)
916 return true;
917 //
918 // Special case. See if we can build the constant as (ADD (SLLI X, C), X);
919 // do that if it will avoid a constant pool.
920 // It will require an extra temporary register though.
921 // If we have Zba we can use (ADD_UW X, (SLLI X, 32)) to handle cases where
922 // low and high 32 bits are the same and bit 31 and 63 are set.
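// For illustration with a hypothetical constant (not taken from this file):
// Imm = 0x0000000A0000000A is not simm32, but its low half repeats in the high
// half, so generateTwoRegInstSeq can build it as ADDI (materialize 0xA), SLLI
// by 32, ADD, i.e. SeqLo.size() + 2 = 3 instructions, which may be cheap
// enough to avoid a constant-pool load.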
923 unsigned ShiftAmt, AddOpc;
924 RISCVMatInt::InstSeq SeqLo =
925 RISCVMatInt::generateTwoRegInstSeq(Imm, STI, ShiftAmt, AddOpc);
926 return !(!SeqLo.empty() && (SeqLo.size() + 2) <= STI.getMaxBuildIntsCost());
927}
928
929bool RISCVLegalizerInfo::legalizeVScale(MachineInstr &MI,
930 MachineIRBuilder &MIB) const {
931 const LLT XLenTy(STI.getXLenVT());
932 Register Dst = MI.getOperand(0).getReg();
933
934 // We define our scalable vector types for lmul=1 to use a 64 bit known
935 // minimum size. e.g. <vscale x 2 x i32>. VLENB is in bytes so we calculate
936 // vscale as VLENB / 8.
937 static_assert(RISCV::RVVBitsPerBlock == 64, "Unexpected bits per block!");
938 if (STI.getRealMinVLen() < RISCV::RVVBitsPerBlock)
939 // Support for VLEN==32 is incomplete.
940 return false;
941
942 // We assume VLENB is a multiple of 8. We manually choose the best shift
943 // here because SimplifyDemandedBits isn't always able to simplify it.
944 uint64_t Val = MI.getOperand(1).getCImm()->getZExtValue();
945 if (isPowerOf2_64(Val)) {
946 uint64_t Log2 = Log2_64(Val);
947 if (Log2 < 3) {
948 auto VLENB = MIB.buildInstr(RISCV::G_READ_VLENB, {XLenTy}, {});
949 MIB.buildLShr(Dst, VLENB, MIB.buildConstant(XLenTy, 3 - Log2));
950 } else if (Log2 > 3) {
951 auto VLENB = MIB.buildInstr(RISCV::G_READ_VLENB, {XLenTy}, {});
952 MIB.buildShl(Dst, VLENB, MIB.buildConstant(XLenTy, Log2 - 3));
953 } else {
954 MIB.buildInstr(RISCV::G_READ_VLENB, {Dst}, {});
955 }
956 } else if ((Val % 8) == 0) {
957 // If the multiplier is a multiple of 8, scale it down to avoid needing
958 // to shift the VLENB value.
959 auto VLENB = MIB.buildInstr(RISCV::G_READ_VLENB, {XLenTy}, {});
960 MIB.buildMul(Dst, VLENB, MIB.buildConstant(XLenTy, Val / 8));
961 } else {
962 auto VLENB = MIB.buildInstr(RISCV::G_READ_VLENB, {XLenTy}, {});
963 auto VScale = MIB.buildLShr(XLenTy, VLENB, MIB.buildConstant(XLenTy, 3));
964 MIB.buildMul(Dst, VScale, MIB.buildConstant(XLenTy, Val));
965 }
966 MI.eraseFromParent();
967 return true;
968}
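// Worked examples of the shift selection above (vscale = VLENB / 8), for a
// few illustrative multiplier values:
//   Val = 2  -> Log2 = 1 < 3 -> Dst = VLENB >> 2
//   Val = 16 -> Log2 = 4 > 3 -> Dst = VLENB << 1
//   Val = 24 -> multiple of 8 -> Dst = VLENB * 3
//   Val = 5  -> Dst = (VLENB >> 3) * 5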
969
970// Custom-lower extensions from mask vectors by using a vselect either with 1
971// for zero/any-extension or -1 for sign-extension:
972// (vXiN = (s|z)ext vXi1:vmask) -> (vXiN = vselect vmask, (-1 or 1), 0)
973// Note that any-extension is lowered identically to zero-extension.
974bool RISCVLegalizerInfo::legalizeExt(MachineInstr &MI,
975 MachineIRBuilder &MIB) const {
976
977 unsigned Opc = MI.getOpcode();
978 assert(Opc == TargetOpcode::G_ZEXT || Opc == TargetOpcode::G_SEXT ||
979 Opc == TargetOpcode::G_ANYEXT);
980
981 MachineRegisterInfo &MRI = *MIB.getMRI();
982 Register Dst = MI.getOperand(0).getReg();
983 Register Src = MI.getOperand(1).getReg();
984
985 LLT DstTy = MRI.getType(Dst);
986 int64_t ExtTrueVal = Opc == TargetOpcode::G_SEXT ? -1 : 1;
987 LLT DstEltTy = DstTy.getElementType();
988 auto SplatZero = MIB.buildSplatVector(DstTy, MIB.buildConstant(DstEltTy, 0));
989 auto SplatTrue =
990 MIB.buildSplatVector(DstTy, MIB.buildConstant(DstEltTy, ExtTrueVal));
991 MIB.buildSelect(Dst, Src, SplatTrue, SplatZero);
992
993 MI.eraseFromParent();
994 return true;
995}
996
997bool RISCVLegalizerInfo::legalizeLoadStore(MachineInstr &MI,
998 LegalizerHelper &Helper,
999 MachineIRBuilder &MIB) const {
1001 "Machine instructions must be Load/Store.");
1002 MachineRegisterInfo &MRI = *MIB.getMRI();
1003 MachineFunction *MF = MI.getMF();
1004 const DataLayout &DL = MIB.getDataLayout();
1005 LLVMContext &Ctx = MF->getFunction().getContext();
1006
1007 Register DstReg = MI.getOperand(0).getReg();
1008 LLT DataTy = MRI.getType(DstReg);
1009 if (!DataTy.isVector())
1010 return false;
1011
1012 if (!MI.hasOneMemOperand())
1013 return false;
1014
1015 MachineMemOperand *MMO = *MI.memoperands_begin();
1016
1017 const auto *TLI = STI.getTargetLowering();
1018 EVT VT = EVT::getEVT(getTypeForLLT(DataTy, Ctx));
1019
1020 if (TLI->allowsMemoryAccessForAlignment(Ctx, DL, VT, *MMO))
1021 return true;
1022
1023 unsigned EltSizeBits = DataTy.getScalarSizeInBits();
1024 assert((EltSizeBits == 16 || EltSizeBits == 32 || EltSizeBits == 64) &&
1025 "Unexpected unaligned RVV load type");
1026
1027 // Calculate the new vector type with i8 elements
1028 unsigned NumElements =
1029 DataTy.getElementCount().getKnownMinValue() * (EltSizeBits / 8);
1030 LLT NewDataTy = LLT::scalable_vector(NumElements, 8);
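// e.g. an under-aligned <vscale x 4 x s32> access (EltSizeBits = 32) is
// bitcast to <vscale x 16 x s8>: NumElements = 4 * (32 / 8) = 16.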
1031
1032 Helper.bitcast(MI, 0, NewDataTy);
1033
1034 return true;
1035}
1036
1037/// Return the mask type suitable for masking the provided
1038/// vector type. This is simply an i1 element type vector of the same
1039/// (possibly scalable) length.
1040static LLT getMaskTypeFor(LLT VecTy) {
1041 assert(VecTy.isVector());
1042 ElementCount EC = VecTy.getElementCount();
1043 return LLT::vector(EC, LLT::scalar(1));
1044}
1045
1046/// Creates an all ones mask suitable for masking a vector of type VecTy with
1047/// vector length VL.
1048static MachineInstrBuilder buildAllOnesMask(LLT VecTy, const SrcOp &VL,
1049 MachineIRBuilder &MIB,
1050 MachineRegisterInfo &MRI) {
1051 LLT MaskTy = getMaskTypeFor(VecTy);
1052 return MIB.buildInstr(RISCV::G_VMSET_VL, {MaskTy}, {VL});
1053}
1054
1055/// Gets the two common "VL" operands: an all-ones mask and the vector length.
1056/// VecTy is a scalable vector type.
1057static std::pair<MachineInstrBuilder, MachineInstrBuilder>
1058buildDefaultVLOps(LLT VecTy, MachineIRBuilder &MIB, MachineRegisterInfo &MRI) {
1059 assert(VecTy.isScalableVector() && "Expecting scalable container type");
1060 const RISCVSubtarget &STI = MIB.getMF().getSubtarget<RISCVSubtarget>();
1061 LLT XLenTy(STI.getXLenVT());
1062 auto VL = MIB.buildConstant(XLenTy, -1);
1063 auto Mask = buildAllOnesMask(VecTy, VL, MIB, MRI);
1064 return {Mask, VL};
1065}
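// Note: the VL constant of -1 built above is the all-ones sentinel that the
// RISC-V backend interprets as VLMAX, i.e. operate on every element of the
// container type.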
1066
1067static MachineInstrBuilder
1068buildSplatPartsS64WithVL(const DstOp &Dst, const SrcOp &Passthru, Register Lo,
1069 Register Hi, const SrcOp &VL, MachineIRBuilder &MIB,
1070 MachineRegisterInfo &MRI) {
1071 // TODO: If the Hi bits of the splat are undefined, then it's fine to just
1072 // splat Lo even if it might be sign extended. I don't think we have
1073 // introduced a case where we're build a s64 where the upper bits are undef
1074 // yet.
1075
1076 // Fall back to a stack store and stride x0 vector load.
1077 // TODO: need to lower G_SPLAT_VECTOR_SPLIT_I64. This is done in
1078 // preprocessDAG in SDAG.
1079 return MIB.buildInstr(RISCV::G_SPLAT_VECTOR_SPLIT_I64_VL, {Dst},
1080 {Passthru, Lo, Hi, VL});
1081}
1082
1083static MachineInstrBuilder
1084buildSplatSplitS64WithVL(const DstOp &Dst, const SrcOp &Passthru,
1085 const SrcOp &Scalar, const SrcOp &VL,
1086 MachineIRBuilder &MIB, MachineRegisterInfo &MRI) {
1087 assert(Scalar.getLLTTy(MRI) == LLT::scalar(64) && "Unexpected VecTy!");
1088 auto Unmerge = MIB.buildUnmerge(LLT::scalar(32), Scalar);
1089 return buildSplatPartsS64WithVL(Dst, Passthru, Unmerge.getReg(0),
1090 Unmerge.getReg(1), VL, MIB, MRI);
1091}
1092
1093// Lower splats of s1 types to G_ICMP. For each mask vector type, we have a
1094// legal equivalently-sized i8 type, so we can use that as a go-between.
1095// Splats of s1 types that have constant value can be legalized as VMSET_VL or
1096// VMCLR_VL.
1097bool RISCVLegalizerInfo::legalizeSplatVector(MachineInstr &MI,
1098 MachineIRBuilder &MIB) const {
1099 assert(MI.getOpcode() == TargetOpcode::G_SPLAT_VECTOR);
1100
1101 MachineRegisterInfo &MRI = *MIB.getMRI();
1102
1103 Register Dst = MI.getOperand(0).getReg();
1104 Register SplatVal = MI.getOperand(1).getReg();
1105
1106 LLT VecTy = MRI.getType(Dst);
1107 LLT XLenTy(STI.getXLenVT());
1108
1109 // Handle case of s64 element vectors on rv32
1110 if (XLenTy.getSizeInBits() == 32 &&
1111 VecTy.getElementType().getSizeInBits() == 64) {
1112 auto [_, VL] = buildDefaultVLOps(MRI.getType(Dst), MIB, MRI);
1113 buildSplatSplitS64WithVL(Dst, MIB.buildUndef(VecTy), SplatVal, VL, MIB,
1114 MRI);
1115 MI.eraseFromParent();
1116 return true;
1117 }
1118
1119 // All-zeros or all-ones splats are handled specially.
1120 MachineInstr &SplatValMI = *MRI.getVRegDef(SplatVal);
1121 if (isAllOnesOrAllOnesSplat(SplatValMI, MRI)) {
1122 auto VL = buildDefaultVLOps(VecTy, MIB, MRI).second;
1123 MIB.buildInstr(RISCV::G_VMSET_VL, {Dst}, {VL});
1124 MI.eraseFromParent();
1125 return true;
1126 }
1127 if (isNullOrNullSplat(SplatValMI, MRI)) {
1128 auto VL = buildDefaultVLOps(VecTy, MIB, MRI).second;
1129 MIB.buildInstr(RISCV::G_VMCLR_VL, {Dst}, {VL});
1130 MI.eraseFromParent();
1131 return true;
1132 }
1133
1134 // Handle non-constant mask splat (i.e. not sure if it's all zeros or all
1135 // ones) by promoting it to an s8 splat.
1136 LLT InterEltTy = LLT::scalar(8);
1137 LLT InterTy = VecTy.changeElementType(InterEltTy);
1138 auto ZExtSplatVal = MIB.buildZExt(InterEltTy, SplatVal);
1139 auto And =
1140 MIB.buildAnd(InterEltTy, ZExtSplatVal, MIB.buildConstant(InterEltTy, 1));
1141 auto LHS = MIB.buildSplatVector(InterTy, And);
1142 auto ZeroSplat =
1143 MIB.buildSplatVector(InterTy, MIB.buildConstant(InterEltTy, 0));
1144 MIB.buildICmp(CmpInst::Predicate::ICMP_NE, Dst, LHS, ZeroSplat);
1145 MI.eraseFromParent();
1146 return true;
1147}
1148
1149static LLT getLMUL1Ty(LLT VecTy) {
1150 assert(VecTy.getElementType().getSizeInBits() <= 64 &&
1151 "Unexpected vector LLT");
1152 return LLT::scalable_vector(RISCV::RVVBitsPerBlock /
1153 VecTy.getElementType().getSizeInBits(),
1154 VecTy.getElementType());
1155}
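// For example, with RISCV::RVVBitsPerBlock == 64, getLMUL1Ty(nxv8s32) is
// nxv2s32 (64 / 32 = 2 elements): the largest s32 vector that fits in a
// single (LMUL=1) vector register.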
1156
1157bool RISCVLegalizerInfo::legalizeExtractSubvector(MachineInstr &MI,
1158 MachineIRBuilder &MIB) const {
1159 GExtractSubvector &ES = cast<GExtractSubvector>(MI);
1160
1161 MachineRegisterInfo &MRI = *MIB.getMRI();
1162
1163 Register Dst = ES.getReg(0);
1164 Register Src = ES.getSrcVec();
1165 uint64_t Idx = ES.getIndexImm();
1166
1167 // With an index of 0 this is a cast-like subvector, which can be performed
1168 // with subregister operations.
1169 if (Idx == 0)
1170 return true;
1171
1172 LLT LitTy = MRI.getType(Dst);
1173 LLT BigTy = MRI.getType(Src);
1174
1175 if (LitTy.getElementType() == LLT::scalar(1)) {
1176 // We can't slide this mask vector up indexed by its i1 elements.
1177 // This poses a problem when we wish to insert a scalable vector which
1178 // can't be re-expressed as a larger type. Just choose the slow path and
1179 // extend to a larger type, then truncate back down.
1180 LLT ExtBigTy = BigTy.changeElementType(LLT::scalar(8));
1181 LLT ExtLitTy = LitTy.changeElementType(LLT::scalar(8));
1182 auto BigZExt = MIB.buildZExt(ExtBigTy, Src);
1183 auto ExtractZExt = MIB.buildExtractSubvector(ExtLitTy, BigZExt, Idx);
1184 auto SplatZero = MIB.buildSplatVector(
1185 ExtLitTy, MIB.buildConstant(ExtLitTy.getElementType(), 0));
1186 MIB.buildICmp(CmpInst::Predicate::ICMP_NE, Dst, ExtractZExt, SplatZero);
1187 MI.eraseFromParent();
1188 return true;
1189 }
1190
1191 // extract_subvector scales the index by vscale if the subvector is scalable,
1192 // and decomposeSubvectorInsertExtractToSubRegs takes this into account.
1193 const RISCVRegisterInfo *TRI = STI.getRegisterInfo();
1194 MVT LitTyMVT = getMVTForLLT(LitTy);
1195 auto Decompose =
1196 RISCVTargetLowering::decomposeSubvectorInsertExtractToSubRegs(
1197 getMVTForLLT(BigTy), LitTyMVT, Idx, TRI);
1198 unsigned RemIdx = Decompose.second;
1199
1200 // If the Idx has been completely eliminated then this is a subvector extract
1201 // which naturally aligns to a vector register. These can easily be handled
1202 // using subregister manipulation.
1203 if (RemIdx == 0)
1204 return true;
1205
1206 // Else LitTy is M1 or smaller and may need to be slid down: if LitTy
1207 // was > M1 then the index would need to be a multiple of VLMAX, and so would
1208 // divide exactly.
1209 assert(
1210 RISCVVType::decodeVLMUL(RISCVTargetLowering::getLMUL(LitTyMVT))
1211 .second);
1212
1213 // If the vector type is an LMUL-group type, extract a subvector equal to the
1214 // nearest full vector register type.
1215 LLT InterLitTy = BigTy;
1216 Register Vec = Src;
1217 if (TypeSize::isKnownGT(BigTy.getSizeInBits(),
1218 getLMUL1Ty(BigTy).getSizeInBits())) {
1219 // If BigTy has an LMUL > 1, then LitTy should have a smaller LMUL, and
1220 // we should have successfully decomposed the extract into a subregister.
1221 assert(Decompose.first != RISCV::NoSubRegister);
1222 InterLitTy = getLMUL1Ty(BigTy);
1223 // SDAG builds a TargetExtractSubreg. We cannot create a Copy with SubReg
1224 // specified on the source Register (the equivalent) since generic virtual
1225 // register does not allow subregister index.
1226 Vec = MIB.buildExtractSubvector(InterLitTy, Src, Idx - RemIdx).getReg(0);
1227 }
1228
1229 // Slide this vector register down by the desired number of elements in order
1230 // to place the desired subvector starting at element 0.
1231 const LLT XLenTy(STI.getXLenVT());
1232 auto SlidedownAmt = MIB.buildVScale(XLenTy, RemIdx);
1233 auto [Mask, VL] = buildDefaultVLOps(InterLitTy, MIB, MRI);
1234 uint64_t Policy = RISCVVType::TAIL_AGNOSTIC | RISCVVType::MASK_AGNOSTIC;
1235 auto Slidedown = MIB.buildInstr(
1236 RISCV::G_VSLIDEDOWN_VL, {InterLitTy},
1237 {MIB.buildUndef(InterLitTy), Vec, SlidedownAmt, Mask, VL, Policy});
1238
1239 // Now the vector is in the right position, extract our final subvector. This
1240 // should resolve to a COPY.
1241 MIB.buildExtractSubvector(Dst, Slidedown, 0);
1242
1243 MI.eraseFromParent();
1244 return true;
1245}
1246
1247bool RISCVLegalizerInfo::legalizeInsertSubvector(MachineInstr &MI,
1248 LegalizerHelper &Helper,
1249 MachineIRBuilder &MIB) const {
1250 GInsertSubvector &IS = cast<GInsertSubvector>(MI);
1251
1252 MachineRegisterInfo &MRI = *MIB.getMRI();
1253
1254 Register Dst = IS.getReg(0);
1255 Register BigVec = IS.getBigVec();
1256 Register LitVec = IS.getSubVec();
1257 uint64_t Idx = IS.getIndexImm();
1258
1259 LLT BigTy = MRI.getType(BigVec);
1260 LLT LitTy = MRI.getType(LitVec);
1261
1262 if (Idx == 0 &&
1263 MRI.getVRegDef(BigVec)->getOpcode() == TargetOpcode::G_IMPLICIT_DEF)
1264 return true;
1265
1266 // We don't have the ability to slide mask vectors up indexed by their i1
1267 // elements; the smallest we can do is i8. Often we are able to bitcast to
1268 // equivalent i8 vectors. Otherwise, we must zeroextend to equivalent i8
1269 // vectors and truncate down after the insert.
1270 if (LitTy.getElementType() == LLT::scalar(1)) {
1271 auto BigTyMinElts = BigTy.getElementCount().getKnownMinValue();
1272 auto LitTyMinElts = LitTy.getElementCount().getKnownMinValue();
1273 if (BigTyMinElts >= 8 && LitTyMinElts >= 8)
1274 return Helper.bitcast(
1275 IS, 0,
1276 LLT::vector(BigTy.getElementCount().divideCoefficientBy(8), 8));
1277
1278 // We can't slide this mask vector up indexed by its i1 elements.
1279 // This poses a problem when we wish to insert a scalable vector which
1280 // can't be re-expressed as a larger type. Just choose the slow path and
1281 // extend to a larger type, then truncate back down.
1282 LLT ExtBigTy = BigTy.changeElementType(LLT::scalar(8));
1283 return Helper.widenScalar(IS, 0, ExtBigTy);
1284 }
1285
1286 const RISCVRegisterInfo *TRI = STI.getRegisterInfo();
1287 unsigned SubRegIdx, RemIdx;
1288 std::tie(SubRegIdx, RemIdx) =
1289 RISCVTargetLowering::decomposeSubvectorInsertExtractToSubRegs(
1290 getMVTForLLT(BigTy), getMVTForLLT(LitTy), Idx, TRI);
1291
1292 TypeSize VecRegSize = TypeSize::getScalable(RISCV::RVVBitsPerBlock);
1293 assert(isPowerOf2_64(
1294 STI.expandVScale(LitTy.getSizeInBits()).getKnownMinValue()));
1295 bool ExactlyVecRegSized =
1296 STI.expandVScale(LitTy.getSizeInBits())
1297 .isKnownMultipleOf(STI.expandVScale(VecRegSize));
1298
1299 // If the Idx has been completely eliminated and this subvector's size is a
1300 // vector register or a multiple thereof, or the surrounding elements are
1301 // undef, then this is a subvector insert which naturally aligns to a vector
1302 // register. These can easily be handled using subregister manipulation.
1303 if (RemIdx == 0 && ExactlyVecRegSized)
1304 return true;
1305
1306 // If the subvector is smaller than a vector register, then the insertion
1307 // must preserve the undisturbed elements of the register. We do this by
1308 // lowering to an EXTRACT_SUBVECTOR grabbing the nearest LMUL=1 vector type
1309 // (which resolves to a subregister copy), performing a VSLIDEUP to place the
1310 // subvector within the vector register, and an INSERT_SUBVECTOR of that
1311 // LMUL=1 type back into the larger vector (resolving to another subregister
1312 // operation). See below for how our VSLIDEUP works. We go via a LMUL=1 type
1313 // to avoid allocating a large register group to hold our subvector.
1314
1315 // VSLIDEUP works by leaving elements 0<i<OFFSET undisturbed, elements
1316 // OFFSET<=i<VL set to the "subvector" and vl<=i<VLMAX set to the tail policy
1317 // (in our case undisturbed). This means we can set up a subvector insertion
1318 // where OFFSET is the insertion offset, and the VL is the OFFSET plus the
1319 // size of the subvector.
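// For illustration with hypothetical types: inserting a <vscale x 1 x s32>
// subvector at element index 1 of a <vscale x 2 x s32> register gives
// RemIdx = 1, so OFFSET = vscale x 1 and VL = vscale x 2; elements
// [0, OFFSET) keep their old values and [OFFSET, VL) receive the subvector.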
1320 const LLT XLenTy(STI.getXLenVT());
1321 LLT InterLitTy = BigTy;
1322 Register AlignedExtract = BigVec;
1323 unsigned AlignedIdx = Idx - RemIdx;
1324 if (TypeSize::isKnownGT(BigTy.getSizeInBits(),
1325 getLMUL1Ty(BigTy).getSizeInBits())) {
1326 InterLitTy = getLMUL1Ty(BigTy);
1327 // Extract a subvector equal to the nearest full vector register type. This
1328 // should resolve to a G_EXTRACT on a subreg.
1329 AlignedExtract =
1330 MIB.buildExtractSubvector(InterLitTy, BigVec, AlignedIdx).getReg(0);
1331 }
1332
1333 auto Insert = MIB.buildInsertSubvector(InterLitTy, MIB.buildUndef(InterLitTy),
1334 LitVec, 0);
1335
1336 auto [Mask, _] = buildDefaultVLOps(InterLitTy, MIB, MRI);
1337 auto VL = MIB.buildVScale(XLenTy, LitTy.getElementCount().getKnownMinValue());
1338
1339 // If we're inserting into the lowest elements, use a tail undisturbed
1340 // vmv.v.v.
1341 MachineInstrBuilder Inserted;
1342 bool NeedInsertSubvec =
1343 TypeSize::isKnownGT(BigTy.getSizeInBits(), InterLitTy.getSizeInBits());
1344 Register InsertedDst =
1345 NeedInsertSubvec ? MRI.createGenericVirtualRegister(InterLitTy) : Dst;
1346 if (RemIdx == 0) {
1347 Inserted = MIB.buildInstr(RISCV::G_VMV_V_V_VL, {InsertedDst},
1348 {AlignedExtract, Insert, VL});
1349 } else {
1350 auto SlideupAmt = MIB.buildVScale(XLenTy, RemIdx);
1351 // Construct the vector length corresponding to RemIdx + length(LitTy).
1352 VL = MIB.buildAdd(XLenTy, SlideupAmt, VL);
1353 // Use tail agnostic policy if we're inserting over InterLitTy's tail.
1354 ElementCount EndIndex =
1355 ElementCount::getScalable(RemIdx) + LitTy.getElementCount();
1356 uint64_t Policy = RISCVVType::TAIL_UNDISTURBED_MASK_UNDISTURBED;
1357 if (STI.expandVScale(EndIndex) ==
1358 STI.expandVScale(InterLitTy.getElementCount()))
1359 Policy = RISCVVType::TAIL_AGNOSTIC;
1360
1361 Inserted =
1362 MIB.buildInstr(RISCV::G_VSLIDEUP_VL, {InsertedDst},
1363 {AlignedExtract, Insert, SlideupAmt, Mask, VL, Policy});
1364 }
1365
1366 // If required, insert this subvector back into the correct vector register.
1367 // This should resolve to an INSERT_SUBREG instruction.
1368 if (NeedInsertSubvec)
1369 MIB.buildInsertSubvector(Dst, BigVec, Inserted, AlignedIdx);
1370
1371 MI.eraseFromParent();
1372 return true;
1373}
1374
1375static unsigned getRISCVWOpcode(unsigned Opcode) {
1376 switch (Opcode) {
1377 default:
1378 llvm_unreachable("Unexpected opcode");
1379 case TargetOpcode::G_ASHR:
1380 return RISCV::G_SRAW;
1381 case TargetOpcode::G_LSHR:
1382 return RISCV::G_SRLW;
1383 case TargetOpcode::G_SHL:
1384 return RISCV::G_SLLW;
1385 case TargetOpcode::G_SDIV:
1386 return RISCV::G_DIVW;
1387 case TargetOpcode::G_UDIV:
1388 return RISCV::G_DIVUW;
1389 case TargetOpcode::G_UREM:
1390 return RISCV::G_REMUW;
1391 case TargetOpcode::G_ROTL:
1392 return RISCV::G_ROLW;
1393 case TargetOpcode::G_ROTR:
1394 return RISCV::G_RORW;
1395 case TargetOpcode::G_CTLZ:
1396 return RISCV::G_CLZW;
1397 case TargetOpcode::G_CTTZ:
1398 return RISCV::G_CTZW;
1399 case TargetOpcode::G_CTLS:
1400 return RISCV::G_CLSW;
1401 case TargetOpcode::G_FPTOSI:
1402 return RISCV::G_FCVT_W_RV64;
1403 case TargetOpcode::G_FPTOUI:
1404 return RISCV::G_FCVT_WU_RV64;
1405 }
1406}
1407
1408bool RISCVLegalizerInfo::legalizeCustom(
1409 LegalizerHelper &Helper, MachineInstr &MI,
1410 LostDebugLocObserver &LocObserver) const {
1411 MachineIRBuilder &MIRBuilder = Helper.MIRBuilder;
1412 MachineRegisterInfo &MRI = *MIRBuilder.getMRI();
1413 MachineFunction &MF = *MI.getParent()->getParent();
1414 switch (MI.getOpcode()) {
1415 default:
1416 // No idea what to do.
1417 return false;
1418 case TargetOpcode::G_ABS:
1419 return Helper.lowerAbsToMaxNeg(MI);
1420 case TargetOpcode::G_FCONSTANT: {
1421 const APFloat &FVal = MI.getOperand(1).getFPImm()->getValueAPF();
1422
1423 // Convert G_FCONSTANT to G_CONSTANT.
1424 Register DstReg = MI.getOperand(0).getReg();
1425 MIRBuilder.buildConstant(DstReg, FVal.bitcastToAPInt());
1426
1427 MI.eraseFromParent();
1428 return true;
1429 }
1430 case TargetOpcode::G_CONSTANT: {
1431 const Function &F = MF.getFunction();
1432 // TODO: if PSI and BFI are present, add " ||
1433 // llvm::shouldOptForSize(*CurMBB, PSI, BFI)".
1434 bool ShouldOptForSize = F.hasOptSize();
1435 const ConstantInt *ConstVal = MI.getOperand(1).getCImm();
1436 if (!shouldBeInConstantPool(ConstVal->getValue(), ShouldOptForSize))
1437 return true;
1438 return Helper.lowerConstant(MI);
1439 }
1440 case TargetOpcode::G_SUB:
1441 case TargetOpcode::G_ADD: {
1442 Helper.Observer.changingInstr(MI);
1443 Helper.widenScalarSrc(MI, sXLen, 1, TargetOpcode::G_ANYEXT);
1444 Helper.widenScalarSrc(MI, sXLen, 2, TargetOpcode::G_ANYEXT);
1445
1446 Register DstALU = MRI.createGenericVirtualRegister(sXLen);
1447
1448 MachineOperand &MO = MI.getOperand(0);
1449 MIRBuilder.setInsertPt(MIRBuilder.getMBB(), ++MIRBuilder.getInsertPt());
1450 auto DstSext = MIRBuilder.buildSExtInReg(sXLen, DstALU, 32);
1451
1452 MIRBuilder.buildInstr(TargetOpcode::G_TRUNC, {MO}, {DstSext});
1453 MO.setReg(DstALU);
1454
1455 Helper.Observer.changedInstr(MI);
1456 return true;
1457 }
1458 case TargetOpcode::G_SEXT_INREG: {
1459 LLT DstTy = MRI.getType(MI.getOperand(0).getReg());
1460 int64_t SizeInBits = MI.getOperand(2).getImm();
1461 // Source size of 32 is sext.w.
1462 if (DstTy.getSizeInBits() == 64 && SizeInBits == 32)
1463 return true;
1464
1465 if (STI.hasStdExtZbb() && (SizeInBits == 8 || SizeInBits == 16))
1466 return true;
1467
1468 return Helper.lower(MI, 0, /* Unused hint type */ LLT()) ==
1469 LegalizerHelper::Legalized;
1470 }
1471 case TargetOpcode::G_ASHR:
1472 case TargetOpcode::G_LSHR:
1473 case TargetOpcode::G_SHL: {
1474 if (getIConstantVRegValWithLookThrough(MI.getOperand(2).getReg(), MRI)) {
1475 // We don't need a custom node for shift by constant. Just widen the
1476 // source and the shift amount.
1477 unsigned ExtOpc = TargetOpcode::G_ANYEXT;
1478 if (MI.getOpcode() == TargetOpcode::G_ASHR)
1479 ExtOpc = TargetOpcode::G_SEXT;
1480 else if (MI.getOpcode() == TargetOpcode::G_LSHR)
1481 ExtOpc = TargetOpcode::G_ZEXT;
1482
1483 Helper.Observer.changingInstr(MI);
1484 Helper.widenScalarSrc(MI, sXLen, 1, ExtOpc);
1485 Helper.widenScalarSrc(MI, sXLen, 2, TargetOpcode::G_ZEXT);
1486 Helper.widenScalarDst(MI, sXLen);
1487 Helper.Observer.changedInstr(MI);
1488 return true;
1489 }
1490
1491 Helper.Observer.changingInstr(MI);
1492 Helper.widenScalarSrc(MI, sXLen, 1, TargetOpcode::G_ANYEXT);
1493 Helper.widenScalarSrc(MI, sXLen, 2, TargetOpcode::G_ANYEXT);
1494 Helper.widenScalarDst(MI, sXLen);
1495 MI.setDesc(MIRBuilder.getTII().get(getRISCVWOpcode(MI.getOpcode())));
1496 Helper.Observer.changedInstr(MI);
1497 return true;
1498 }
1499 case TargetOpcode::G_SDIV:
1500 case TargetOpcode::G_UDIV:
1501 case TargetOpcode::G_UREM:
1502 case TargetOpcode::G_ROTL:
1503 case TargetOpcode::G_ROTR: {
1504 Helper.Observer.changingInstr(MI);
1505 Helper.widenScalarSrc(MI, sXLen, 1, TargetOpcode::G_ANYEXT);
1506 Helper.widenScalarSrc(MI, sXLen, 2, TargetOpcode::G_ANYEXT);
1507 Helper.widenScalarDst(MI, sXLen);
1508 MI.setDesc(MIRBuilder.getTII().get(getRISCVWOpcode(MI.getOpcode())));
1509 Helper.Observer.changedInstr(MI);
1510 return true;
1511 }
1512 case TargetOpcode::G_CTLZ:
1513 case TargetOpcode::G_CTTZ:
1514 case TargetOpcode::G_CTLS: {
1515 Helper.Observer.changingInstr(MI);
1516 Helper.widenScalarSrc(MI, sXLen, 1, TargetOpcode::G_ANYEXT);
1517 Helper.widenScalarDst(MI, sXLen);
1518 MI.setDesc(MIRBuilder.getTII().get(getRISCVWOpcode(MI.getOpcode())));
1519 Helper.Observer.changedInstr(MI);
1520 return true;
1521 }
1522 case TargetOpcode::G_FPTOSI:
1523 case TargetOpcode::G_FPTOUI: {
1524 Helper.Observer.changingInstr(MI);
1525 Helper.widenScalarDst(MI, sXLen);
1526 MI.setDesc(MIRBuilder.getTII().get(getRISCVWOpcode(MI.getOpcode())));
1527 MI.addOperand(MachineOperand::CreateImm(RISCVFPRndMode::RTZ));
1528 Helper.Observer.changedInstr(MI);
1529 return true;
1530 }
1531 case TargetOpcode::G_IS_FPCLASS: {
1532 Register GISFPCLASS = MI.getOperand(0).getReg();
1533 Register Src = MI.getOperand(1).getReg();
1534 const MachineOperand &ImmOp = MI.getOperand(2);
1535 MachineIRBuilder MIB(MI);
1536
1537 // Map LLVM IR's floating-point class mask onto RISC-V's fclass bit layout
1538 // by rotating the 10-bit immediate right by two bits.
1539 APInt GFpClassImm(10, static_cast<uint64_t>(ImmOp.getImm()));
1540 auto FClassMask = MIB.buildConstant(sXLen, GFpClassImm.rotr(2).zext(XLen));
1541 auto ConstZero = MIB.buildConstant(sXLen, 0);
1542
1543 auto GFClass = MIB.buildInstr(RISCV::G_FCLASS, {sXLen}, {Src});
1544 auto And = MIB.buildAnd(sXLen, GFClass, FClassMask);
1545 MIB.buildICmp(CmpInst::ICMP_NE, GISFPCLASS, And, ConstZero);
1546
1547 MI.eraseFromParent();
1548 return true;
1549 }
1550 case TargetOpcode::G_BRJT:
1551 return legalizeBRJT(MI, MIRBuilder);
1552 case TargetOpcode::G_VASTART:
1553 return legalizeVAStart(MI, MIRBuilder);
1554 case TargetOpcode::G_VSCALE:
1555 return legalizeVScale(MI, MIRBuilder);
1556 case TargetOpcode::G_ZEXT:
1557 case TargetOpcode::G_SEXT:
1558 case TargetOpcode::G_ANYEXT:
1559 return legalizeExt(MI, MIRBuilder);
1560 case TargetOpcode::G_SPLAT_VECTOR:
1561 return legalizeSplatVector(MI, MIRBuilder);
1562 case TargetOpcode::G_EXTRACT_SUBVECTOR:
1563 return legalizeExtractSubvector(MI, MIRBuilder);
1564 case TargetOpcode::G_INSERT_SUBVECTOR:
1565 return legalizeInsertSubvector(MI, Helper, MIRBuilder);
1566 case TargetOpcode::G_LOAD:
1567 case TargetOpcode::G_STORE:
1568 return legalizeLoadStore(MI, Helper, MIRBuilder);
1569 }
1570
1571 llvm_unreachable("expected switch to return");
1572}