//===- X86LegalizerInfo.cpp --------------------------------------*- C++ -*-==//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
/// \file
/// This file implements the targeting of the MachineLegalizer class for X86.
/// \todo This should be generated by TableGen.
//===----------------------------------------------------------------------===//

#include "X86LegalizerInfo.h"
#include "X86Subtarget.h"
#include "X86TargetMachine.h"
#include "llvm/CodeGen/GlobalISel/GenericMachineInstrs.h"
#include "llvm/CodeGen/GlobalISel/LegalizerHelper.h"
#include "llvm/CodeGen/GlobalISel/MachineIRBuilder.h"
#include "llvm/CodeGen/GlobalISel/Utils.h"
#include "llvm/CodeGen/MachineConstantPool.h"
#include "llvm/CodeGen/TargetOpcodes.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/Type.h"

using namespace llvm;
using namespace TargetOpcode;
using namespace LegalizeActions;
using namespace LegalityPredicates;

X86LegalizerInfo::X86LegalizerInfo(const X86Subtarget &STI,
                                   const X86TargetMachine &TM)
    : Subtarget(STI) {

  bool Is64Bit = Subtarget.is64Bit();
  bool HasCMOV = Subtarget.canUseCMOV();
  bool HasSSE1 = Subtarget.hasSSE1();
  bool HasSSE2 = Subtarget.hasSSE2();
  bool HasSSE41 = Subtarget.hasSSE41();
  bool HasAVX = Subtarget.hasAVX();
  bool HasAVX2 = Subtarget.hasAVX2();
  bool HasAVX512 = Subtarget.hasAVX512();
  bool HasVLX = Subtarget.hasVLX();
  bool HasDQI = Subtarget.hasAVX512() && Subtarget.hasDQI();
  bool HasBWI = Subtarget.hasAVX512() && Subtarget.hasBWI();
  bool UseX87 = !Subtarget.useSoftFloat() && Subtarget.hasX87();
  bool HasPOPCNT = Subtarget.hasPOPCNT();
  bool HasLZCNT = Subtarget.hasLZCNT();
  bool HasBMI = Subtarget.hasBMI();

  const LLT p0 = LLT::pointer(0, TM.getPointerSizeInBits(0));
  const LLT s1 = LLT::scalar(1);
  const LLT s8 = LLT::scalar(8);
  const LLT s16 = LLT::scalar(16);
  const LLT s32 = LLT::scalar(32);
  const LLT s64 = LLT::scalar(64);
  const LLT s80 = LLT::scalar(80);
  const LLT s128 = LLT::scalar(128);
  const LLT sMaxScalar = Subtarget.is64Bit() ? s64 : s32;
  const LLT v2s32 = LLT::fixed_vector(2, 32);
  const LLT v4s8 = LLT::fixed_vector(4, 8);

  const LLT v16s8 = LLT::fixed_vector(16, 8);
  const LLT v8s16 = LLT::fixed_vector(8, 16);
  const LLT v4s32 = LLT::fixed_vector(4, 32);
  const LLT v2s64 = LLT::fixed_vector(2, 64);
  const LLT v2p0 = LLT::fixed_vector(2, p0);

  const LLT v32s8 = LLT::fixed_vector(32, 8);
  const LLT v16s16 = LLT::fixed_vector(16, 16);
  const LLT v8s32 = LLT::fixed_vector(8, 32);
  const LLT v4s64 = LLT::fixed_vector(4, 64);
  const LLT v4p0 = LLT::fixed_vector(4, p0);

  const LLT v64s8 = LLT::fixed_vector(64, 8);
  const LLT v32s16 = LLT::fixed_vector(32, 16);
  const LLT v16s32 = LLT::fixed_vector(16, 32);
  const LLT v8s64 = LLT::fixed_vector(8, 64);

  const LLT s8MaxVector = HasAVX512 ? v64s8 : HasAVX ? v32s8 : v16s8;
  const LLT s16MaxVector = HasAVX512 ? v32s16 : HasAVX ? v16s16 : v8s16;
  const LLT s32MaxVector = HasAVX512 ? v16s32 : HasAVX ? v8s32 : v4s32;
  const LLT s64MaxVector = HasAVX512 ? v8s64 : HasAVX ? v4s64 : v2s64;

  // todo: AVX512 bool vector predicate types

  // implicit/constants
  // 32/64-bits needs support for s64/s128 to handle cases:
  // s64 = EXTEND (G_IMPLICIT_DEF s32) -> s64 = G_IMPLICIT_DEF
  // s128 = EXTEND (G_IMPLICIT_DEF s32/s64) -> s128 = G_IMPLICIT_DEF
  getActionDefinitionsBuilder(G_IMPLICIT_DEF)
      .legalFor({p0, s1, s8, s16, s32, s64})
      .legalFor(Is64Bit, {s128});

  getActionDefinitionsBuilder(G_CONSTANT)
      .legalFor({p0, s8, s16, s32})
      .legalFor(Is64Bit, {s64})
      .widenScalarToNextPow2(0, /*Min=*/8)
      .clampScalar(0, s8, sMaxScalar);

  getActionDefinitionsBuilder({G_LROUND, G_LLROUND, G_FCOS, G_FCOSH, G_FACOS,
                               G_FSIN, G_FSINH, G_FASIN, G_FTAN, G_FTANH,
                               G_FATAN, G_FATAN2, G_FPOW, G_FEXP, G_FEXP2,
                               G_FEXP10, G_FLOG, G_FLOG2, G_FLOG10, G_FPOWI,
                               G_FSINCOS, G_FCEIL, G_FFLOOR})
      .libcall();

  getActionDefinitionsBuilder(G_FSQRT)
      .legalFor(HasSSE1 || UseX87, {s32})
      .legalFor(HasSSE2 || UseX87, {s64})
      .legalFor(UseX87, {s80});

  getActionDefinitionsBuilder(G_GET_ROUNDING).customFor({s32});

  // merge/unmerge
  for (unsigned Op : {G_MERGE_VALUES, G_UNMERGE_VALUES}) {
    unsigned BigTyIdx = Op == G_MERGE_VALUES ? 0 : 1;
    unsigned LitTyIdx = Op == G_MERGE_VALUES ? 1 : 0;
    getActionDefinitionsBuilder(Op)
        .widenScalarToNextPow2(LitTyIdx, /*Min=*/8)
        .widenScalarToNextPow2(BigTyIdx, /*Min=*/16)
        .minScalar(LitTyIdx, s8)
        .minScalar(BigTyIdx, s32)
        .legalIf([=](const LegalityQuery &Q) {
          switch (Q.Types[BigTyIdx].getSizeInBits()) {
          case 16:
          case 32:
          case 64:
          case 128:
          case 256:
          case 512:
            break;
          default:
            return false;
          }
          switch (Q.Types[LitTyIdx].getSizeInBits()) {
          case 8:
          case 16:
          case 32:
          case 64:
          case 128:
          case 256:
            return true;
          default:
            return false;
          }
        });
  }

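  // Illustration (editorial, not from the source): under the rules above,
  //   %merged:_(s64) = G_MERGE_VALUES %lo:_(s32), %hi:_(s32)
  // and its inverse
  //   %lo:_(s32), %hi:_(s32) = G_UNMERGE_VALUES %merged:_(s64)
  // pass both size switches (big type 64, little type 32) and are legal.
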
  // integer addition/subtraction
  getActionDefinitionsBuilder({G_ADD, G_SUB})
      .legalFor({s8, s16, s32})
      .legalFor(Is64Bit, {s64})
      .legalFor(HasSSE2, {v16s8, v8s16, v4s32, v2s64})
      .legalFor(HasAVX2, {v32s8, v16s16, v8s32, v4s64})
      .legalFor(HasAVX512, {v16s32, v8s64})
      .legalFor(HasBWI, {v64s8, v32s16})
      .clampMinNumElements(0, s8, 16)
      .clampMinNumElements(0, s16, 8)
      .clampMinNumElements(0, s32, 4)
      .clampMinNumElements(0, s64, 2)
      .clampMaxNumElements(0, s8, HasBWI ? 64 : (HasAVX2 ? 32 : 16))
      .clampMaxNumElements(0, s16, HasBWI ? 32 : (HasAVX2 ? 16 : 8))
      .clampMaxNumElements(0, s32, HasAVX512 ? 16 : (HasAVX2 ? 8 : 4))
      .clampMaxNumElements(0, s64, HasAVX512 ? 8 : (HasAVX2 ? 4 : 2))
      .widenScalarToNextPow2(0, /*Min=*/32)
      .clampScalar(0, s8, sMaxScalar)
      .scalarize(0);

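  // Illustration (editorial, not from the source): on a plain SSE2 target
  // (no AVX2), clampMaxNumElements(0, s8, 16) narrows a v32s8 G_ADD back to
  // the legal v16s8 form, so the legalizer splits it into two 128-bit adds.
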
  getActionDefinitionsBuilder({G_UADDE, G_UADDO, G_USUBE, G_USUBO})
      .legalFor({{s8, s1}, {s16, s1}, {s32, s1}})
      .legalFor(Is64Bit, {{s64, s1}})
      .widenScalarToNextPow2(0, /*Min=*/32)
      .clampScalar(0, s8, sMaxScalar)
      .clampScalar(1, s1, s1)
      .scalarize(0);

  // integer multiply
  getActionDefinitionsBuilder(G_MUL)
      .legalFor({s8, s16, s32})
      .legalFor(Is64Bit, {s64})
      .legalFor(HasSSE2, {v8s16})
      .legalFor(HasSSE41, {v4s32})
      .legalFor(HasAVX2, {v16s16, v8s32})
      .legalFor(HasAVX512, {v16s32})
      .legalFor(HasDQI, {v8s64})
      .legalFor(HasDQI && HasVLX, {v2s64, v4s64})
      .legalFor(HasBWI, {v32s16})
      .clampMinNumElements(0, s16, 8)
      .clampMinNumElements(0, s32, 4)
      .clampMinNumElements(0, s64, HasVLX ? 2 : 8)
      .clampMaxNumElements(0, s16, HasBWI ? 32 : (HasAVX2 ? 16 : 8))
      .clampMaxNumElements(0, s32, HasAVX512 ? 16 : (HasAVX2 ? 8 : 4))
      .clampMaxNumElements(0, s64, 8)
      .widenScalarToNextPow2(0, /*Min=*/32)
      .clampScalar(0, s8, sMaxScalar)
      .scalarize(0);

  getActionDefinitionsBuilder({G_SMULH, G_UMULH})
      .legalFor({s8, s16, s32})
      .legalFor(Is64Bit, {s64})
      .widenScalarToNextPow2(0, /*Min=*/32)
      .clampScalar(0, s8, sMaxScalar)
      .scalarize(0);

  // integer divisions
  getActionDefinitionsBuilder({G_SDIV, G_SREM, G_UDIV, G_UREM})
      .legalFor({s8, s16, s32})
      .legalFor(Is64Bit, {s64})
      .libcallFor({s64})
      .clampScalar(0, s8, sMaxScalar);

  // integer shifts
  getActionDefinitionsBuilder({G_SHL, G_LSHR, G_ASHR})
      .legalFor({{s8, s8}, {s16, s8}, {s32, s8}})
      .legalFor(Is64Bit, {{s64, s8}})
      .clampScalar(0, s8, sMaxScalar)
      .clampScalar(1, s8, s8);

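  // Note (editorial): the shift amount (type index 1) is always clamped to
  // s8, matching x86's variable shifts, which take their count in CL.
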
  // integer logic
  getActionDefinitionsBuilder({G_AND, G_OR, G_XOR})
      .legalFor({s8, s16, s32})
      .legalFor(Is64Bit, {s64})
      .legalFor(HasSSE2, {v16s8, v8s16, v4s32, v2s64})
      .legalFor(HasAVX, {v32s8, v16s16, v8s32, v4s64})
      .legalFor(HasAVX512, {v64s8, v32s16, v16s32, v8s64})
      .clampMinNumElements(0, s8, 16)
      .clampMinNumElements(0, s16, 8)
      .clampMinNumElements(0, s32, 4)
      .clampMinNumElements(0, s64, 2)
      .clampMaxNumElements(0, s8, HasAVX512 ? 64 : (HasAVX ? 32 : 16))
      .clampMaxNumElements(0, s16, HasAVX512 ? 32 : (HasAVX ? 16 : 8))
      .clampMaxNumElements(0, s32, HasAVX512 ? 16 : (HasAVX ? 8 : 4))
      .clampMaxNumElements(0, s64, HasAVX512 ? 8 : (HasAVX ? 4 : 2))
      .widenScalarToNextPow2(0, /*Min=*/32)
      .clampScalar(0, s8, sMaxScalar)
      .scalarize(0);

  // integer comparison
  const std::initializer_list<LLT> IntTypes32 = {s8, s16, s32, p0};
  const std::initializer_list<LLT> IntTypes64 = {s8, s16, s32, s64, p0};

  getActionDefinitionsBuilder(G_ICMP)
      .legalForCartesianProduct({s8}, Is64Bit ? IntTypes64 : IntTypes32)
      .clampScalar(0, s8, s8)
      .clampScalar(1, s8, sMaxScalar);

  // bswap
  getActionDefinitionsBuilder(G_BSWAP)
      .legalFor({s32})
      .legalFor(Is64Bit, {s64})
      .widenScalarToNextPow2(0, /*Min=*/32)
      .clampScalar(0, s32, sMaxScalar);

  // popcount
  getActionDefinitionsBuilder(G_CTPOP)
      .legalFor(HasPOPCNT, {{s16, s16}, {s32, s32}})
      .legalFor(HasPOPCNT && Is64Bit, {{s64, s64}})
      .widenScalarToNextPow2(1, /*Min=*/16)
      .clampScalar(1, s16, sMaxScalar)
      .scalarSameSizeAs(0, 1);

  // count leading zeros (LZCNT)
  getActionDefinitionsBuilder(G_CTLZ)
      .legalFor(HasLZCNT, {{s16, s16}, {s32, s32}})
      .legalFor(HasLZCNT && Is64Bit, {{s64, s64}})
      .widenScalarToNextPow2(1, /*Min=*/16)
      .clampScalar(1, s16, sMaxScalar)
      .scalarSameSizeAs(0, 1);

  // count trailing zeros
  getActionDefinitionsBuilder(G_CTTZ_ZERO_UNDEF)
      .legalFor({{s16, s16}, {s32, s32}})
      .legalFor(Is64Bit, {{s64, s64}})
      .widenScalarToNextPow2(1, /*Min=*/16)
      .clampScalar(1, s16, sMaxScalar)
      .scalarSameSizeAs(0, 1);

  getActionDefinitionsBuilder(G_CTTZ)
      .legalFor(HasBMI, {{s16, s16}, {s32, s32}})
      .legalFor(HasBMI && Is64Bit, {{s64, s64}})
      .widenScalarToNextPow2(1, /*Min=*/16)
      .clampScalar(1, s16, sMaxScalar)
      .scalarSameSizeAs(0, 1);

  // control flow
  getActionDefinitionsBuilder(G_PHI)
      .legalFor({s8, s16, s32, p0})
      .legalFor(UseX87, {s80})
      .legalFor(Is64Bit, {s64})
      .legalFor(HasSSE1, {v16s8, v8s16, v4s32, v2s64})
      .legalFor(HasAVX, {v32s8, v16s16, v8s32, v4s64})
      .legalFor(HasAVX512, {v64s8, v32s16, v16s32, v8s64})
      .clampMinNumElements(0, s8, 16)
      .clampMinNumElements(0, s16, 8)
      .clampMinNumElements(0, s32, 4)
      .clampMinNumElements(0, s64, 2)
      .clampMaxNumElements(0, s8, HasAVX512 ? 64 : (HasAVX ? 32 : 16))
      .clampMaxNumElements(0, s16, HasAVX512 ? 32 : (HasAVX ? 16 : 8))
      .clampMaxNumElements(0, s32, HasAVX512 ? 16 : (HasAVX ? 8 : 4))
      .clampMaxNumElements(0, s64, HasAVX512 ? 8 : (HasAVX ? 4 : 2))
      .widenScalarToNextPow2(0, /*Min=*/32)
      .clampScalar(0, s8, sMaxScalar)
      .scalarize(0);

  getActionDefinitionsBuilder(G_BRCOND).legalFor({s1});

  // pointer handling
  const std::initializer_list<LLT> PtrTypes32 = {s1, s8, s16, s32};
  const std::initializer_list<LLT> PtrTypes64 = {s1, s8, s16, s32, s64};

  getActionDefinitionsBuilder(G_PTRTOINT)
      .legalForCartesianProduct(Is64Bit ? PtrTypes64 : PtrTypes32, {p0})
      .maxScalar(0, sMaxScalar)
      .widenScalarToNextPow2(0, /*Min*/ 8);

  getActionDefinitionsBuilder(G_INTTOPTR).legalFor({{p0, sMaxScalar}});

  getActionDefinitionsBuilder(G_CONSTANT_POOL).legalFor({p0});

  getActionDefinitionsBuilder(G_PTR_ADD)
      .legalFor({{p0, s32}})
      .legalFor(Is64Bit, {{p0, s64}})
      .widenScalarToNextPow2(1, /*Min*/ 32)
      .clampScalar(1, s32, sMaxScalar);

  getActionDefinitionsBuilder({G_FRAME_INDEX, G_GLOBAL_VALUE}).legalFor({p0});

  // load/store: add more corner cases
  for (unsigned Op : {G_LOAD, G_STORE}) {
    auto &Action = getActionDefinitionsBuilder(Op);
    Action.legalForTypesWithMemDesc({{s8, p0, s8, 1},
                                     {s16, p0, s16, 1},
                                     {s32, p0, s32, 1},
                                     {s80, p0, s80, 1},
                                     {p0, p0, p0, 1},
                                     {v4s8, p0, v4s8, 1}});
    if (Is64Bit)
      Action.legalForTypesWithMemDesc(
          {{s64, p0, s64, 1}, {v2s32, p0, v2s32, 1}});

    if (HasSSE1)
      Action.legalForTypesWithMemDesc({{v4s32, p0, v4s32, 1}});
    if (HasSSE2)
      Action.legalForTypesWithMemDesc({{v16s8, p0, v16s8, 1},
                                       {v8s16, p0, v8s16, 1},
                                       {v2s64, p0, v2s64, 1},
                                       {v2p0, p0, v2p0, 1}});
    if (HasAVX)
      Action.legalForTypesWithMemDesc({{v32s8, p0, v32s8, 1},
                                       {v16s16, p0, v16s16, 1},
                                       {v8s32, p0, v8s32, 1},
                                       {v4s64, p0, v4s64, 1},
                                       {v4p0, p0, v4p0, 1}});
    if (HasAVX512)
      Action.legalForTypesWithMemDesc({{v64s8, p0, v64s8, 1},
                                       {v32s16, p0, v32s16, 1},
                                       {v16s32, p0, v16s32, 1},
                                       {v8s64, p0, v8s64, 1}});

    // X86 supports extending loads but not stores for GPRs
    if (Op == G_LOAD) {
      Action.legalForTypesWithMemDesc({{s8, p0, s1, 1},
                                       {s16, p0, s8, 1},
                                       {s32, p0, s8, 1},
                                       {s32, p0, s16, 1}});
      if (Is64Bit)
        Action.legalForTypesWithMemDesc(
            {{s64, p0, s8, 1}, {s64, p0, s16, 1}, {s64, p0, s32, 1}});
    } else {
      Action.customIf([=](const LegalityQuery &Query) {
        return Query.Types[0] != Query.MMODescrs[0].MemoryTy;
      });
    }
    Action.widenScalarToNextPow2(0, /*Min=*/8)
        .clampScalar(0, s8, sMaxScalar)
        .scalarize(0);
  }

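  // Illustration (editorial, not from the source): the extending-load entries
  // above make
  //   %v:_(s32) = G_LOAD %p:_(p0) :: (load (s16))
  // directly legal (MOVZX/MOVSX-style), while a narrowing store such as
  //   G_STORE %v:_(s32), %p:_(p0) :: (store (s16))
  // is routed to the custom handler (legalizeNarrowingStore below).
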
  for (unsigned Op : {G_SEXTLOAD, G_ZEXTLOAD}) {
    auto &Action = getActionDefinitionsBuilder(Op);
    Action.legalForTypesWithMemDesc(
        {{s16, p0, s8, 1}, {s32, p0, s8, 1}, {s32, p0, s16, 1}});
    if (Is64Bit)
      Action.legalForTypesWithMemDesc(
          {{s64, p0, s8, 1}, {s64, p0, s16, 1}, {s64, p0, s32, 1}});
    // TODO - SSE41/AVX2/AVX512F/AVX512BW vector extensions
  }

  // sext, zext, and anyext
  getActionDefinitionsBuilder(G_ANYEXT)
      .legalFor({s8, s16, s32, s128})
      .legalFor(Is64Bit, {s64})
      .widenScalarToNextPow2(0, /*Min=*/8)
      .clampScalar(0, s8, sMaxScalar)
      .widenScalarToNextPow2(1, /*Min=*/8)
      .clampScalar(1, s8, sMaxScalar)
      .scalarize(0);

  getActionDefinitionsBuilder({G_SEXT, G_ZEXT})
      .legalFor({s8, s16, s32})
      .legalFor(Is64Bit, {s64})
      .widenScalarToNextPow2(0, /*Min=*/8)
      .clampScalar(0, s8, sMaxScalar)
      .widenScalarToNextPow2(1, /*Min=*/8)
      .clampScalar(1, s8, sMaxScalar)
      .scalarize(0);

  getActionDefinitionsBuilder(G_SEXT_INREG).lower();

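  // Note (editorial): lower() expands G_SEXT_INREG with the generic
  // LegalizerHelper sequence, conceptually a left shift followed by an
  // arithmetic right shift of the same amount.
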
  // fp constants
  getActionDefinitionsBuilder(G_FCONSTANT)
      .legalFor({s32, s64})
      .legalFor(UseX87, {s80});

  // fp arithmetic
  getActionDefinitionsBuilder({G_FADD, G_FSUB, G_FMUL, G_FDIV})
      .legalFor({s32, s64})
      .legalFor(HasSSE1, {v4s32})
      .legalFor(HasSSE2, {v2s64})
      .legalFor(HasAVX, {v8s32, v4s64})
      .legalFor(HasAVX512, {v16s32, v8s64})
      .legalFor(UseX87, {s80});

  getActionDefinitionsBuilder(G_FABS)
      .legalFor(UseX87, {s80})
      .legalFor(UseX87 && !Is64Bit, {s64})
      .lower();

  // fp comparison
  getActionDefinitionsBuilder(G_FCMP)
      .legalFor(HasSSE1 || UseX87, {s8, s32})
      .legalFor(HasSSE2 || UseX87, {s8, s64})
      .legalFor(UseX87, {s8, s80})
      .clampScalar(0, s8, s8)
      .clampScalar(1, s32, HasSSE2 ? s64 : s32)
      .widenScalarToNextPow2(1);

  // fp conversions
  getActionDefinitionsBuilder(G_FPEXT)
      .legalFor(HasSSE2, {{s64, s32}})
      .legalFor(HasAVX, {{v4s64, v4s32}})
      .legalFor(HasAVX512, {{v8s64, v8s32}});

  getActionDefinitionsBuilder(G_FPTRUNC)
      .legalFor(HasSSE2, {{s32, s64}})
      .legalFor(HasAVX, {{v4s32, v4s64}})
      .legalFor(HasAVX512, {{v8s32, v8s64}});

  getActionDefinitionsBuilder(G_SITOFP)
      .legalFor(HasSSE1, {{s32, s32}})
      .legalFor(HasSSE1 && Is64Bit, {{s32, s64}})
      .legalFor(HasSSE2, {{s64, s32}})
      .legalFor(HasSSE2 && Is64Bit, {{s64, s64}})
      .clampScalar(1, (UseX87 && !HasSSE1) ? s16 : s32, sMaxScalar)
      .widenScalarToNextPow2(1)
      .customForCartesianProduct(UseX87, {s32, s64, s80}, {s16, s32, s64})
      .clampScalar(0, s32, HasSSE2 ? s64 : s32)
      .widenScalarToNextPow2(0);

  getActionDefinitionsBuilder(G_FPTOSI)
      .legalFor(HasSSE1, {{s32, s32}})
      .legalFor(HasSSE1 && Is64Bit, {{s64, s32}})
      .legalFor(HasSSE2, {{s32, s64}})
      .legalFor(HasSSE2 && Is64Bit, {{s64, s64}})
      .clampScalar(0, (UseX87 && !HasSSE1) ? s16 : s32, sMaxScalar)
      .widenScalarToNextPow2(0)
      .customForCartesianProduct(UseX87, {s16, s32, s64}, {s32, s64, s80})
      .clampScalar(1, s32, HasSSE2 ? s64 : s32)
      .widenScalarToNextPow2(1);

  // For G_UITOFP and G_FPTOUI without AVX512, we have to custom legalize types
  // <= s32 manually. Otherwise, in the custom handler there is no way to tell
  // whether s32 is an original type that must be promoted to s64, or whether
  // it was obtained by widening and should not be widened to s64 again.
  //
  // For AVX512 we simply widen types, as there is a direct mapping from these
  // opcodes to asm instructions.
  getActionDefinitionsBuilder(G_UITOFP)
      .legalFor(HasAVX512, {{s32, s32}, {s32, s64}, {s64, s32}, {s64, s64}})
      .customIf([=](const LegalityQuery &Query) {
        return !HasAVX512 &&
               ((HasSSE1 && typeIs(0, s32)(Query)) ||
                (HasSSE2 && typeIs(0, s64)(Query))) &&
               scalarNarrowerThan(1, Is64Bit ? 64 : 32)(Query);
      })
      .lowerIf([=](const LegalityQuery &Query) {
        // Lower conversions from s64
        return !HasAVX512 &&
               ((HasSSE1 && typeIs(0, s32)(Query)) ||
                (HasSSE2 && typeIs(0, s64)(Query))) &&
               (Is64Bit && typeIs(1, s64)(Query));
      })
      .clampScalar(0, s32, HasSSE2 ? s64 : s32)
      .widenScalarToNextPow2(0)
      .clampScalar(1, s32, sMaxScalar)
      .widenScalarToNextPow2(1);

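  // Illustration (editorial, not from the source): on SSE2 without AVX512, an
  //   %f:_(s64) = G_UITOFP %x:_(s16)
  // matches the customIf above (source narrower than the native width) and is
  // handled in legalizeUITOFP by zero-extending %x and reusing G_SITOFP.
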
  getActionDefinitionsBuilder(G_FPTOUI)
      .legalFor(HasAVX512, {{s32, s32}, {s32, s64}, {s64, s32}, {s64, s64}})
      .customIf([=](const LegalityQuery &Query) {
        return !HasAVX512 &&
               ((HasSSE1 && typeIs(1, s32)(Query)) ||
                (HasSSE2 && typeIs(1, s64)(Query))) &&
               scalarNarrowerThan(0, Is64Bit ? 64 : 32)(Query);
      })
      // TODO: replace with customized legalization using
      // specifics of cvttsd2si. The selection of this node requires
      // a vector type. Either G_SCALAR_TO_VECTOR is needed or more advanced
      // support of G_BUILD_VECTOR/G_INSERT_VECTOR_ELT is required beforehand.
      .lowerIf([=](const LegalityQuery &Query) {
        return !HasAVX512 &&
               ((HasSSE1 && typeIs(1, s32)(Query)) ||
                (HasSSE2 && typeIs(1, s64)(Query))) &&
               (Is64Bit && typeIs(0, s64)(Query));
      })
      .clampScalar(0, s32, sMaxScalar)
      .widenScalarToNextPow2(0)
      .clampScalar(1, s32, HasSSE2 ? s64 : s32)
      .widenScalarToNextPow2(1);

  // vector ops
  getActionDefinitionsBuilder(G_BUILD_VECTOR)
      .customIf([=](const LegalityQuery &Query) {
        return (HasSSE1 && typeInSet(0, {v4s32})(Query)) ||
               (HasSSE2 && typeInSet(0, {v2s64, v8s16, v16s8})(Query)) ||
               (HasAVX && typeInSet(0, {v4s64, v8s32, v16s16, v32s8})(Query)) ||
               (HasAVX512 &&
                typeInSet(0, {v8s64, v16s32, v32s16, v64s8})(Query));
      })
      .clampNumElements(0, v16s8, s8MaxVector)
      .clampNumElements(0, v8s16, s16MaxVector)
      .clampNumElements(0, v4s32, s32MaxVector)
      .clampNumElements(0, v2s64, s64MaxVector)
      .moreElementsToNextPow2(0);

  getActionDefinitionsBuilder({G_EXTRACT, G_INSERT})
      .legalIf([=](const LegalityQuery &Query) {
        unsigned SubIdx = Query.Opcode == G_EXTRACT ? 0 : 1;
        unsigned FullIdx = Query.Opcode == G_EXTRACT ? 1 : 0;
        return (HasAVX && typePairInSet(SubIdx, FullIdx,
                                        {{v16s8, v32s8},
                                         {v8s16, v16s16},
                                         {v4s32, v8s32},
                                         {v2s64, v4s64}})(Query)) ||
               (HasAVX512 && typePairInSet(SubIdx, FullIdx,
                                           {{v16s8, v64s8},
                                            {v32s8, v64s8},
                                            {v8s16, v32s16},
                                            {v16s16, v32s16},
                                            {v4s32, v16s32},
                                            {v8s32, v16s32},
                                            {v2s64, v8s64},
                                            {v4s64, v8s64}})(Query));
      });

  // todo: only permit dst types up to max legal vector register size?
  getActionDefinitionsBuilder(G_CONCAT_VECTORS)
      .legalFor(
          HasSSE1,
          {{v32s8, v16s8}, {v16s16, v8s16}, {v8s32, v4s32}, {v4s64, v2s64}})
      .legalFor(HasAVX, {{v64s8, v16s8},
                         {v64s8, v32s8},
                         {v32s16, v8s16},
                         {v32s16, v16s16},
                         {v16s32, v4s32},
                         {v16s32, v8s32},
                         {v8s64, v2s64},
                         {v8s64, v4s64}});

  // todo: vectors and address spaces
  getActionDefinitionsBuilder(G_SELECT)
      .legalFor({{s8, s32}, {s16, s32}, {s32, s32}, {s64, s32}, {p0, s32}})
      .widenScalarToNextPow2(0, /*Min=*/8)
      .clampScalar(0, HasCMOV ? s16 : s8, sMaxScalar)
      .clampScalar(1, s32, s32);

  // memory intrinsics
  getActionDefinitionsBuilder({G_MEMCPY, G_MEMMOVE, G_MEMSET}).libcall();

  getActionDefinitionsBuilder({G_DYN_STACKALLOC, G_STACKSAVE, G_STACKRESTORE})
      .lower();

  // fp intrinsics
  getActionDefinitionsBuilder({G_INTRINSIC_ROUNDEVEN, G_INTRINSIC_TRUNC})
      .scalarize(0)
      .minScalar(0, LLT::scalar(32))
      .libcall();

  getActionDefinitionsBuilder({G_FREEZE, G_CONSTANT_FOLD_BARRIER})
      .legalFor({s8, s16, s32, s64, p0})
      .widenScalarToNextPow2(0, /*Min=*/8)
      .clampScalar(0, s8, sMaxScalar);

  getLegacyLegalizerInfo().computeTables();
  verify(*STI.getInstrInfo());
}

bool X86LegalizerInfo::legalizeCustom(LegalizerHelper &Helper, MachineInstr &MI,
                                      LostDebugLocObserver &LocObserver) const {
  MachineIRBuilder &MIRBuilder = Helper.MIRBuilder;
  MachineRegisterInfo &MRI = *MIRBuilder.getMRI();
  switch (MI.getOpcode()) {
  default:
    // No idea what to do.
    return false;
  case TargetOpcode::G_BUILD_VECTOR:
    return legalizeBuildVector(MI, MRI, Helper);
  case TargetOpcode::G_FPTOUI:
    return legalizeFPTOUI(MI, MRI, Helper);
  case TargetOpcode::G_UITOFP:
    return legalizeUITOFP(MI, MRI, Helper);
  case TargetOpcode::G_STORE:
    return legalizeNarrowingStore(MI, MRI, Helper);
  case TargetOpcode::G_SITOFP:
    return legalizeSITOFP(MI, MRI, Helper);
  case TargetOpcode::G_FPTOSI:
    return legalizeFPTOSI(MI, MRI, Helper);
  case TargetOpcode::G_GET_ROUNDING:
    return legalizeGETROUNDING(MI, MRI, Helper);
  }
  llvm_unreachable("expected switch to return");
}

bool X86LegalizerInfo::legalizeSITOFP(MachineInstr &MI,
                                      MachineRegisterInfo &MRI,
                                      LegalizerHelper &Helper) const {
  MachineIRBuilder &MIRBuilder = Helper.MIRBuilder;
  MachineFunction &MF = *MI.getMF();
  auto [Dst, DstTy, Src, SrcTy] = MI.getFirst2RegLLTs();

  assert((SrcTy.getSizeInBits() == 16 || SrcTy.getSizeInBits() == 32 ||
          SrcTy.getSizeInBits() == 64) &&
         "Unexpected source type for SITOFP in X87 mode.");

  TypeSize MemSize = SrcTy.getSizeInBytes();
  MachinePointerInfo PtrInfo;
  Align Alignment = Helper.getStackTemporaryAlignment(SrcTy);
  auto SlotPointer = Helper.createStackTemporary(MemSize, Alignment, PtrInfo);
  MachineMemOperand *StoreMMO = MF.getMachineMemOperand(
      PtrInfo, MachineMemOperand::MOStore, MemSize, Align(MemSize));

  // Spill the integer value to the stack slot; G_FILD then loads it onto the
  // FPU stack.
  MIRBuilder.buildStore(Src, SlotPointer, *StoreMMO);

  MachineMemOperand *LoadMMO = MF.getMachineMemOperand(
      PtrInfo, MachineMemOperand::MOLoad, MemSize, Align(MemSize));
  MIRBuilder.buildInstr(X86::G_FILD)
      .addDef(Dst)
      .addUse(SlotPointer.getReg(0))
      .addMemOperand(LoadMMO);

  MI.eraseFromParent();
  return true;
}

bool X86LegalizerInfo::legalizeFPTOSI(MachineInstr &MI,
                                      MachineRegisterInfo &MRI,
                                      LegalizerHelper &Helper) const {
  MachineFunction &MF = *MI.getMF();
  MachineIRBuilder &MIRBuilder = Helper.MIRBuilder;
  auto [Dst, DstTy, Src, SrcTy] = MI.getFirst2RegLLTs();

  TypeSize MemSize = DstTy.getSizeInBytes();
  MachinePointerInfo PtrInfo;
  Align Alignment = Helper.getStackTemporaryAlignment(DstTy);
  auto SlotPointer = Helper.createStackTemporary(MemSize, Alignment, PtrInfo);
  MachineMemOperand *StoreMMO = MF.getMachineMemOperand(
      PtrInfo, MachineMemOperand::MOStore, MemSize, Align(MemSize));

  MIRBuilder.buildInstr(X86::G_FIST)
      .addUse(Src)
      .addUse(SlotPointer.getReg(0))
      .addMemOperand(StoreMMO);

  MIRBuilder.buildLoad(Dst, SlotPointer, PtrInfo, Align(MemSize));
  MI.eraseFromParent();
  return true;
}

bool X86LegalizerInfo::legalizeBuildVector(MachineInstr &MI,
                                           MachineRegisterInfo &MRI,
                                           LegalizerHelper &Helper) const {
  MachineIRBuilder &MIRBuilder = Helper.MIRBuilder;
  const auto &BuildVector = cast<GBuildVector>(MI);
  Register Dst = BuildVector.getReg(0);
  LLT DstTy = MRI.getType(Dst);
  MachineFunction &MF = MIRBuilder.getMF();
  LLVMContext &Ctx = MF.getFunction().getContext();
  uint64_t DstTySize = DstTy.getScalarSizeInBits();

  SmallVector<Constant *, 16> CstIdxs;
  for (unsigned i = 0; i < BuildVector.getNumSources(); ++i) {
    Register Source = BuildVector.getSourceReg(i);

    auto ValueAndReg = getIConstantVRegValWithLookThrough(Source, MRI);
    if (ValueAndReg) {
      CstIdxs.emplace_back(ConstantInt::get(Ctx, ValueAndReg->Value));
      continue;
    }

    auto FPValueAndReg = getFConstantVRegValWithLookThrough(Source, MRI);
    if (FPValueAndReg) {
      CstIdxs.emplace_back(ConstantFP::get(Ctx, FPValueAndReg->Value));
      continue;
    }

    if (getOpcodeDef<GImplicitDef>(Source, MRI)) {
      CstIdxs.emplace_back(UndefValue::get(Type::getIntNTy(Ctx, DstTySize)));
      continue;
    }
    return false;
  }

  Constant *ConstVal = ConstantVector::get(CstIdxs);

  const DataLayout &DL = MIRBuilder.getDataLayout();
  unsigned AddrSpace = DL.getDefaultGlobalsAddressSpace();
  Align Alignment(DL.getABITypeAlign(ConstVal->getType()));
  auto Addr = MIRBuilder.buildConstantPool(
      LLT::pointer(AddrSpace, DL.getPointerSizeInBits(AddrSpace)),
      MF.getConstantPool()->getConstantPoolIndex(ConstVal, Alignment));
  MachineMemOperand *MMO =
      MF.getMachineMemOperand(MachinePointerInfo::getConstantPool(MF),
                              MachineMemOperand::MOLoad, DstTy, Alignment);

  MIRBuilder.buildLoad(Dst, Addr, *MMO);
  MI.eraseFromParent();
  return true;
}

bool X86LegalizerInfo::legalizeFPTOUI(MachineInstr &MI,
                                      MachineRegisterInfo &MRI,
                                      LegalizerHelper &Helper) const {
  MachineIRBuilder &MIRBuilder = Helper.MIRBuilder;
  auto [Dst, DstTy, Src, SrcTy] = MI.getFirst2RegLLTs();
  unsigned DstSizeInBits = DstTy.getScalarSizeInBits();
  const LLT s32 = LLT::scalar(32);
  const LLT s64 = LLT::scalar(64);

  // Simply reuse FPTOSI when it is possible to widen the type
  if (DstSizeInBits <= 32) {
    auto Casted = MIRBuilder.buildFPTOSI(DstTy == s32 ? s64 : s32, Src);
    MIRBuilder.buildTrunc(Dst, Casted);
    MI.eraseFromParent();
    return true;
  }

  return false;
}

bool X86LegalizerInfo::legalizeUITOFP(MachineInstr &MI,
                                      MachineRegisterInfo &MRI,
                                      LegalizerHelper &Helper) const {
  MachineIRBuilder &MIRBuilder = Helper.MIRBuilder;
  auto [Dst, DstTy, Src, SrcTy] = MI.getFirst2RegLLTs();
  const LLT s32 = LLT::scalar(32);
  const LLT s64 = LLT::scalar(64);

  // Simply reuse SITOFP when it is possible to widen the type
  if (SrcTy.getSizeInBits() <= 32) {
    auto Ext = MIRBuilder.buildZExt(SrcTy == s32 ? s64 : s32, Src);
    MIRBuilder.buildSITOFP(Dst, Ext);
    MI.eraseFromParent();
    return true;
  }

  return false;
}

bool X86LegalizerInfo::legalizeNarrowingStore(MachineInstr &MI,
                                              MachineRegisterInfo &MRI,
                                              LegalizerHelper &Helper) const {
  auto &Store = cast<GStore>(MI);
  MachineIRBuilder &MIRBuilder = Helper.MIRBuilder;
  MachineMemOperand &MMO = **Store.memoperands_begin();
  MachineFunction &MF = MIRBuilder.getMF();
  LLT ValTy = MRI.getType(Store.getValueReg());
  auto *NewMMO = MF.getMachineMemOperand(&MMO, MMO.getPointerInfo(), ValTy);

  Helper.Observer.changingInstr(Store);
  Store.setMemRefs(MF, {NewMMO});
  Helper.Observer.changedInstr(Store);
  return true;
}

bool X86LegalizerInfo::legalizeGETROUNDING(MachineInstr &MI,
                                           MachineRegisterInfo &MRI,
                                           LegalizerHelper &Helper) const {
  /*
    The rounding mode is in bits 11:10 of FPSR, and has the following
    settings:
      00 Round to nearest
      01 Round to -inf
      10 Round to +inf
      11 Round to 0

    GET_ROUNDING, on the other hand, expects the following:
      -1 Undefined
       0 Round to 0
       1 Round to nearest
       2 Round to +inf
       3 Round to -inf

    To perform the conversion, we use a packed lookup table of the four 2-bit
    values that we can index by FPSR[11:10]:
      0x2d --> (0b00,10,11,01) --> (0,2,3,1) >> FPSR[11:10]

      (0x2d >> ((FPSR >> 9) & 6)) & 3
  */

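  // Worked example (editorial): with RC = FPCW[11:10] = 0b01 (round to -inf),
  // (FPSR >> 9) & 6 yields 2, and (0x2d >> 2) & 3 = (0b101101 >> 2) & 3 =
  // 0b1011 & 3 = 3, which is the GET_ROUNDING encoding for round to -inf.
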
  MachineIRBuilder &MIRBuilder = Helper.MIRBuilder;
  MachineFunction &MF = MIRBuilder.getMF();
  Register Dst = MI.getOperand(0).getReg();
  LLT DstTy = MRI.getType(Dst);
  const LLT s8 = LLT::scalar(8);
  const LLT s16 = LLT::scalar(16);
  const LLT s32 = LLT::scalar(32);

  // Save FP Control Word to stack slot
  int MemSize = 2;
  Align Alignment = Align(2);
  MachinePointerInfo PtrInfo;
  auto StackTemp = Helper.createStackTemporary(TypeSize::getFixed(MemSize),
                                               Alignment, PtrInfo);
  Register StackPtr = StackTemp.getReg(0);

  auto StoreMMO = MF.getMachineMemOperand(PtrInfo, MachineMemOperand::MOStore,
                                          MemSize, Alignment);

  // Store FP Control Word to stack slot using G_FNSTCW16
  MIRBuilder.buildInstr(X86::G_FNSTCW16)
      .addUse(StackPtr)
      .addMemOperand(StoreMMO);

  // Load FP Control Word from stack slot
  auto LoadMMO = MF.getMachineMemOperand(PtrInfo, MachineMemOperand::MOLoad,
                                         MemSize, Alignment);

  auto CWD32 =
      MIRBuilder.buildZExt(s32, MIRBuilder.buildLoad(s16, StackPtr, *LoadMMO));
  auto Shifted8 = MIRBuilder.buildTrunc(
      s8, MIRBuilder.buildLShr(s32, CWD32, MIRBuilder.buildConstant(s8, 9)));
  auto Masked32 = MIRBuilder.buildZExt(
      s32, MIRBuilder.buildAnd(s8, Shifted8, MIRBuilder.buildConstant(s8, 6)));

  // LUT is a packed lookup table (0x2d) used to map the 2-bit x87 FPU rounding
  // mode (from bits 11:10 of the control word) to the values expected by
  // GET_ROUNDING. The mapping is performed by shifting LUT right by the
  // extracted rounding mode and masking the result with 3 to obtain the final
  // return value.
  auto LUT = MIRBuilder.buildConstant(s32, 0x2d);
  auto LUTShifted = MIRBuilder.buildLShr(s32, LUT, Masked32);
  auto RetVal =
      MIRBuilder.buildAnd(s32, LUTShifted, MIRBuilder.buildConstant(s32, 3));
  auto RetValTrunc = MIRBuilder.buildZExtOrTrunc(DstTy, RetVal);

  MIRBuilder.buildCopy(Dst, RetValTrunc);

  MI.eraseFromParent();
  return true;
}


bool X86LegalizerInfo::legalizeIntrinsic(LegalizerHelper &Helper,
                                         MachineInstr &MI) const {
  return true;
}