//===- TargetLoweringBase.cpp - Implement the TargetLoweringBase class ----===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This implements the TargetLoweringBase class.
//
//===----------------------------------------------------------------------===//

#include "llvm/ADT/BitVector.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/ADT/Twine.h"
#include "llvm/Analysis/Loads.h"
#include "llvm/Analysis/ProfileSummaryInfo.h"
#include "llvm/CodeGen/Analysis.h"
#include "llvm/CodeGen/ISDOpcodes.h"
#include "llvm/CodeGen/MachineBasicBlock.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineInstr.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineMemOperand.h"
#include "llvm/CodeGen/MachineOperand.h"
#include "llvm/CodeGen/StackMaps.h"
#include "llvm/CodeGen/TargetLowering.h"
#include "llvm/CodeGen/TargetOpcodes.h"
#include "llvm/CodeGen/TargetRegisterInfo.h"
#include "llvm/CodeGen/ValueTypes.h"
#include "llvm/IR/Attributes.h"
#include "llvm/IR/CallingConv.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/GlobalValue.h"
#include "llvm/IR/GlobalVariable.h"
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/Module.h"
#include "llvm/IR/Type.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Target/TargetMachine.h"
#include "llvm/Transforms/Utils/SizeOpts.h"
#include <algorithm>
#include <cassert>
#include <cstdint>
#include <cstring>
#include <iterator>
#include <string>
#include <tuple>
#include <utility>

using namespace llvm;

static cl::opt<bool> JumpIsExpensiveOverride(
    "jump-is-expensive", cl::init(false),
    cl::desc("Do not create extra branches to split comparison logic."),
    cl::Hidden);

static cl::opt<unsigned> MinimumJumpTableEntries(
    "min-jump-table-entries", cl::init(4), cl::Hidden,
    cl::desc("Set minimum number of entries to use a jump table."));

static cl::opt<unsigned> MaximumJumpTableSize(
    "max-jump-table-size", cl::init(UINT_MAX), cl::Hidden,
    cl::desc("Set maximum size of jump tables."));

/// Minimum jump table density for normal functions.
static cl::opt<unsigned>
    JumpTableDensity("jump-table-density", cl::init(10), cl::Hidden,
                     cl::desc("Minimum density for building a jump table in "
                              "a normal function"));

/// Minimum jump table density for -Os or -Oz functions.
static cl::opt<unsigned> OptsizeJumpTableDensity(
    "optsize-jump-table-density", cl::init(40), cl::Hidden,
    cl::desc("Minimum density for building a jump table in "
             "an optsize function"));

// FIXME: This option exists only to test that strict FP operations are
// processed correctly, by preventing strict FP operations from being mutated
// into normal FP operations during development. Once the backend fully
// supports strict float operations, this option will be meaningless.
static cl::opt<bool> DisableStrictNodeMutation("disable-strictnode-mutation",
       cl::desc("Don't mutate strict-float node to a legalize node"),
       cl::init(false), cl::Hidden);

/// GetFPLibCall - Helper to return the right libcall for the given floating
/// point type, or UNKNOWN_LIBCALL if there is none.
RTLIB::Libcall RTLIB::getFPLibCall(EVT VT,
                                   RTLIB::Libcall Call_F32,
                                   RTLIB::Libcall Call_F64,
                                   RTLIB::Libcall Call_F80,
                                   RTLIB::Libcall Call_F128,
                                   RTLIB::Libcall Call_PPCF128) {
  return
    VT == MVT::f32 ? Call_F32 :
    VT == MVT::f64 ? Call_F64 :
    VT == MVT::f80 ? Call_F80 :
    VT == MVT::f128 ? Call_F128 :
    VT == MVT::ppcf128 ? Call_PPCF128 :
    RTLIB::UNKNOWN_LIBCALL;
}
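
// Illustrative example (not part of the upstream file): the RTLIB::get*
// wrappers below dispatch through getFPLibCall, so a query such as
//   RTLIB::getPOWI(MVT::f64)
// selects the Call_F64 slot and yields RTLIB::POWI_F64, while a type with
// no entry (e.g. MVT::bf16) falls through to RTLIB::UNKNOWN_LIBCALL.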

/// getFPEXT - Return the FPEXT_*_* value for the given types, or
/// UNKNOWN_LIBCALL if there is none.
RTLIB::Libcall RTLIB::getFPEXT(EVT OpVT, EVT RetVT) {
  if (OpVT == MVT::f16) {
    if (RetVT == MVT::f32)
      return FPEXT_F16_F32;
    if (RetVT == MVT::f64)
      return FPEXT_F16_F64;
    if (RetVT == MVT::f80)
      return FPEXT_F16_F80;
    if (RetVT == MVT::f128)
      return FPEXT_F16_F128;
  } else if (OpVT == MVT::f32) {
    if (RetVT == MVT::f64)
      return FPEXT_F32_F64;
    if (RetVT == MVT::f128)
      return FPEXT_F32_F128;
    if (RetVT == MVT::ppcf128)
      return FPEXT_F32_PPCF128;
  } else if (OpVT == MVT::f64) {
    if (RetVT == MVT::f128)
      return FPEXT_F64_F128;
    if (RetVT == MVT::ppcf128)
      return FPEXT_F64_PPCF128;
  } else if (OpVT == MVT::f80) {
    if (RetVT == MVT::f128)
      return FPEXT_F80_F128;
  } else if (OpVT == MVT::bf16) {
    if (RetVT == MVT::f32)
      return FPEXT_BF16_F32;
  }

  return UNKNOWN_LIBCALL;
}

/// getFPROUND - Return the FPROUND_*_* value for the given types, or
/// UNKNOWN_LIBCALL if there is none.
RTLIB::Libcall RTLIB::getFPROUND(EVT OpVT, EVT RetVT) {
  if (RetVT == MVT::f16) {
    if (OpVT == MVT::f32)
      return FPROUND_F32_F16;
    if (OpVT == MVT::f64)
      return FPROUND_F64_F16;
    if (OpVT == MVT::f80)
      return FPROUND_F80_F16;
    if (OpVT == MVT::f128)
      return FPROUND_F128_F16;
    if (OpVT == MVT::ppcf128)
      return FPROUND_PPCF128_F16;
  } else if (RetVT == MVT::bf16) {
    if (OpVT == MVT::f32)
      return FPROUND_F32_BF16;
    if (OpVT == MVT::f64)
      return FPROUND_F64_BF16;
    if (OpVT == MVT::f80)
      return FPROUND_F80_BF16;
    if (OpVT == MVT::f128)
      return FPROUND_F128_BF16;
  } else if (RetVT == MVT::f32) {
    if (OpVT == MVT::f64)
      return FPROUND_F64_F32;
    if (OpVT == MVT::f80)
      return FPROUND_F80_F32;
    if (OpVT == MVT::f128)
      return FPROUND_F128_F32;
    if (OpVT == MVT::ppcf128)
      return FPROUND_PPCF128_F32;
  } else if (RetVT == MVT::f64) {
    if (OpVT == MVT::f80)
      return FPROUND_F80_F64;
    if (OpVT == MVT::f128)
      return FPROUND_F128_F64;
    if (OpVT == MVT::ppcf128)
      return FPROUND_PPCF128_F64;
  } else if (RetVT == MVT::f80) {
    if (OpVT == MVT::f128)
      return FPROUND_F128_F80;
  }

  return UNKNOWN_LIBCALL;
}

/// getFPTOSINT - Return the FPTOSINT_*_* value for the given types, or
/// UNKNOWN_LIBCALL if there is none.
RTLIB::Libcall RTLIB::getFPTOSINT(EVT OpVT, EVT RetVT) {
  if (OpVT == MVT::f16) {
    if (RetVT == MVT::i32)
      return FPTOSINT_F16_I32;
    if (RetVT == MVT::i64)
      return FPTOSINT_F16_I64;
    if (RetVT == MVT::i128)
      return FPTOSINT_F16_I128;
  } else if (OpVT == MVT::f32) {
    if (RetVT == MVT::i32)
      return FPTOSINT_F32_I32;
    if (RetVT == MVT::i64)
      return FPTOSINT_F32_I64;
    if (RetVT == MVT::i128)
      return FPTOSINT_F32_I128;
  } else if (OpVT == MVT::f64) {
    if (RetVT == MVT::i32)
      return FPTOSINT_F64_I32;
    if (RetVT == MVT::i64)
      return FPTOSINT_F64_I64;
    if (RetVT == MVT::i128)
      return FPTOSINT_F64_I128;
  } else if (OpVT == MVT::f80) {
    if (RetVT == MVT::i32)
      return FPTOSINT_F80_I32;
    if (RetVT == MVT::i64)
      return FPTOSINT_F80_I64;
    if (RetVT == MVT::i128)
      return FPTOSINT_F80_I128;
  } else if (OpVT == MVT::f128) {
    if (RetVT == MVT::i32)
      return FPTOSINT_F128_I32;
    if (RetVT == MVT::i64)
      return FPTOSINT_F128_I64;
    if (RetVT == MVT::i128)
      return FPTOSINT_F128_I128;
  } else if (OpVT == MVT::ppcf128) {
    if (RetVT == MVT::i32)
      return FPTOSINT_PPCF128_I32;
    if (RetVT == MVT::i64)
      return FPTOSINT_PPCF128_I64;
    if (RetVT == MVT::i128)
      return FPTOSINT_PPCF128_I128;
  }
  return UNKNOWN_LIBCALL;
}

/// getFPTOUINT - Return the FPTOUINT_*_* value for the given types, or
/// UNKNOWN_LIBCALL if there is none.
RTLIB::Libcall RTLIB::getFPTOUINT(EVT OpVT, EVT RetVT) {
  if (OpVT == MVT::f16) {
    if (RetVT == MVT::i32)
      return FPTOUINT_F16_I32;
    if (RetVT == MVT::i64)
      return FPTOUINT_F16_I64;
    if (RetVT == MVT::i128)
      return FPTOUINT_F16_I128;
  } else if (OpVT == MVT::f32) {
    if (RetVT == MVT::i32)
      return FPTOUINT_F32_I32;
    if (RetVT == MVT::i64)
      return FPTOUINT_F32_I64;
    if (RetVT == MVT::i128)
      return FPTOUINT_F32_I128;
  } else if (OpVT == MVT::f64) {
    if (RetVT == MVT::i32)
      return FPTOUINT_F64_I32;
    if (RetVT == MVT::i64)
      return FPTOUINT_F64_I64;
    if (RetVT == MVT::i128)
      return FPTOUINT_F64_I128;
  } else if (OpVT == MVT::f80) {
    if (RetVT == MVT::i32)
      return FPTOUINT_F80_I32;
    if (RetVT == MVT::i64)
      return FPTOUINT_F80_I64;
    if (RetVT == MVT::i128)
      return FPTOUINT_F80_I128;
  } else if (OpVT == MVT::f128) {
    if (RetVT == MVT::i32)
      return FPTOUINT_F128_I32;
    if (RetVT == MVT::i64)
      return FPTOUINT_F128_I64;
    if (RetVT == MVT::i128)
      return FPTOUINT_F128_I128;
  } else if (OpVT == MVT::ppcf128) {
    if (RetVT == MVT::i32)
      return FPTOUINT_PPCF128_I32;
    if (RetVT == MVT::i64)
      return FPTOUINT_PPCF128_I64;
    if (RetVT == MVT::i128)
      return FPTOUINT_PPCF128_I128;
  }
  return UNKNOWN_LIBCALL;
}

/// getSINTTOFP - Return the SINTTOFP_*_* value for the given types, or
/// UNKNOWN_LIBCALL if there is none.
RTLIB::Libcall RTLIB::getSINTTOFP(EVT OpVT, EVT RetVT) {
  if (OpVT == MVT::i32) {
    if (RetVT == MVT::f16)
      return SINTTOFP_I32_F16;
    if (RetVT == MVT::f32)
      return SINTTOFP_I32_F32;
    if (RetVT == MVT::f64)
      return SINTTOFP_I32_F64;
    if (RetVT == MVT::f80)
      return SINTTOFP_I32_F80;
    if (RetVT == MVT::f128)
      return SINTTOFP_I32_F128;
    if (RetVT == MVT::ppcf128)
      return SINTTOFP_I32_PPCF128;
  } else if (OpVT == MVT::i64) {
    if (RetVT == MVT::f16)
      return SINTTOFP_I64_F16;
    if (RetVT == MVT::f32)
      return SINTTOFP_I64_F32;
    if (RetVT == MVT::f64)
      return SINTTOFP_I64_F64;
    if (RetVT == MVT::f80)
      return SINTTOFP_I64_F80;
    if (RetVT == MVT::f128)
      return SINTTOFP_I64_F128;
    if (RetVT == MVT::ppcf128)
      return SINTTOFP_I64_PPCF128;
  } else if (OpVT == MVT::i128) {
    if (RetVT == MVT::f16)
      return SINTTOFP_I128_F16;
    if (RetVT == MVT::f32)
      return SINTTOFP_I128_F32;
    if (RetVT == MVT::f64)
      return SINTTOFP_I128_F64;
    if (RetVT == MVT::f80)
      return SINTTOFP_I128_F80;
    if (RetVT == MVT::f128)
      return SINTTOFP_I128_F128;
    if (RetVT == MVT::ppcf128)
      return SINTTOFP_I128_PPCF128;
  }
  return UNKNOWN_LIBCALL;
}

/// getUINTTOFP - Return the UINTTOFP_*_* value for the given types, or
/// UNKNOWN_LIBCALL if there is none.
RTLIB::Libcall RTLIB::getUINTTOFP(EVT OpVT, EVT RetVT) {
  if (OpVT == MVT::i32) {
    if (RetVT == MVT::f16)
      return UINTTOFP_I32_F16;
    if (RetVT == MVT::f32)
      return UINTTOFP_I32_F32;
    if (RetVT == MVT::f64)
      return UINTTOFP_I32_F64;
    if (RetVT == MVT::f80)
      return UINTTOFP_I32_F80;
    if (RetVT == MVT::f128)
      return UINTTOFP_I32_F128;
    if (RetVT == MVT::ppcf128)
      return UINTTOFP_I32_PPCF128;
  } else if (OpVT == MVT::i64) {
    if (RetVT == MVT::f16)
      return UINTTOFP_I64_F16;
    if (RetVT == MVT::f32)
      return UINTTOFP_I64_F32;
    if (RetVT == MVT::f64)
      return UINTTOFP_I64_F64;
    if (RetVT == MVT::f80)
      return UINTTOFP_I64_F80;
    if (RetVT == MVT::f128)
      return UINTTOFP_I64_F128;
    if (RetVT == MVT::ppcf128)
      return UINTTOFP_I64_PPCF128;
  } else if (OpVT == MVT::i128) {
    if (RetVT == MVT::f16)
      return UINTTOFP_I128_F16;
    if (RetVT == MVT::f32)
      return UINTTOFP_I128_F32;
    if (RetVT == MVT::f64)
      return UINTTOFP_I128_F64;
    if (RetVT == MVT::f80)
      return UINTTOFP_I128_F80;
    if (RetVT == MVT::f128)
      return UINTTOFP_I128_F128;
    if (RetVT == MVT::ppcf128)
      return UINTTOFP_I128_PPCF128;
  }
  return UNKNOWN_LIBCALL;
}

RTLIB::Libcall RTLIB::getPOWI(EVT RetVT) {
  return getFPLibCall(RetVT, POWI_F32, POWI_F64, POWI_F80, POWI_F128,
                      POWI_PPCF128);
}

RTLIB::Libcall RTLIB::getLDEXP(EVT RetVT) {
  return getFPLibCall(RetVT, LDEXP_F32, LDEXP_F64, LDEXP_F80, LDEXP_F128,
                      LDEXP_PPCF128);
}

RTLIB::Libcall RTLIB::getFREXP(EVT RetVT) {
  return getFPLibCall(RetVT, FREXP_F32, FREXP_F64, FREXP_F80, FREXP_F128,
                      FREXP_PPCF128);
}

RTLIB::Libcall RTLIB::getFSINCOS(EVT RetVT) {
  return getFPLibCall(RetVT, SINCOS_F32, SINCOS_F64, SINCOS_F80, SINCOS_F128,
                      SINCOS_PPCF128);
}

RTLIB::Libcall RTLIB::getOutlineAtomicHelper(const Libcall (&LC)[5][4],
                                             AtomicOrdering Order,
                                             uint64_t MemSize) {
  unsigned ModeN, ModelN;
  switch (MemSize) {
  case 1:
    ModeN = 0;
    break;
  case 2:
    ModeN = 1;
    break;
  case 4:
    ModeN = 2;
    break;
  case 8:
    ModeN = 3;
    break;
  case 16:
    ModeN = 4;
    break;
  default:
    return RTLIB::UNKNOWN_LIBCALL;
  }

  switch (Order) {
  case AtomicOrdering::Monotonic:
    ModelN = 0;
    break;
  case AtomicOrdering::Acquire:
    ModelN = 1;
    break;
  case AtomicOrdering::Release:
    ModelN = 2;
    break;
  case AtomicOrdering::AcquireRelease:
  case AtomicOrdering::SequentiallyConsistent:
    ModelN = 3;
    break;
  default:
    return UNKNOWN_LIBCALL;
  }

  return LC[ModeN][ModelN];
}
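
// Illustrative example (not part of the upstream file): a 4-byte atomic
// operation with acquire ordering maps to ModeN = 2 and ModelN = 1, so a
// table built with LCALL5(OUTLINE_ATOMIC_CAS) below resolves to
//   LC[2][1] == RTLIB::OUTLINE_ATOMIC_CAS4_ACQ.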

RTLIB::Libcall RTLIB::getOUTLINE_ATOMIC(unsigned Opc, AtomicOrdering Order,
                                        MVT VT) {
  if (!VT.isScalarInteger())
    return UNKNOWN_LIBCALL;
  uint64_t MemSize = VT.getScalarSizeInBits() / 8;

#define LCALLS(A, B) \
  { A##B##_RELAX, A##B##_ACQ, A##B##_REL, A##B##_ACQ_REL }
#define LCALL5(A) \
  LCALLS(A, 1), LCALLS(A, 2), LCALLS(A, 4), LCALLS(A, 8), LCALLS(A, 16)
  switch (Opc) {
  case ISD::ATOMIC_CMP_SWAP: {
    const Libcall LC[5][4] = {LCALL5(OUTLINE_ATOMIC_CAS)};
    return getOutlineAtomicHelper(LC, Order, MemSize);
  }
  case ISD::ATOMIC_SWAP: {
    const Libcall LC[5][4] = {LCALL5(OUTLINE_ATOMIC_SWP)};
    return getOutlineAtomicHelper(LC, Order, MemSize);
  }
  case ISD::ATOMIC_LOAD_ADD: {
    const Libcall LC[5][4] = {LCALL5(OUTLINE_ATOMIC_LDADD)};
    return getOutlineAtomicHelper(LC, Order, MemSize);
  }
  case ISD::ATOMIC_LOAD_OR: {
    const Libcall LC[5][4] = {LCALL5(OUTLINE_ATOMIC_LDSET)};
    return getOutlineAtomicHelper(LC, Order, MemSize);
  }
  case ISD::ATOMIC_LOAD_CLR: {
    const Libcall LC[5][4] = {LCALL5(OUTLINE_ATOMIC_LDCLR)};
    return getOutlineAtomicHelper(LC, Order, MemSize);
  }
  case ISD::ATOMIC_LOAD_XOR: {
    const Libcall LC[5][4] = {LCALL5(OUTLINE_ATOMIC_LDEOR)};
    return getOutlineAtomicHelper(LC, Order, MemSize);
  }
  default:
    return UNKNOWN_LIBCALL;
  }
#undef LCALLS
#undef LCALL5
}

RTLIB::Libcall RTLIB::getSYNC(unsigned Opc, MVT VT) {
#define OP_TO_LIBCALL(Name, Enum) \
  case Name: \
    switch (VT.SimpleTy) { \
    default: \
      return UNKNOWN_LIBCALL; \
    case MVT::i8: \
      return Enum##_1; \
    case MVT::i16: \
      return Enum##_2; \
    case MVT::i32: \
      return Enum##_4; \
    case MVT::i64: \
      return Enum##_8; \
    case MVT::i128: \
      return Enum##_16; \
    }

  switch (Opc) {
    OP_TO_LIBCALL(ISD::ATOMIC_SWAP, SYNC_LOCK_TEST_AND_SET)
    OP_TO_LIBCALL(ISD::ATOMIC_CMP_SWAP, SYNC_VAL_COMPARE_AND_SWAP)
    OP_TO_LIBCALL(ISD::ATOMIC_LOAD_ADD, SYNC_FETCH_AND_ADD)
    OP_TO_LIBCALL(ISD::ATOMIC_LOAD_SUB, SYNC_FETCH_AND_SUB)
    OP_TO_LIBCALL(ISD::ATOMIC_LOAD_AND, SYNC_FETCH_AND_AND)
    OP_TO_LIBCALL(ISD::ATOMIC_LOAD_OR, SYNC_FETCH_AND_OR)
    OP_TO_LIBCALL(ISD::ATOMIC_LOAD_XOR, SYNC_FETCH_AND_XOR)
    OP_TO_LIBCALL(ISD::ATOMIC_LOAD_NAND, SYNC_FETCH_AND_NAND)
    OP_TO_LIBCALL(ISD::ATOMIC_LOAD_MAX, SYNC_FETCH_AND_MAX)
    OP_TO_LIBCALL(ISD::ATOMIC_LOAD_UMAX, SYNC_FETCH_AND_UMAX)
    OP_TO_LIBCALL(ISD::ATOMIC_LOAD_MIN, SYNC_FETCH_AND_MIN)
    OP_TO_LIBCALL(ISD::ATOMIC_LOAD_UMIN, SYNC_FETCH_AND_UMIN)
  }

#undef OP_TO_LIBCALL

  return UNKNOWN_LIBCALL;
}
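
// Illustrative example (not part of the upstream file): by the macro above,
//   RTLIB::getSYNC(ISD::ATOMIC_LOAD_ADD, MVT::i32)
// takes the SYNC_FETCH_AND_ADD row at MVT::i32 and returns
// RTLIB::SYNC_FETCH_AND_ADD_4, the libcall for a 4-byte fetch-and-add.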

RTLIB::Libcall RTLIB::getMEMCPY_ELEMENT_UNORDERED_ATOMIC(uint64_t ElementSize) {
  switch (ElementSize) {
  case 1:
    return MEMCPY_ELEMENT_UNORDERED_ATOMIC_1;
  case 2:
    return MEMCPY_ELEMENT_UNORDERED_ATOMIC_2;
  case 4:
    return MEMCPY_ELEMENT_UNORDERED_ATOMIC_4;
  case 8:
    return MEMCPY_ELEMENT_UNORDERED_ATOMIC_8;
  case 16:
    return MEMCPY_ELEMENT_UNORDERED_ATOMIC_16;
  default:
    return UNKNOWN_LIBCALL;
  }
}

RTLIB::Libcall RTLIB::getMEMMOVE_ELEMENT_UNORDERED_ATOMIC(uint64_t ElementSize) {
  switch (ElementSize) {
  case 1:
    return MEMMOVE_ELEMENT_UNORDERED_ATOMIC_1;
  case 2:
    return MEMMOVE_ELEMENT_UNORDERED_ATOMIC_2;
  case 4:
    return MEMMOVE_ELEMENT_UNORDERED_ATOMIC_4;
  case 8:
    return MEMMOVE_ELEMENT_UNORDERED_ATOMIC_8;
  case 16:
    return MEMMOVE_ELEMENT_UNORDERED_ATOMIC_16;
  default:
    return UNKNOWN_LIBCALL;
  }
}

RTLIB::Libcall RTLIB::getMEMSET_ELEMENT_UNORDERED_ATOMIC(uint64_t ElementSize) {
  switch (ElementSize) {
  case 1:
    return MEMSET_ELEMENT_UNORDERED_ATOMIC_1;
  case 2:
    return MEMSET_ELEMENT_UNORDERED_ATOMIC_2;
  case 4:
    return MEMSET_ELEMENT_UNORDERED_ATOMIC_4;
  case 8:
    return MEMSET_ELEMENT_UNORDERED_ATOMIC_8;
  case 16:
    return MEMSET_ELEMENT_UNORDERED_ATOMIC_16;
  default:
    return UNKNOWN_LIBCALL;
  }
}
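
// Illustrative example (not part of the upstream file): an
// @llvm.memcpy.element.unordered.atomic call with element size 4 selects
// MEMCPY_ELEMENT_UNORDERED_ATOMIC_4; any other element size yields
// UNKNOWN_LIBCALL and must be lowered some other way.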

void RTLIB::initCmpLibcallCCs(ISD::CondCode *CmpLibcallCCs) {
  std::fill(CmpLibcallCCs, CmpLibcallCCs + RTLIB::UNKNOWN_LIBCALL,
            ISD::SETCC_INVALID);
  CmpLibcallCCs[RTLIB::OEQ_F32] = ISD::SETEQ;
  CmpLibcallCCs[RTLIB::OEQ_F64] = ISD::SETEQ;
  CmpLibcallCCs[RTLIB::OEQ_F128] = ISD::SETEQ;
  CmpLibcallCCs[RTLIB::OEQ_PPCF128] = ISD::SETEQ;
  CmpLibcallCCs[RTLIB::UNE_F32] = ISD::SETNE;
  CmpLibcallCCs[RTLIB::UNE_F64] = ISD::SETNE;
  CmpLibcallCCs[RTLIB::UNE_F128] = ISD::SETNE;
  CmpLibcallCCs[RTLIB::UNE_PPCF128] = ISD::SETNE;
  CmpLibcallCCs[RTLIB::OGE_F32] = ISD::SETGE;
  CmpLibcallCCs[RTLIB::OGE_F64] = ISD::SETGE;
  CmpLibcallCCs[RTLIB::OGE_F128] = ISD::SETGE;
  CmpLibcallCCs[RTLIB::OGE_PPCF128] = ISD::SETGE;
  CmpLibcallCCs[RTLIB::OLT_F32] = ISD::SETLT;
  CmpLibcallCCs[RTLIB::OLT_F64] = ISD::SETLT;
  CmpLibcallCCs[RTLIB::OLT_F128] = ISD::SETLT;
  CmpLibcallCCs[RTLIB::OLT_PPCF128] = ISD::SETLT;
  CmpLibcallCCs[RTLIB::OLE_F32] = ISD::SETLE;
  CmpLibcallCCs[RTLIB::OLE_F64] = ISD::SETLE;
  CmpLibcallCCs[RTLIB::OLE_F128] = ISD::SETLE;
  CmpLibcallCCs[RTLIB::OLE_PPCF128] = ISD::SETLE;
  CmpLibcallCCs[RTLIB::OGT_F32] = ISD::SETGT;
  CmpLibcallCCs[RTLIB::OGT_F64] = ISD::SETGT;
  CmpLibcallCCs[RTLIB::OGT_F128] = ISD::SETGT;
  CmpLibcallCCs[RTLIB::OGT_PPCF128] = ISD::SETGT;
  CmpLibcallCCs[RTLIB::UO_F32] = ISD::SETNE;
  CmpLibcallCCs[RTLIB::UO_F64] = ISD::SETNE;
  CmpLibcallCCs[RTLIB::UO_F128] = ISD::SETNE;
  CmpLibcallCCs[RTLIB::UO_PPCF128] = ISD::SETNE;
}
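
// Illustrative note (an assumption, not from the upstream file): these
// condition codes record how a soft-float comparison libcall's integer
// result is tested against zero. For example, __eqsf2 (RTLIB::OEQ_F32)
// returns 0 when its operands are equal, so its result is consumed as
//   setcc (call __eqsf2, a, b), 0, seteq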

/// NOTE: The TargetMachine owns TLOF.
TargetLoweringBase::TargetLoweringBase(const TargetMachine &tm)
    : TM(tm), Libcalls(TM.getTargetTriple()) {
  initActions();

  // Perform these initializations only once.
  MaxStoresPerMemset = MaxStoresPerMemcpy = MaxStoresPerMemmove =
      MaxLoadsPerMemcmp = 8;
  MaxGluedStoresPerMemcpy = 0;
  MaxStoresPerMemsetOptSize = MaxStoresPerMemcpyOptSize =
      MaxStoresPerMemmoveOptSize = MaxLoadsPerMemcmpOptSize = 4;
  HasMultipleConditionRegisters = false;
  HasExtractBitsInsn = false;
  JumpIsExpensive = JumpIsExpensiveOverride;
  PredictableSelectIsExpensive = false;
  EnableExtLdPromotion = false;
  StackPointerRegisterToSaveRestore = 0;
  BooleanContents = UndefinedBooleanContent;
  BooleanFloatContents = UndefinedBooleanContent;
  BooleanVectorContents = UndefinedBooleanContent;
  SchedPreferenceInfo = Sched::ILP;
  GatherAllAliasesMaxDepth = 18;
  IsStrictFPEnabled = DisableStrictNodeMutation;
  MaxBytesForAlignment = 0;
  MaxAtomicSizeInBitsSupported = 0;

  // Assume that even with libcalls, no target supports wider than 128 bit
  // division.
  MaxDivRemBitWidthSupported = 128;

  MaxLargeFPConvertBitWidthSupported = llvm::IntegerType::MAX_INT_BITS;

  MinCmpXchgSizeInBits = 0;
  SupportsUnalignedAtomics = false;

  RTLIB::initCmpLibcallCCs(CmpLibcallCCs);
}

void TargetLoweringBase::initActions() {
  // All operations default to being supported.
  memset(OpActions, 0, sizeof(OpActions));
  memset(LoadExtActions, 0, sizeof(LoadExtActions));
  memset(TruncStoreActions, 0, sizeof(TruncStoreActions));
  memset(IndexedModeActions, 0, sizeof(IndexedModeActions));
  memset(CondCodeActions, 0, sizeof(CondCodeActions));
  std::fill(std::begin(RegClassForVT), std::end(RegClassForVT), nullptr);
  std::fill(std::begin(TargetDAGCombineArray),
            std::end(TargetDAGCombineArray), 0);

  // Let extending atomic loads be unsupported by default.
  for (MVT ValVT : MVT::all_valuetypes())
    for (MVT MemVT : MVT::all_valuetypes())
      setAtomicLoadExtAction({ISD::SEXTLOAD, ISD::ZEXTLOAD}, ValVT, MemVT,
                             Expand);

  // We're somewhat special casing MVT::i2 and MVT::i4. Ideally we want to
  // remove this and targets should individually set these types if not legal.
  for (ISD::NodeType NT : enum_seq(ISD::DELETED_NODE, ISD::BUILTIN_OP_END,
                                   force_iteration_on_noniterable_enum)) {
    for (MVT VT : {MVT::i2, MVT::i4})
      OpActions[(unsigned)VT.SimpleTy][NT] = Expand;
  }
  for (MVT AVT : MVT::all_valuetypes()) {
    for (MVT VT : {MVT::i2, MVT::i4, MVT::v128i2, MVT::v64i4}) {
      setTruncStoreAction(AVT, VT, Expand);
      setLoadExtAction(ISD::EXTLOAD, AVT, VT, Expand);
      setLoadExtAction(ISD::ZEXTLOAD, AVT, VT, Expand);
    }
  }
  for (unsigned IM = (unsigned)ISD::PRE_INC;
       IM != (unsigned)ISD::LAST_INDEXED_MODE; ++IM) {
    for (MVT VT : {MVT::i2, MVT::i4}) {
      setIndexedLoadAction(IM, VT, Expand);
      setIndexedStoreAction(IM, VT, Expand);
      setIndexedMaskedLoadAction(IM, VT, Expand);
      setIndexedMaskedStoreAction(IM, VT, Expand);
    }
  }

  for (MVT VT : MVT::fp_valuetypes()) {
    MVT IntVT = MVT::getIntegerVT(VT.getFixedSizeInBits());
    if (IntVT.isValid()) {
      setOperationAction(ISD::ATOMIC_SWAP, VT, Promote);
      AddPromotedToType(ISD::ATOMIC_SWAP, VT, IntVT);
    }
  }

  // Set default actions for various operations.
  for (MVT VT : MVT::all_valuetypes()) {
    // Default all indexed load / store to expand.
    for (unsigned IM = (unsigned)ISD::PRE_INC;
         IM != (unsigned)ISD::LAST_INDEXED_MODE; ++IM) {
      setIndexedLoadAction(IM, VT, Expand);
      setIndexedStoreAction(IM, VT, Expand);
      setIndexedMaskedLoadAction(IM, VT, Expand);
      setIndexedMaskedStoreAction(IM, VT, Expand);
    }

    // Most backends expect to see the node which just returns the value loaded.
    setOperationAction(ISD::ATOMIC_CMP_SWAP_WITH_SUCCESS, VT, Expand);

    // These operations default to expand.
    setOperationAction({ISD::FGETSIGN, ISD::CONCAT_VECTORS, ISD::FMINNUM,
                        ISD::FMAXNUM, ISD::FMINNUM_IEEE, ISD::FMAXNUM_IEEE,
                        ISD::FMINIMUM, ISD::FMAXIMUM, ISD::FMINIMUMNUM,
                        ISD::FMAXIMUMNUM, ISD::FMAD, ISD::SMIN, ISD::SMAX,
                        ISD::UMIN, ISD::UMAX, ISD::ABS, ISD::FSHL, ISD::FSHR,
                        ISD::SADDSAT, ISD::UADDSAT, ISD::SSUBSAT, ISD::USUBSAT,
                        ISD::SSHLSAT, ISD::USHLSAT, ISD::SMULFIX,
                        ISD::SMULFIXSAT, ISD::UMULFIX, ISD::UMULFIXSAT,
                        ISD::SDIVFIX, ISD::SDIVFIXSAT, ISD::UDIVFIX,
                        ISD::UDIVFIXSAT, ISD::FP_TO_SINT_SAT,
                        ISD::FP_TO_UINT_SAT, ISD::IS_FPCLASS},
                       VT, Expand);

    // Overflow operations default to expand
    setOperationAction({ISD::SADDO, ISD::SSUBO, ISD::UADDO, ISD::USUBO,
                        ISD::SMULO, ISD::UMULO},
                       VT, Expand);

    // Carry-using overflow operations default to expand.
    setOperationAction({ISD::UADDO_CARRY, ISD::USUBO_CARRY, ISD::SETCCCARRY,
                        ISD::SADDO_CARRY, ISD::SSUBO_CARRY},
                       VT, Expand);

    // ADDC/ADDE/SUBC/SUBE default to expand.
    setOperationAction({ISD::ADDC, ISD::ADDE, ISD::SUBC, ISD::SUBE}, VT,
                       Expand);

    // [US]CMP default to expand
    setOperationAction({ISD::UCMP, ISD::SCMP}, VT, Expand);

    // Halving adds
    setOperationAction(
        {ISD::AVGFLOORS, ISD::AVGFLOORU, ISD::AVGCEILS, ISD::AVGCEILU}, VT,
        Expand);

    // Absolute difference
    setOperationAction({ISD::ABDS, ISD::ABDU}, VT, Expand);

    // Saturated trunc
    setOperationAction(
        {ISD::TRUNCATE_SSAT_S, ISD::TRUNCATE_SSAT_U, ISD::TRUNCATE_USAT_U}, VT,
        Expand);

    // These default to Expand so they will be expanded to CTLZ/CTTZ by default.
    setOperationAction({ISD::CTLZ_ZERO_UNDEF, ISD::CTTZ_ZERO_UNDEF}, VT,
                       Expand);

    setOperationAction({ISD::BITREVERSE, ISD::PARITY}, VT, Expand);

    // These library functions default to expand.
    setOperationAction({ISD::FROUND, ISD::FPOWI, ISD::FLDEXP, ISD::FFREXP},
                       VT, Expand);

    // These operations default to expand for vector types.
    if (VT.isVector())
      setOperationAction(
          {ISD::FCOPYSIGN, ISD::SIGN_EXTEND_INREG,
           ISD::ANY_EXTEND_VECTOR_INREG, ISD::SIGN_EXTEND_VECTOR_INREG,
           ISD::ZERO_EXTEND_VECTOR_INREG, ISD::SPLAT_VECTOR, ISD::LRINT,
           ISD::LLRINT, ISD::LROUND, ISD::LLROUND},
          VT, Expand);

    // Constrained floating-point operations default to expand.
#define DAG_INSTRUCTION(NAME, NARG, ROUND_MODE, INTRINSIC, DAGN) \
  setOperationAction(ISD::STRICT_##DAGN, VT, Expand);
#include "llvm/IR/ConstrainedOps.def"

    // For most targets @llvm.get.dynamic.area.offset just returns 0.
    setOperationAction(ISD::GET_DYNAMIC_AREA_OFFSET, VT, Expand);

    // Vector reduction default to expand.
    setOperationAction(
        {ISD::VECREDUCE_FADD, ISD::VECREDUCE_FMUL, ISD::VECREDUCE_ADD,
         ISD::VECREDUCE_MUL, ISD::VECREDUCE_AND, ISD::VECREDUCE_OR,
         ISD::VECREDUCE_XOR, ISD::VECREDUCE_SMAX, ISD::VECREDUCE_SMIN,
         ISD::VECREDUCE_UMAX, ISD::VECREDUCE_UMIN, ISD::VECREDUCE_FMAX,
         ISD::VECREDUCE_FMIN, ISD::VECREDUCE_FMAXIMUM, ISD::VECREDUCE_FMINIMUM,
         ISD::VECREDUCE_SEQ_FADD, ISD::VECREDUCE_SEQ_FMUL},
        VT, Expand);

    // Named vector shuffles default to expand.
    setOperationAction(ISD::VECTOR_SPLICE, VT, Expand);

    // Only some targets support this vector operation. Most need to expand it.
    setOperationAction(ISD::VECTOR_COMPRESS, VT, Expand);

    // VP operations default to expand.
#define BEGIN_REGISTER_VP_SDNODE(SDOPC, ...) \
  setOperationAction(ISD::SDOPC, VT, Expand);
#include "llvm/IR/VPIntrinsics.def"

    // Masked vector extracts default to expand.
    setOperationAction(ISD::VECTOR_FIND_LAST_ACTIVE, VT, Expand);

    // FP environment operations default to expand.
    setOperationAction(ISD::GET_FPENV, VT, Expand);
    setOperationAction(ISD::SET_FPENV, VT, Expand);
    setOperationAction(ISD::RESET_FPENV, VT, Expand);
  }

  // Most targets ignore the @llvm.prefetch intrinsic.
  setOperationAction(ISD::PREFETCH, MVT::Other, Expand);

  // Most targets also ignore the @llvm.readcyclecounter intrinsic.
  setOperationAction(ISD::READCYCLECOUNTER, MVT::i64, Expand);

  // Most targets also ignore the @llvm.readsteadycounter intrinsic.
  setOperationAction(ISD::READSTEADYCOUNTER, MVT::i64, Expand);

  // ConstantFP nodes default to expand. Targets can either change this to
  // Legal, in which case all fp constants are legal, or use isFPImmLegal()
  // to optimize expansions for certain constants.
  setOperationAction(ISD::ConstantFP,
                     {MVT::bf16, MVT::f16, MVT::f32, MVT::f64, MVT::f80, MVT::f128},
                     Expand);

  // These library functions default to expand.
  setOperationAction({ISD::FCBRT, ISD::FLOG, ISD::FLOG2, ISD::FLOG10,
                      ISD::FEXP, ISD::FEXP2, ISD::FEXP10, ISD::FFLOOR,
                      ISD::FNEARBYINT, ISD::FCEIL, ISD::FRINT, ISD::FTRUNC,
                      ISD::FROUNDEVEN, ISD::FTAN, ISD::FACOS, ISD::FASIN,
                      ISD::FATAN, ISD::FCOSH, ISD::FSINH, ISD::FTANH,
                      ISD::FATAN2},
                     {MVT::f32, MVT::f64, MVT::f128}, Expand);

  // FIXME: Query RuntimeLibCalls to make the decision.
  setOperationAction({ISD::LRINT, ISD::LLRINT, ISD::LROUND, ISD::LLROUND},
                     {MVT::f32, MVT::f64, MVT::f128}, LibCall);

  setOperationAction({ISD::FTAN, ISD::FACOS, ISD::FASIN, ISD::FATAN,
                      ISD::FCOSH, ISD::FSINH, ISD::FTANH, ISD::FATAN2},
                     MVT::f16, Promote);
  // Default ISD::TRAP to expand (which turns it into abort).
  setOperationAction(ISD::TRAP, MVT::Other, Expand);

  // On most systems, DEBUGTRAP and TRAP have no difference. The "Expand"
  // here is to inform DAG Legalizer to replace DEBUGTRAP with TRAP.
  setOperationAction(ISD::DEBUGTRAP, MVT::Other, Expand);

  setOperationAction(ISD::UBSANTRAP, MVT::Other, Expand);

  setOperationAction(ISD::GET_FPENV_MEM, MVT::Other, Expand);
  setOperationAction(ISD::SET_FPENV_MEM, MVT::Other, Expand);

  for (MVT VT : {MVT::i8, MVT::i16, MVT::i32, MVT::i64}) {
    setOperationAction(ISD::GET_FPMODE, VT, Expand);
    setOperationAction(ISD::SET_FPMODE, VT, Expand);
  }
  setOperationAction(ISD::RESET_FPMODE, MVT::Other, Expand);

  // This one by default will call __clear_cache unless the target
  // wants something different.
  setOperationAction(ISD::CLEAR_CACHE, MVT::Other, LibCall);
}

MVT TargetLoweringBase::getScalarShiftAmountTy(const DataLayout &DL,
                                               EVT) const {
  return MVT::getIntegerVT(DL.getPointerSizeInBits(0));
}

EVT TargetLoweringBase::getShiftAmountTy(EVT LHSTy,
                                         const DataLayout &DL) const {
  assert(LHSTy.isInteger() && "Shift amount is not an integer type!");
  if (LHSTy.isVector())
    return LHSTy;
  MVT ShiftVT = getScalarShiftAmountTy(DL, LHSTy);
  // If any possible shift value won't fit in the preferred type, just use
  // something safe. Assume it will be legalized when the shift is expanded.
  if (ShiftVT.getSizeInBits() < Log2_32_Ceil(LHSTy.getSizeInBits()))
    ShiftVT = MVT::i32;
  assert(ShiftVT.getSizeInBits() >= Log2_32_Ceil(LHSTy.getSizeInBits()) &&
         "ShiftVT is still too small!");
  return ShiftVT;
}

bool TargetLoweringBase::canOpTrap(unsigned Op, EVT VT) const {
  assert(isTypeLegal(VT));
  switch (Op) {
  default:
    return false;
  case ISD::SDIV:
  case ISD::UDIV:
  case ISD::SREM:
  case ISD::UREM:
    return true;
  }
}

bool TargetLoweringBase::isFreeAddrSpaceCast(unsigned SrcAS,
                                             unsigned DestAS) const {
  return TM.isNoopAddrSpaceCast(SrcAS, DestAS);
}

unsigned TargetLoweringBase::getBitWidthForCttzElements(
    Type *RetTy, ElementCount EC, bool ZeroIsPoison,
    const ConstantRange *VScaleRange) const {
  // Find the smallest "sensible" element type to use for the expansion.
  ConstantRange CR(APInt(64, EC.getKnownMinValue()));
  if (EC.isScalable())
    CR = CR.umul_sat(*VScaleRange);

  if (ZeroIsPoison)
    CR = CR.subtract(APInt(64, 1));

  unsigned EltWidth = RetTy->getScalarSizeInBits();
  EltWidth = std::min(EltWidth, (unsigned)CR.getActiveBits());
  EltWidth = std::max(llvm::bit_ceil(EltWidth), (unsigned)8);

  return EltWidth;
}
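
// Worked example (not part of the upstream file): for a fixed <8 x i64>
// input with ZeroIsPoison, the single-value range {8} becomes {7} after the
// subtraction; 7 has 3 active bits, so max(bit_ceil(min(64, 3)), 8) selects
// an 8-bit element type for the expansion.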

void TargetLoweringBase::setJumpIsExpensive(bool isExpensive) {
  // If the command-line option was specified, ignore this request.
  if (!JumpIsExpensiveOverride.getNumOccurrences())
    JumpIsExpensive = isExpensive;
}

TargetLoweringBase::LegalizeKind
TargetLoweringBase::getTypeConversion(LLVMContext &Context, EVT VT) const {
  // If this is a simple type, use the ComputeRegisterProp mechanism.
  if (VT.isSimple()) {
    MVT SVT = VT.getSimpleVT();
    assert((unsigned)SVT.SimpleTy < std::size(TransformToType));
    MVT NVT = TransformToType[SVT.SimpleTy];
    LegalizeTypeAction LA = ValueTypeActions.getTypeAction(SVT);

    assert((LA == TypeLegal || LA == TypeSoftenFloat ||
            LA == TypeSoftPromoteHalf ||
            (NVT.isVector() ||
             ValueTypeActions.getTypeAction(NVT) != TypePromoteInteger)) &&
           "Promote may not follow Expand or Promote");

    if (LA == TypeSplitVector)
      return LegalizeKind(LA, EVT(SVT).getHalfNumVectorElementsVT(Context));
    if (LA == TypeScalarizeVector)
      return LegalizeKind(LA, SVT.getVectorElementType());
    return LegalizeKind(LA, NVT);
  }

  // Handle Extended Scalar Types.
  if (!VT.isVector()) {
    assert(VT.isInteger() && "Float types must be simple");
    unsigned BitSize = VT.getSizeInBits();
    // First promote to a power-of-two size, then expand if necessary.
    if (BitSize < 8 || !isPowerOf2_32(BitSize)) {
      EVT NVT = VT.getRoundIntegerType(Context);
      assert(NVT != VT && "Unable to round integer VT");
      LegalizeKind NextStep = getTypeConversion(Context, NVT);
      // Avoid multi-step promotion.
      if (NextStep.first == TypePromoteInteger)
        return NextStep;
      // Return rounded integer type.
      return LegalizeKind(TypePromoteInteger, NVT);
    }

    return LegalizeKind(TypeExpandInteger,
                        EVT::getIntegerVT(Context, VT.getSizeInBits() / 2));
  }

  // Handle vector types.
  ElementCount NumElts = VT.getVectorElementCount();
  EVT EltVT = VT.getVectorElementType();

  // Vectors with only one element are always scalarized.
  if (NumElts.isScalar())
    return LegalizeKind(TypeScalarizeVector, EltVT);

  // Try to widen vector elements until the element type is a power of two and
  // promote it to a legal type later on, for example:
  // <3 x i8> -> <4 x i8> -> <4 x i32>
  if (EltVT.isInteger()) {
    // Vectors with a number of elements that is not a power of two are always
    // widened, for example <3 x i8> -> <4 x i8>.
    if (!VT.isPow2VectorType()) {
      NumElts = NumElts.coefficientNextPowerOf2();
      EVT NVT = EVT::getVectorVT(Context, EltVT, NumElts);
      return LegalizeKind(TypeWidenVector, NVT);
    }

    // Examine the element type.
    LegalizeKind LK = getTypeConversion(Context, EltVT);

    // If type is to be expanded, split the vector.
    //  <4 x i140> -> <2 x i140>
    if (LK.first == TypeExpandInteger) {
      if (VT.getVectorElementCount().isScalable())
        return LegalizeKind(TypeScalarizeScalableVector, EltVT);
      return LegalizeKind(TypeSplitVector,
                          VT.getHalfNumVectorElementsVT(Context));
    }

    // Promote the integer element types until a legal vector type is found
    // or until the element integer type is too big. If a legal type was not
    // found, fallback to the usual mechanism of widening/splitting the
    // vector.
    EVT OldEltVT = EltVT;
    while (true) {
      // Increase the bitwidth of the element to the next pow-of-two
      // (which is greater than 8 bits).
      EltVT = EVT::getIntegerVT(Context, 1 + EltVT.getSizeInBits())
                  .getRoundIntegerType(Context);

      // Stop trying when getting a non-simple element type.
      // Note that vector elements may be greater than legal vector element
      // types. Example: X86 XMM registers hold 64bit element on 32bit
      // systems.
      if (!EltVT.isSimple())
        break;

      // Build a new vector type and check if it is legal.
      MVT NVT = MVT::getVectorVT(EltVT.getSimpleVT(), NumElts);
      // Found a legal promoted vector type.
      if (NVT != MVT() && ValueTypeActions.getTypeAction(NVT) == TypeLegal)
        return LegalizeKind(TypePromoteInteger,
                            EVT::getVectorVT(Context, EltVT, NumElts));
    }

    // Reset the type to the unexpanded type if we did not find a legal vector
    // type with a promoted vector element type.
    EltVT = OldEltVT;
  }

  // Try to widen the vector until a legal type is found.
  // If there is no wider legal type, split the vector.
  while (true) {
    // Round up to the next power of 2.
    NumElts = NumElts.coefficientNextPowerOf2();

    // If there is no simple vector type with this many elements then there
    // cannot be a larger legal vector type. Note that this assumes that
    // there are no skipped intermediate vector types in the simple types.
    if (!EltVT.isSimple())
      break;
    MVT LargerVector = MVT::getVectorVT(EltVT.getSimpleVT(), NumElts);
    if (LargerVector == MVT())
      break;

    // If this type is legal then widen the vector.
    if (ValueTypeActions.getTypeAction(LargerVector) == TypeLegal)
      return LegalizeKind(TypeWidenVector, LargerVector);
  }

  // Widen odd vectors to next power of two.
  if (!VT.isPow2VectorType()) {
    EVT NVT = VT.getPow2VectorType(Context);
    return LegalizeKind(TypeWidenVector, NVT);
  }

  if (VT.getVectorElementCount() == ElementCount::getScalable(1))
    return LegalizeKind(TypeScalarizeScalableVector, EltVT);

  // Vectors with illegal element types are expanded.
  EVT NVT = EVT::getVectorVT(Context, EltVT,
                             VT.getVectorElementCount().divideCoefficientBy(2));
  return LegalizeKind(TypeSplitVector, NVT);
}

static unsigned getVectorTypeBreakdownMVT(MVT VT, MVT &IntermediateVT,
                                          unsigned &NumIntermediates,
                                          MVT &RegisterVT,
                                          TargetLoweringBase *TLI) {
  // Figure out the right, legal destination reg to copy into.
  ElementCount EC = VT.getVectorElementCount();
  MVT EltTy = VT.getVectorElementType();

  unsigned NumVectorRegs = 1;

  // Scalable vectors cannot be scalarized, so splitting or widening is
  // required.
  if (VT.isScalableVector() && !isPowerOf2_32(EC.getKnownMinValue()))
    llvm_unreachable(
        "Splitting or widening of non-power-of-2 MVTs is not implemented.");

  // FIXME: We don't support non-power-of-2-sized vectors for now.
  // Ideally we could break down into LHS/RHS like LegalizeDAG does.
  if (!isPowerOf2_32(EC.getKnownMinValue())) {
    // Split EC to unit size (scalable property is preserved).
    NumVectorRegs = EC.getKnownMinValue();
    EC = ElementCount::getFixed(1);
  }

  // Divide the input until we get to a supported size. This will
  // always end up with an EC that represents a scalar or a scalable
  // scalar.
  while (EC.getKnownMinValue() > 1 &&
         !TLI->isTypeLegal(MVT::getVectorVT(EltTy, EC))) {
    EC = EC.divideCoefficientBy(2);
    NumVectorRegs <<= 1;
  }

  NumIntermediates = NumVectorRegs;

  MVT NewVT = MVT::getVectorVT(EltTy, EC);
  if (!TLI->isTypeLegal(NewVT))
    NewVT = EltTy;
  IntermediateVT = NewVT;

  unsigned LaneSizeInBits = NewVT.getScalarSizeInBits();

  // Convert sizes such as i33 to i64.
  LaneSizeInBits = llvm::bit_ceil(LaneSizeInBits);

  MVT DestVT = TLI->getRegisterType(NewVT);
  RegisterVT = DestVT;
  if (EVT(DestVT).bitsLT(NewVT)) // Value is expanded, e.g. i64 -> i16.
    return NumVectorRegs * (LaneSizeInBits / DestVT.getScalarSizeInBits());

  // Otherwise, promotion or legal types use the same number of registers as
  // the vector decimated to the appropriate level.
  return NumVectorRegs;
}
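
// Worked example (an assumption, not from the upstream file): on a target
// whose widest legal vector is v2i64, breaking down MVT::v8i64 halves the
// element count twice (v8i64 -> v4i64 -> v2i64), giving NumIntermediates = 4
// with IntermediateVT = RegisterVT = v2i64, so 4 registers are reported.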

/// isLegalRC - Return true if the value types that can be represented by the
/// specified register class are all legal.
bool TargetLoweringBase::isLegalRC(const TargetRegisterInfo &TRI,
                                   const TargetRegisterClass &RC) const {
  for (const auto *I = TRI.legalclasstypes_begin(RC); *I != MVT::Other; ++I)
    if (isTypeLegal(*I))
      return true;
  return false;
}

/// Replace/modify any TargetFrameIndex operands with a target-dependent
/// sequence of memory operands that is recognized by PrologEpilogInserter.
MachineBasicBlock *
TargetLoweringBase::emitPatchPoint(MachineInstr &InitialMI,
                                   MachineBasicBlock *MBB) const {
  MachineInstr *MI = &InitialMI;
  MachineFunction &MF = *MI->getMF();
  MachineFrameInfo &MFI = MF.getFrameInfo();

  // We're handling multiple types of operands here:
  // PATCHPOINT MetaArgs - live-in, read only, direct
  // STATEPOINT Deopt Spill - live-through, read only, indirect
  // STATEPOINT Deopt Alloca - live-through, read only, direct
  // (We're currently conservative and mark the deopt slots read/write in
  // practice.)
  // STATEPOINT GC Spill - live-through, read/write, indirect
  // STATEPOINT GC Alloca - live-through, read/write, direct
  // The live-in vs live-through is handled already (the live through ones are
  // all stack slots), but we need to handle the different type of stackmap
  // operands and memory effects here.

  if (llvm::none_of(MI->operands(),
                    [](MachineOperand &Operand) { return Operand.isFI(); }))
    return MBB;

  MachineInstrBuilder MIB = BuildMI(MF, MI->getDebugLoc(), MI->getDesc());

  // Inherit previous memory operands.
  MIB.cloneMemRefs(*MI);

  for (unsigned i = 0; i < MI->getNumOperands(); ++i) {
    MachineOperand &MO = MI->getOperand(i);
    if (!MO.isFI()) {
      // Index of Def operand this Use is tied to.
      // Since Defs are coming before Uses, if Use is tied, then
      // index of Def must be smaller than index of that Use.
      // Also, Defs preserve their position in new MI.
      unsigned TiedTo = i;
      if (MO.isReg() && MO.isTied())
        TiedTo = MI->findTiedOperandIdx(i);
      MIB.add(MO);
      if (TiedTo < i)
        MIB->tieOperands(TiedTo, MIB->getNumOperands() - 1);
      continue;
    }

    // foldMemoryOperand builds a new MI after replacing a single FI operand
    // with the canonical set of five x86 addressing-mode operands.
    int FI = MO.getIndex();

    // Add frame index operands recognized by stackmaps.cpp
    if (MFI.isStatepointSpillSlotObjectIndex(FI)) {
      // indirect-mem-ref tag, size, #FI, offset.
      // Used for spills inserted by StatepointLowering. This codepath is not
      // used for patchpoints/stackmaps at all, for these spilling is done via
      // foldMemoryOperand callback only.
      assert(MI->getOpcode() == TargetOpcode::STATEPOINT && "sanity");
      MIB.addImm(StackMaps::IndirectMemRefOp);
      MIB.addImm(MFI.getObjectSize(FI));
      MIB.add(MO);
      MIB.addImm(0);
    } else {
      // direct-mem-ref tag, #FI, offset.
      // Used by patchpoint, and direct alloca arguments to statepoints
      MIB.addImm(StackMaps::DirectMemRefOp);
      MIB.add(MO);
      MIB.addImm(0);
    }

    assert(MIB->mayLoad() && "Folded a stackmap use to a non-load!");

    // Add a new memory operand for this FI.
    assert(MFI.getObjectOffset(FI) != -1);

    // Note: STATEPOINT MMOs are added during SelectionDAG. STACKMAP, and
    // PATCHPOINT should be updated to do the same. (TODO)
    if (MI->getOpcode() != TargetOpcode::STATEPOINT) {
      auto Flags = MachineMemOperand::MOLoad;
      MachineMemOperand *MMO = MF.getMachineMemOperand(
          MachinePointerInfo::getFixedStack(MF, FI), Flags,
          MF.getDataLayout().getPointerSize(), MFI.getObjectAlign(FI));
      MIB->addMemOperand(MF, MMO);
    }
  }
  MBB->insert(MachineBasicBlock::iterator(MI), MIB);
  MI->eraseFromParent();
  return MBB;
}

/// findRepresentativeClass - Return the largest legal super-reg register class
/// of the register class for the specified type and its associated "cost".
// This function is in TargetLowering because it uses RegClassForVT which would
// need to be moved to TargetRegisterInfo and would necessitate moving
// isTypeLegal over as well - a massive change that would just require
// TargetLowering having a TargetRegisterInfo class member that it would use.
std::pair<const TargetRegisterClass *, uint8_t>
TargetLoweringBase::findRepresentativeClass(const TargetRegisterInfo *TRI,
                                            MVT VT) const {
  const TargetRegisterClass *RC = RegClassForVT[VT.SimpleTy];
  if (!RC)
    return std::make_pair(RC, 0);

  // Compute the set of all super-register classes.
  BitVector SuperRegRC(TRI->getNumRegClasses());
  for (SuperRegClassIterator RCI(RC, TRI); RCI.isValid(); ++RCI)
    SuperRegRC.setBitsInMask(RCI.getMask());

  // Find the first legal register class with the largest spill size.
  const TargetRegisterClass *BestRC = RC;
  for (unsigned i : SuperRegRC.set_bits()) {
    const TargetRegisterClass *SuperRC = TRI->getRegClass(i);
    // We want the largest possible spill size.
    if (TRI->getSpillSize(*SuperRC) <= TRI->getSpillSize(*BestRC))
      continue;
    if (!isLegalRC(*TRI, *SuperRC))
      continue;
    BestRC = SuperRC;
  }
  return std::make_pair(BestRC, 1);
}

/// computeRegisterProperties - Once all of the register classes are added,
/// this allows us to compute derived properties we expose.
void TargetLoweringBase::computeRegisterProperties(
    const TargetRegisterInfo *TRI) {
  // Everything defaults to needing one register.
  for (unsigned i = 0; i != MVT::VALUETYPE_SIZE; ++i) {
    NumRegistersForVT[i] = 1;
    RegisterTypeForVT[i] = TransformToType[i] = (MVT::SimpleValueType)i;
  }
  // ...except isVoid, which doesn't need any registers.
  NumRegistersForVT[MVT::isVoid] = 0;

  // Find the largest integer register class.
  unsigned LargestIntReg = MVT::LAST_INTEGER_VALUETYPE;
  for (; RegClassForVT[LargestIntReg] == nullptr; --LargestIntReg)
    assert(LargestIntReg != MVT::i1 && "No integer registers defined!");

  // Every integer value type larger than this largest register takes twice as
  // many registers to represent as the previous ValueType.
  for (unsigned ExpandedReg = LargestIntReg + 1;
       ExpandedReg <= MVT::LAST_INTEGER_VALUETYPE; ++ExpandedReg) {
    NumRegistersForVT[ExpandedReg] = 2 * NumRegistersForVT[ExpandedReg - 1];
    RegisterTypeForVT[ExpandedReg] = (MVT::SimpleValueType)LargestIntReg;
    TransformToType[ExpandedReg] = (MVT::SimpleValueType)(ExpandedReg - 1);
    ValueTypeActions.setTypeAction((MVT::SimpleValueType)ExpandedReg,
                                   TypeExpandInteger);
  }

  // Inspect all of the ValueTypes smaller than the largest integer
  // register to see which ones need promotion.
  unsigned LegalIntReg = LargestIntReg;
  for (unsigned IntReg = LargestIntReg - 1;
       IntReg >= (unsigned)MVT::i1; --IntReg) {
    MVT IVT = (MVT::SimpleValueType)IntReg;
    if (isTypeLegal(IVT)) {
      LegalIntReg = IntReg;
    } else {
      RegisterTypeForVT[IntReg] = TransformToType[IntReg] =
          (MVT::SimpleValueType)LegalIntReg;
      ValueTypeActions.setTypeAction(IVT, TypePromoteInteger);
    }
  }

  // ppcf128 type is really two f64's.
  if (!isTypeLegal(MVT::ppcf128)) {
    if (isTypeLegal(MVT::f64)) {
      NumRegistersForVT[MVT::ppcf128] = 2 * NumRegistersForVT[MVT::f64];
      RegisterTypeForVT[MVT::ppcf128] = MVT::f64;
      TransformToType[MVT::ppcf128] = MVT::f64;
      ValueTypeActions.setTypeAction(MVT::ppcf128, TypeExpandFloat);
    } else {
      NumRegistersForVT[MVT::ppcf128] = NumRegistersForVT[MVT::i128];
      RegisterTypeForVT[MVT::ppcf128] = RegisterTypeForVT[MVT::i128];
      TransformToType[MVT::ppcf128] = MVT::i128;
      ValueTypeActions.setTypeAction(MVT::ppcf128, TypeSoftenFloat);
    }
  }

  // Decide how to handle f128. If the target does not have native f128 support,
  // expand it to i128 and we will be generating soft float library calls.
  if (!isTypeLegal(MVT::f128)) {
    NumRegistersForVT[MVT::f128] = NumRegistersForVT[MVT::i128];
    RegisterTypeForVT[MVT::f128] = RegisterTypeForVT[MVT::i128];
    TransformToType[MVT::f128] = MVT::i128;
    ValueTypeActions.setTypeAction(MVT::f128, TypeSoftenFloat);
  }

  // Decide how to handle f80. If the target does not have native f80 support,
  // expand it to i96 and we will be generating soft float library calls.
  if (!isTypeLegal(MVT::f80)) {
    NumRegistersForVT[MVT::f80] = 3 * NumRegistersForVT[MVT::i32];
    RegisterTypeForVT[MVT::f80] = RegisterTypeForVT[MVT::i32];
    TransformToType[MVT::f80] = MVT::i32;
    ValueTypeActions.setTypeAction(MVT::f80, TypeSoftenFloat);
  }

  // Decide how to handle f64. If the target does not have native f64 support,
  // expand it to i64 and we will be generating soft float library calls.
  if (!isTypeLegal(MVT::f64)) {
    NumRegistersForVT[MVT::f64] = NumRegistersForVT[MVT::i64];
    RegisterTypeForVT[MVT::f64] = RegisterTypeForVT[MVT::i64];
    TransformToType[MVT::f64] = MVT::i64;
    ValueTypeActions.setTypeAction(MVT::f64, TypeSoftenFloat);
  }

  // Decide how to handle f32. If the target does not have native f32 support,
  // expand it to i32 and we will be generating soft float library calls.
  if (!isTypeLegal(MVT::f32)) {
    NumRegistersForVT[MVT::f32] = NumRegistersForVT[MVT::i32];
    RegisterTypeForVT[MVT::f32] = RegisterTypeForVT[MVT::i32];
    TransformToType[MVT::f32] = MVT::i32;
    ValueTypeActions.setTypeAction(MVT::f32, TypeSoftenFloat);
  }

  // Decide how to handle f16. If the target does not have native f16 support,
  // promote it to f32, because there are no f16 library calls (except for
  // conversions).
  if (!isTypeLegal(MVT::f16)) {
    // Allow targets to control how we legalize half.
    bool SoftPromoteHalfType = softPromoteHalfType();
    bool UseFPRegsForHalfType = !SoftPromoteHalfType || useFPRegsForHalfType();

    if (!UseFPRegsForHalfType) {
      NumRegistersForVT[MVT::f16] = NumRegistersForVT[MVT::i16];
      RegisterTypeForVT[MVT::f16] = RegisterTypeForVT[MVT::i16];
    } else {
      NumRegistersForVT[MVT::f16] = NumRegistersForVT[MVT::f32];
      RegisterTypeForVT[MVT::f16] = RegisterTypeForVT[MVT::f32];
    }
    TransformToType[MVT::f16] = MVT::f32;
    if (SoftPromoteHalfType) {
      ValueTypeActions.setTypeAction(MVT::f16, TypeSoftPromoteHalf);
    } else {
      ValueTypeActions.setTypeAction(MVT::f16, TypePromoteFloat);
    }
  }

  // Decide how to handle bf16. If the target does not have native bf16 support,
  // promote it to f32, because there are no bf16 library calls (except for
  // converting from f32 to bf16).
  if (!isTypeLegal(MVT::bf16)) {
    NumRegistersForVT[MVT::bf16] = NumRegistersForVT[MVT::f32];
    RegisterTypeForVT[MVT::bf16] = RegisterTypeForVT[MVT::f32];
    TransformToType[MVT::bf16] = MVT::f32;
    ValueTypeActions.setTypeAction(MVT::bf16, TypeSoftPromoteHalf);
  }

  // Loop over all of the vector value types to see which need transformations.
  for (unsigned i = MVT::FIRST_VECTOR_VALUETYPE;
       i <= (unsigned)MVT::LAST_VECTOR_VALUETYPE; ++i) {
    MVT VT = (MVT::SimpleValueType)i;
    if (isTypeLegal(VT))
      continue;

    MVT EltVT = VT.getVectorElementType();
    ElementCount EC = VT.getVectorElementCount();
    bool IsLegalWiderType = false;
    bool IsScalable = VT.isScalableVector();
    LegalizeTypeAction PreferredAction = getPreferredVectorAction(VT);
    switch (PreferredAction) {
    case TypePromoteInteger: {
      MVT::SimpleValueType EndVT = IsScalable ?
                           MVT::LAST_INTEGER_SCALABLE_VECTOR_VALUETYPE :
                           MVT::LAST_INTEGER_FIXEDLEN_VECTOR_VALUETYPE;
      // Try to promote the elements of integer vectors. If no legal
      // promotion was found, fall through to the widen-vector method.
      for (unsigned nVT = i + 1;
           (MVT::SimpleValueType)nVT <= EndVT; ++nVT) {
        MVT SVT = (MVT::SimpleValueType)nVT;
        // Promote vectors of integers to vectors with the same number
        // of elements, with a wider element type.
        if (SVT.getScalarSizeInBits() > EltVT.getFixedSizeInBits() &&
            SVT.getVectorElementCount() == EC && isTypeLegal(SVT)) {
          TransformToType[i] = SVT;
          RegisterTypeForVT[i] = SVT;
          NumRegistersForVT[i] = 1;
          ValueTypeActions.setTypeAction(VT, TypePromoteInteger);
          IsLegalWiderType = true;
          break;
        }
      }
      if (IsLegalWiderType)
        break;
      [[fallthrough]];
    }

    case TypeWidenVector:
      if (isPowerOf2_32(EC.getKnownMinValue())) {
        // Try to widen the vector.
        for (unsigned nVT = i + 1; nVT <= MVT::LAST_VECTOR_VALUETYPE; ++nVT) {
          MVT SVT = (MVT::SimpleValueType)nVT;
          if (SVT.getVectorElementType() == EltVT &&
              SVT.isScalableVector() == IsScalable &&
              SVT.getVectorElementCount().getKnownMinValue() >
                  EC.getKnownMinValue() &&
              isTypeLegal(SVT)) {
            TransformToType[i] = SVT;
            RegisterTypeForVT[i] = SVT;
            NumRegistersForVT[i] = 1;
            ValueTypeActions.setTypeAction(VT, TypeWidenVector);
            IsLegalWiderType = true;
            break;
          }
        }
        if (IsLegalWiderType)
          break;
      } else {
        // Only widen to the next power of 2 to keep consistency with EVT.
        MVT NVT = VT.getPow2VectorType();
        if (isTypeLegal(NVT)) {
          TransformToType[i] = NVT;
          ValueTypeActions.setTypeAction(VT, TypeWidenVector);
          RegisterTypeForVT[i] = NVT;
          NumRegistersForVT[i] = 1;
          break;
        }
      }
      [[fallthrough]];

    case TypeSplitVector:
    case TypeScalarizeVector: {
      MVT IntermediateVT;
      MVT RegisterVT;
      unsigned NumIntermediates;
      unsigned NumRegisters = getVectorTypeBreakdownMVT(VT, IntermediateVT,
          NumIntermediates, RegisterVT, this);
      NumRegistersForVT[i] = NumRegisters;
      assert(NumRegistersForVT[i] == NumRegisters &&
             "NumRegistersForVT size cannot represent NumRegisters!");
      RegisterTypeForVT[i] = RegisterVT;

      MVT NVT = VT.getPow2VectorType();
      if (NVT == VT) {
        // Type is already a power of 2. The default action is to split.
        TransformToType[i] = MVT::Other;
        if (PreferredAction == TypeScalarizeVector)
          ValueTypeActions.setTypeAction(VT, TypeScalarizeVector);
        else if (PreferredAction == TypeSplitVector)
          ValueTypeActions.setTypeAction(VT, TypeSplitVector);
        else if (EC.getKnownMinValue() > 1)
          ValueTypeActions.setTypeAction(VT, TypeSplitVector);
        else
          ValueTypeActions.setTypeAction(VT, EC.isScalable()
                                                 ? TypeScalarizeScalableVector
                                                 : TypeScalarizeVector);
      } else {
        TransformToType[i] = NVT;
        ValueTypeActions.setTypeAction(VT, TypeWidenVector);
      }
      break;
    }
    default:
      llvm_unreachable("Unknown vector legalization action!");
    }
  }

  // Determine the 'representative' register class for each value type.
  // A representative register class is the largest (meaning one which is
  // not a sub-register class / subreg register class) legal register class for
  // a group of value types. For example, on i386, i8, i16, and i32
  // representative would be GR32; while on x86_64 it's GR64.
  for (unsigned i = 0; i != MVT::VALUETYPE_SIZE; ++i) {
    const TargetRegisterClass *RRC;
    uint8_t Cost;
    std::tie(RRC, Cost) = findRepresentativeClass(TRI, (MVT::SimpleValueType)i);
    RepRegClassForVT[i] = RRC;
    RepRegClassCostForVT[i] = Cost;
  }
}

EVT TargetLoweringBase::getSetCCResultType(const DataLayout &DL, LLVMContext &,
                                           EVT VT) const {
  assert(!VT.isVector() && "No default SetCC type for vectors!");
  return getPointerTy(DL).SimpleTy;
}

MVT::SimpleValueType TargetLoweringBase::getCmpLibcallReturnType() const {
  return MVT::i32; // return the default value
}

/// getVectorTypeBreakdown - Vector types are broken down into some number of
/// legal first class types. For example, MVT::v8f32 maps to 2 MVT::v4f32
/// with Altivec or SSE1, or 8 promoted MVT::f64 values with the X86 FP stack.
/// Similarly, MVT::v2i64 turns into 4 MVT::i32 values with both PPC and X86.
///
/// This method returns the number of registers needed, and the VT for each
/// register. It also returns the VT and quantity of the intermediate values
/// before they are promoted/expanded.
unsigned TargetLoweringBase::getVectorTypeBreakdown(LLVMContext &Context,
                                                    EVT VT, EVT &IntermediateVT,
                                                    unsigned &NumIntermediates,
                                                    MVT &RegisterVT) const {
  ElementCount EltCnt = VT.getVectorElementCount();

  // If there is a wider vector type with the same element type as this one,
  // or a promoted vector type that has the same number of elements which
  // are wider, then we should convert to that legal vector type.
  // This handles things like <2 x float> -> <4 x float> and
  // <4 x i1> -> <4 x i32>.
  LegalizeTypeAction TA = getTypeAction(Context, VT);
  if (!EltCnt.isScalar() &&
      (TA == TypeWidenVector || TA == TypePromoteInteger)) {
    EVT RegisterEVT = getTypeToTransformTo(Context, VT);
    if (isTypeLegal(RegisterEVT)) {
      IntermediateVT = RegisterEVT;
      RegisterVT = RegisterEVT.getSimpleVT();
      NumIntermediates = 1;
      return 1;
    }
  }

  // Figure out the right, legal destination reg to copy into.
  EVT EltTy = VT.getVectorElementType();

  unsigned NumVectorRegs = 1;

  // Scalable vectors cannot be scalarized, so handle the legalisation of the
  // types like done elsewhere in SelectionDAG.
  if (EltCnt.isScalable()) {
    LegalizeKind LK;
    EVT PartVT = VT;
    do {
      // Iterate until we've found a legal (part) type to hold VT.
      LK = getTypeConversion(Context, PartVT);
      PartVT = LK.second;
    } while (LK.first != TypeLegal);

    if (!PartVT.isVector()) {
      report_fatal_error(
          "Don't know how to legalize this scalable vector type");
    }

    NumIntermediates =
        divideCeil(VT.getVectorElementCount().getKnownMinValue(),
                   PartVT.getVectorElementCount().getKnownMinValue());
    IntermediateVT = PartVT;
    RegisterVT = getRegisterType(Context, IntermediateVT);
    return NumIntermediates;
  }

  // FIXME: We don't support non-power-of-2-sized vectors for now. Ideally
  // we could break down into LHS/RHS like LegalizeDAG does.
  if (!isPowerOf2_32(EltCnt.getKnownMinValue())) {
    NumVectorRegs = EltCnt.getKnownMinValue();
    EltCnt = ElementCount::getFixed(1);
  }

  // Divide the input until we get to a supported size. This will always
  // end with a scalar if the target doesn't support vectors.
  while (EltCnt.getKnownMinValue() > 1 &&
         !isTypeLegal(EVT::getVectorVT(Context, EltTy, EltCnt))) {
    EltCnt = EltCnt.divideCoefficientBy(2);
    NumVectorRegs <<= 1;
  }

  NumIntermediates = NumVectorRegs;

  EVT NewVT = EVT::getVectorVT(Context, EltTy, EltCnt);
  if (!isTypeLegal(NewVT))
    NewVT = EltTy;
  IntermediateVT = NewVT;

  MVT DestVT = getRegisterType(Context, NewVT);
  RegisterVT = DestVT;

  if (EVT(DestVT).bitsLT(NewVT)) { // Value is expanded, e.g. i64 -> i16.
    TypeSize NewVTSize = NewVT.getSizeInBits();
    // Convert sizes such as i33 to i64.
    if (!llvm::has_single_bit<uint32_t>(NewVTSize.getKnownMinValue()))
      NewVTSize = NewVTSize.coefficientNextPowerOf2();
    return NumVectorRegs * (NewVTSize / DestVT.getSizeInBits());
  }

  // Otherwise, promotion or legal types use the same number of registers as
  // the vector decimated to the appropriate level.
  return NumVectorRegs;
}

bool TargetLoweringBase::isSuitableForJumpTable(const SwitchInst *SI,
                                                uint64_t NumCases,
                                                uint64_t Range,
                                                ProfileSummaryInfo *PSI,
                                                BlockFrequencyInfo *BFI) const {
  // FIXME: This function checks the maximum table size and density, but the
  // minimum size is not checked. It would be nice if the minimum size were
  // also combined within this function. Currently, the minimum size check is
  // performed in findJumpTable() in SelectionDAGBuilder and
  // getEstimatedNumberOfCaseClusters() in BasicTTIImpl.
  const bool OptForSize =
      llvm::shouldOptimizeForSize(SI->getParent(), PSI, BFI);
  const unsigned MinDensity = getMinimumJumpTableDensity(OptForSize);
  const unsigned MaxJumpTableSize = getMaximumJumpTableSize();

  // Check whether the number of cases is small enough and
  // the range is dense enough for a jump table.
  return (OptForSize || Range <= MaxJumpTableSize) &&
         (NumCases * 100 >= Range * MinDensity);
}
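
// Worked example (not part of the upstream file): with the default minimum
// density of 10%, a switch whose cases span Range = 100 values needs
// NumCases * 100 >= 100 * 10, i.e. at least 10 populated cases, before a
// jump table is considered (subject to the maximum-size check above).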

MVT TargetLoweringBase::getPreferredSwitchConditionType(LLVMContext &Context,
                                                        EVT ConditionVT) const {
  return getRegisterType(Context, ConditionVT);
}

/// Get the EVTs and ArgFlags collections that represent the legalized return
/// type of the given function. This does not require a DAG or a return value,
/// and is suitable for use before any DAGs for the function are constructed.
/// TODO: Move this out of TargetLowering.cpp.
void llvm::GetReturnInfo(CallingConv::ID CC, Type *ReturnType,
                         AttributeList attr,
                         SmallVectorImpl<ISD::OutputArg> &Outs,
                         const TargetLowering &TLI, const DataLayout &DL) {
  SmallVector<EVT, 4> ValueVTs;
  ComputeValueVTs(TLI, DL, ReturnType, ValueVTs);
  unsigned NumValues = ValueVTs.size();
  if (NumValues == 0) return;

  for (unsigned j = 0, f = NumValues; j != f; ++j) {
    EVT VT = ValueVTs[j];
    ISD::NodeType ExtendKind = ISD::ANY_EXTEND;

    if (attr.hasRetAttr(Attribute::SExt))
      ExtendKind = ISD::SIGN_EXTEND;
    else if (attr.hasRetAttr(Attribute::ZExt))
      ExtendKind = ISD::ZERO_EXTEND;

    if (ExtendKind != ISD::ANY_EXTEND && VT.isInteger())
      VT = TLI.getTypeForExtReturn(ReturnType->getContext(), VT, ExtendKind);

    unsigned NumParts =
        TLI.getNumRegistersForCallingConv(ReturnType->getContext(), CC, VT);
    MVT PartVT =
        TLI.getRegisterTypeForCallingConv(ReturnType->getContext(), CC, VT);

    // 'inreg' on function refers to return value
    ISD::ArgFlagsTy Flags = ISD::ArgFlagsTy();
    if (attr.hasRetAttr(Attribute::InReg))
      Flags.setInReg();

    // Propagate extension type if any
    if (attr.hasRetAttr(Attribute::SExt))
      Flags.setSExt();
    else if (attr.hasRetAttr(Attribute::ZExt))
      Flags.setZExt();

    for (unsigned i = 0; i < NumParts; ++i)
      Outs.push_back(ISD::OutputArg(Flags, PartVT, VT, /*isfixed=*/true, 0, 0));
  }
}

Align TargetLoweringBase::getByValTypeAlignment(Type *Ty,
                                                const DataLayout &DL) const {
  return DL.getABITypeAlign(Ty);
}

bool TargetLoweringBase::allowsMemoryAccessForAlignment(
    LLVMContext &Context, const DataLayout &DL, EVT VT, unsigned AddrSpace,
    Align Alignment, MachineMemOperand::Flags Flags, unsigned *Fast) const {
  // Check if the specified alignment is sufficient based on the data layout.
  // TODO: While using the data layout works in practice, a better solution
  // would be to implement this check directly (make this a virtual function).
  // For example, the ABI alignment may change based on software platform while
  // this function should only be affected by hardware implementation.
  Type *Ty = VT.getTypeForEVT(Context);
  if (VT.isZeroSized() || Alignment >= DL.getABITypeAlign(Ty)) {
    // Assume that an access that meets the ABI-specified alignment is fast.
    if (Fast != nullptr)
      *Fast = 1;
    return true;
  }

  // This is a misaligned access.
  return allowsMisalignedMemoryAccesses(VT, AddrSpace, Alignment, Flags, Fast);
}

bool TargetLoweringBase::allowsMemoryAccessForAlignment(
    LLVMContext &Context, const DataLayout &DL, EVT VT,
    const MachineMemOperand &MMO, unsigned *Fast) const {
  return allowsMemoryAccessForAlignment(Context, DL, VT, MMO.getAddrSpace(),
                                        MMO.getAlign(), MMO.getFlags(), Fast);
}

bool TargetLoweringBase::allowsMemoryAccess(LLVMContext &Context,
                                            const DataLayout &DL, EVT VT,
                                            unsigned AddrSpace, Align Alignment,
                                            MachineMemOperand::Flags Flags,
                                            unsigned *Fast) const {
  return allowsMemoryAccessForAlignment(Context, DL, VT, AddrSpace, Alignment,
                                        Flags, Fast);
}

bool TargetLoweringBase::allowsMemoryAccess(LLVMContext &Context,
                                            const DataLayout &DL, EVT VT,
                                            const MachineMemOperand &MMO,
                                            unsigned *Fast) const {
  return allowsMemoryAccess(Context, DL, VT, MMO.getAddrSpace(), MMO.getAlign(),
                            MMO.getFlags(), Fast);
}

bool TargetLoweringBase::allowsMemoryAccess(LLVMContext &Context,
                                            const DataLayout &DL, LLT Ty,
                                            const MachineMemOperand &MMO,
                                            unsigned *Fast) const {
  EVT VT = getApproximateEVTForLLT(Ty, Context);
  return allowsMemoryAccess(Context, DL, VT, MMO.getAddrSpace(), MMO.getAlign(),
                            MMO.getFlags(), Fast);
}

//===----------------------------------------------------------------------===//
//  TargetTransformInfo Helpers
//===----------------------------------------------------------------------===//
1764
1766 enum InstructionOpcodes {
1767#define HANDLE_INST(NUM, OPCODE, CLASS) OPCODE = NUM,
1768#define LAST_OTHER_INST(NUM) InstructionOpcodesCount = NUM
1769#include "llvm/IR/Instruction.def"
1770 };
1771 switch (static_cast<InstructionOpcodes>(Opcode)) {
1772 case Ret: return 0;
1773 case Br: return 0;
1774 case Switch: return 0;
1775 case IndirectBr: return 0;
1776 case Invoke: return 0;
1777 case CallBr: return 0;
1778 case Resume: return 0;
1779 case Unreachable: return 0;
1780 case CleanupRet: return 0;
1781 case CatchRet: return 0;
1782 case CatchPad: return 0;
1783 case CatchSwitch: return 0;
1784 case CleanupPad: return 0;
1785 case FNeg: return ISD::FNEG;
1786 case Add: return ISD::ADD;
1787 case FAdd: return ISD::FADD;
1788 case Sub: return ISD::SUB;
1789 case FSub: return ISD::FSUB;
1790 case Mul: return ISD::MUL;
1791 case FMul: return ISD::FMUL;
1792 case UDiv: return ISD::UDIV;
1793 case SDiv: return ISD::SDIV;
1794 case FDiv: return ISD::FDIV;
1795 case URem: return ISD::UREM;
1796 case SRem: return ISD::SREM;
1797 case FRem: return ISD::FREM;
1798 case Shl: return ISD::SHL;
1799 case LShr: return ISD::SRL;
1800 case AShr: return ISD::SRA;
1801 case And: return ISD::AND;
1802 case Or: return ISD::OR;
1803 case Xor: return ISD::XOR;
1804 case Alloca: return 0;
1805 case Load: return ISD::LOAD;
1806 case Store: return ISD::STORE;
1807 case GetElementPtr: return 0;
1808 case Fence: return 0;
1809 case AtomicCmpXchg: return 0;
1810 case AtomicRMW: return 0;
1811 case Trunc: return ISD::TRUNCATE;
1812 case ZExt: return ISD::ZERO_EXTEND;
1813 case SExt: return ISD::SIGN_EXTEND;
1814 case FPToUI: return ISD::FP_TO_UINT;
1815 case FPToSI: return ISD::FP_TO_SINT;
1816 case UIToFP: return ISD::UINT_TO_FP;
1817 case SIToFP: return ISD::SINT_TO_FP;
1818 case FPTrunc: return ISD::FP_ROUND;
1819 case FPExt: return ISD::FP_EXTEND;
1820 case PtrToInt: return ISD::BITCAST;
1821 case IntToPtr: return ISD::BITCAST;
1822 case BitCast: return ISD::BITCAST;
1823 case AddrSpaceCast: return ISD::ADDRSPACECAST;
1824 case ICmp: return ISD::SETCC;
1825 case FCmp: return ISD::SETCC;
1826 case PHI: return 0;
1827 case Call: return 0;
1828 case Select: return ISD::SELECT;
1829 case UserOp1: return 0;
1830 case UserOp2: return 0;
1831 case VAArg: return 0;
1832 case ExtractElement: return ISD::EXTRACT_VECTOR_ELT;
1833 case InsertElement: return ISD::INSERT_VECTOR_ELT;
1834 case ShuffleVector: return ISD::VECTOR_SHUFFLE;
1835 case ExtractValue: return ISD::MERGE_VALUES;
1836 case InsertValue: return ISD::MERGE_VALUES;
1837 case LandingPad: return 0;
1838 case Freeze: return ISD::FREEZE;
1839 }
1840
1841 llvm_unreachable("Unknown instruction type encountered!");
1842}
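// Illustrative example (not from the original source): a few sample mappings
// implied by the switch above.
//   InstructionOpcodeToISD(Instruction::Add)  -> ISD::ADD
//   InstructionOpcodeToISD(Instruction::FCmp) -> ISD::SETCC
//   InstructionOpcodeToISD(Instruction::PHI)  -> 0 (no SelectionDAG equivalent)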
1843
1844Value *
1845 TargetLoweringBase::getDefaultSafeStackPointerLocation(IRBuilderBase &IRB,
1846 bool UseTLS) const {
1847 // compiler-rt provides a variable with a magic name. Targets that do not
1848 // link with compiler-rt may also provide such a variable.
1849 Module *M = IRB.GetInsertBlock()->getParent()->getParent();
1850 const char *UnsafeStackPtrVar = "__safestack_unsafe_stack_ptr";
1851 auto UnsafeStackPtr =
1852 dyn_cast_or_null<GlobalVariable>(M->getNamedValue(UnsafeStackPtrVar));
1853
1854 const DataLayout &DL = M->getDataLayout();
1855 PointerType *StackPtrTy = DL.getAllocaPtrType(M->getContext());
1856
1857 if (!UnsafeStackPtr) {
1858 auto TLSModel = UseTLS ?
1859 GlobalValue::InitialExecTLSModel :
1860 GlobalValue::NotThreadLocal;
1861 // The global variable is not defined yet, define it ourselves.
1862 // We use the initial-exec TLS model because we do not support the
1863 // variable living anywhere other than in the main executable.
1864 UnsafeStackPtr = new GlobalVariable(
1865 *M, StackPtrTy, false, GlobalValue::ExternalLinkage, nullptr,
1866 UnsafeStackPtrVar, nullptr, TLSModel);
1867 } else {
1868 // The variable exists, check its type and attributes.
1869 //
1870 // FIXME: Move to IR verifier.
1871 if (UnsafeStackPtr->getValueType() != StackPtrTy)
1872 report_fatal_error(Twine(UnsafeStackPtrVar) + " must have void* type");
1873 if (UseTLS != UnsafeStackPtr->isThreadLocal())
1874 report_fatal_error(Twine(UnsafeStackPtrVar) + " must " +
1875 (UseTLS ? "" : "not ") + "be thread-local");
1876 }
1877 return UnsafeStackPtr;
1878}
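// Illustrative example (not from the original source): when the variable is
// created above with UseTLS == true, the declaration corresponds to IR along
// the lines of
//   @__safestack_unsafe_stack_ptr = external thread_local(initialexec) global ptr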
1879
1880Value *
1881 TargetLoweringBase::getSafeStackPointerLocation(IRBuilderBase &IRB) const {
1882 if (!TM.getTargetTriple().isAndroid())
1883 return getDefaultSafeStackPointerLocation(IRB, true);
1884
1885 // Android provides a libc function to retrieve the address of the current
1886 // thread's unsafe stack pointer.
1887 Module *M = IRB.GetInsertBlock()->getParent()->getParent();
1888 auto *PtrTy = PointerType::getUnqual(M->getContext());
1889 FunctionCallee Fn =
1890 M->getOrInsertFunction("__safestack_pointer_address", PtrTy);
1891 return IRB.CreateCall(Fn);
1892}
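// Illustrative example (not from the original source): on Android the call
// created above corresponds to IR along the lines of
//   %unsafe_stack_ptr = call ptr @__safestack_pointer_address()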
1893
1894//===----------------------------------------------------------------------===//
1895// Loop Strength Reduction hooks
1896//===----------------------------------------------------------------------===//
1897
1898/// isLegalAddressingMode - Return true if the addressing mode represented
1899/// by AM is legal for this target, for a load/store of the specified type.
1900 bool TargetLoweringBase::isLegalAddressingMode(const DataLayout &DL,
1901 const AddrMode &AM, Type *Ty,
1902 unsigned AS, Instruction *I) const {
1903 // The default implementation supports a conservative RISCy addressing mode:
1904 // r+r and r+i.
1905
1906 // Scalable offsets not supported
1907 if (AM.ScalableOffset)
1908 return false;
1909
1910 // Allows a sign-extended 16-bit immediate field.
1911 if (AM.BaseOffs <= -(1LL << 16) || AM.BaseOffs >= (1LL << 16)-1)
1912 return false;
1913
1914 // No global is ever allowed as a base.
1915 if (AM.BaseGV)
1916 return false;
1917
1918 // Only support r+r:
1919 switch (AM.Scale) {
1920 case 0: // "r+i" or just "i", depending on HasBaseReg.
1921 break;
1922 case 1:
1923 if (AM.HasBaseReg && AM.BaseOffs) // "r+r+i" is not allowed.
1924 return false;
1925 // Otherwise we have r+r or r+i.
1926 break;
1927 case 2:
1928 if (AM.HasBaseReg || AM.BaseOffs) // 2*r+r or 2*r+i is not allowed.
1929 return false;
1930 // Allow 2*r as r+r.
1931 break;
1932 default: // Don't allow n * r
1933 return false;
1934 }
1935
1936 return true;
1937}
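// Illustrative examples (not from the original source) of how the default
// rules above classify addressing modes:
//   r              -> legal  (Scale == 0, no offset)
//   r + 4096       -> legal  (offset fits the signed 16-bit window)
//   r + r          -> legal  (Scale == 1, no offset)
//   2*r            -> legal  (folded as r + r)
//   r + r + 8      -> rejected (r+r+i)
//   global + r     -> rejected (no global bases)
//   3*r            -> rejected (Scale > 2)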
1938
1939//===----------------------------------------------------------------------===//
1940// Stack Protector
1941//===----------------------------------------------------------------------===//
1942
1943// For OpenBSD return its special guard variable. Otherwise return nullptr,
1944 // so that SelectionDAG can handle SSP.
1945 Value *TargetLoweringBase::getIRStackGuard(IRBuilderBase &IRB) const {
1946 if (getTargetMachine().getTargetTriple().isOSOpenBSD()) {
1947 Module &M = *IRB.GetInsertBlock()->getParent()->getParent();
1948 PointerType *PtrTy = PointerType::getUnqual(M.getContext());
1949 Constant *C = M.getOrInsertGlobal("__guard_local", PtrTy);
1950 if (GlobalVariable *G = dyn_cast_or_null<GlobalVariable>(C))
1951 G->setVisibility(GlobalValue::HiddenVisibility);
1952 return C;
1953 }
1954 return nullptr;
1955}
1956
1957// Currently only support "standard" __stack_chk_guard.
1958// TODO: add LOAD_STACK_GUARD support.
1959 void TargetLoweringBase::insertSSPDeclarations(Module &M) const {
1960 if (!M.getNamedValue("__stack_chk_guard")) {
1961 auto *GV = new GlobalVariable(M, PointerType::getUnqual(M.getContext()),
1962 false, GlobalValue::ExternalLinkage,
1963 nullptr, "__stack_chk_guard");
1964
1965 // FreeBSD has "__stack_chk_guard" defined externally on libc.so
1966 if (M.getDirectAccessExternalData() &&
1967 !TM.getTargetTriple().isWindowsGNUEnvironment() &&
1968 !(TM.getTargetTriple().isPPC64() &&
1969 TM.getTargetTriple().isOSFreeBSD()) &&
1970 (!TM.getTargetTriple().isOSDarwin() ||
1971 TM.getRelocationModel() == Reloc::Static))
1972 GV->setDSOLocal(true);
1973 }
1974}
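// Illustrative example (not from the original source): the declaration
// inserted above corresponds to IR along the lines of
//   @__stack_chk_guard = external global ptr
// with dso_local added when direct access to external data is allowed and
// none of the platform exceptions above apply.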
1975
1976// Currently only support "standard" __stack_chk_guard.
1977// TODO: add LOAD_STACK_GUARD support.
1978 Value *TargetLoweringBase::getSDagStackGuard(const Module &M) const {
1979 return M.getNamedValue("__stack_chk_guard");
1980}
1981
1982 Function *TargetLoweringBase::getSSPStackGuardCheck(const Module &M) const {
1983 return nullptr;
1984}
1985
1986 unsigned TargetLoweringBase::getMinimumJumpTableEntries() const {
1987 return MinimumJumpTableEntries;
1988}
1989
1990 void TargetLoweringBase::setMinimumJumpTableEntries(unsigned Val) {
1991 MinimumJumpTableEntries = Val;
1992}
1993
1994unsigned TargetLoweringBase::getMinimumJumpTableDensity(bool OptForSize) const {
1995 return OptForSize ? OptsizeJumpTableDensity : JumpTableDensity;
1996}
1997
1998 unsigned TargetLoweringBase::getMaximumJumpTableSize() const {
1999 return MaximumJumpTableSize;
2000}
2001
2002 void TargetLoweringBase::setMaximumJumpTableSize(unsigned Val) {
2003 MaximumJumpTableSize = Val;
2004}
2005
2006 bool TargetLoweringBase::isJumpTableRelative() const {
2007 return getTargetMachine().isPositionIndependent();
2008}
2009
2010 Align TargetLoweringBase::getPrefLoopAlignment(MachineLoop *ML) const {
2011 if (TM.Options.LoopAlignment)
2012 return Align(TM.Options.LoopAlignment);
2013 return PrefLoopAlignment;
2014}
2015
2016 unsigned TargetLoweringBase::getMaxPermittedBytesForAlignment(
2017 MachineBasicBlock *MBB) const {
2018 return MaxBytesForAlignment;
2019}
2020
2021//===----------------------------------------------------------------------===//
2022// Reciprocal Estimates
2023//===----------------------------------------------------------------------===//
2024
2025/// Get the reciprocal estimate attribute string for a function that will
2026/// override the target defaults.
2027 static StringRef getRecipEstimateForFunc(MachineFunction &MF) {
2028 const Function &F = MF.getFunction();
2029 return F.getFnAttribute("reciprocal-estimates").getValueAsString();
2030}
2031
2032/// Construct a string for the given reciprocal operation of the given type.
2033/// This string should match the corresponding option to the front-end's
2034/// "-mrecip" flag assuming those strings have been passed through in an
2035/// attribute string. For example, "vec-divf" for a division of a vXf32.
2036static std::string getReciprocalOpName(bool IsSqrt, EVT VT) {
2037 std::string Name = VT.isVector() ? "vec-" : "";
2038
2039 Name += IsSqrt ? "sqrt" : "div";
2040
2041 // TODO: Handle other float types?
2042 if (VT.getScalarType() == MVT::f64) {
2043 Name += "d";
2044 } else if (VT.getScalarType() == MVT::f16) {
2045 Name += "h";
2046 } else {
2047 assert(VT.getScalarType() == MVT::f32 &&
2048 "Unexpected FP type for reciprocal estimate");
2049 Name += "f";
2050 }
2051
2052 return Name;
2053}
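// Illustrative examples (not from the original source) of the names built
// above:
//   getReciprocalOpName(/*IsSqrt=*/true,  f64)   == "sqrtd"
//   getReciprocalOpName(/*IsSqrt=*/false, v4f32) == "vec-divf"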
2054
2055/// Return the character position and value (a single numeric character) of a
2056/// customized refinement operation in the input string if it exists. Return
2057/// false if there is no customized refinement step count.
2058static bool parseRefinementStep(StringRef In, size_t &Position,
2059 uint8_t &Value) {
2060 const char RefStepToken = ':';
2061 Position = In.find(RefStepToken);
2062 if (Position == StringRef::npos)
2063 return false;
2064
2065 StringRef RefStepString = In.substr(Position + 1);
2066 // Allow exactly one numeric character for the additional refinement
2067 // step parameter.
2068 if (RefStepString.size() == 1) {
2069 char RefStepChar = RefStepString[0];
2070 if (isDigit(RefStepChar)) {
2071 Value = RefStepChar - '0';
2072 return true;
2073 }
2074 }
2075 report_fatal_error("Invalid refinement step for -recip.");
2076}
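// Illustrative example (not from the original source): for In == "vec-divf:2"
// the helper above sets Position = 8 (the index of ':') and Value = 2 and
// returns true; "vec-divf" (no token) returns false, and a multi-digit count
// such as "vec-divf:25" is a fatal error.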
2077
2078/// For the input attribute string, return one of the ReciprocalEstimate enum
2079/// status values (enabled, disabled, or not specified) for this operation on
2080/// the specified data type.
2081static int getOpEnabled(bool IsSqrt, EVT VT, StringRef Override) {
2082 if (Override.empty())
2083 return TargetLoweringBase::ReciprocalEstimate::Unspecified;
2084
2085 SmallVector<StringRef, 4> OverrideVector;
2086 Override.split(OverrideVector, ',');
2087 unsigned NumArgs = OverrideVector.size();
2088
2089 // Check if "all", "none", or "default" was specified.
2090 if (NumArgs == 1) {
2091 // Look for an optional setting of the number of refinement steps needed
2092 // for this type of reciprocal operation.
2093 size_t RefPos;
2094 uint8_t RefSteps;
2095 if (parseRefinementStep(Override, RefPos, RefSteps)) {
2096 // Split the string for further processing.
2097 Override = Override.substr(0, RefPos);
2098 }
2099
2100 // All reciprocal types are enabled.
2101 if (Override == "all")
2102 return TargetLoweringBase::ReciprocalEstimate::Enabled;
2103
2104 // All reciprocal types are disabled.
2105 if (Override == "none")
2106 return TargetLoweringBase::ReciprocalEstimate::Disabled;
2107
2108 // Target defaults for enablement are used.
2109 if (Override == "default")
2110 return TargetLoweringBase::ReciprocalEstimate::Unspecified;
2111 }
2112
2113 // The attribute string may omit the size suffix ('f'/'d').
2114 std::string VTName = getReciprocalOpName(IsSqrt, VT);
2115 std::string VTNameNoSize = VTName;
2116 VTNameNoSize.pop_back();
2117 static const char DisabledPrefix = '!';
2118
2119 for (StringRef RecipType : OverrideVector) {
2120 size_t RefPos;
2121 uint8_t RefSteps;
2122 if (parseRefinementStep(RecipType, RefPos, RefSteps))
2123 RecipType = RecipType.substr(0, RefPos);
2124
2125 // Ignore the disablement token for string matching.
2126 bool IsDisabled = RecipType[0] == DisabledPrefix;
2127 if (IsDisabled)
2128 RecipType = RecipType.substr(1);
2129
2130 if (RecipType == VTName || RecipType == VTNameNoSize)
2131 return IsDisabled ? TargetLoweringBase::ReciprocalEstimate::Disabled
2132 : TargetLoweringBase::ReciprocalEstimate::Enabled;
2133 }
2134
2135 return TargetLoweringBase::ReciprocalEstimate::Unspecified;
2136}
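// Illustrative example (not from the original source): given a function
// attribute "reciprocal-estimates"="!sqrtf,vec-divf:2", the logic above gives
//   getOpEnabled(/*IsSqrt=*/true,  f32,   Override) -> Disabled ("!sqrtf")
//   getOpEnabled(/*IsSqrt=*/false, v4f32, Override) -> Enabled  ("vec-divf:2")
//   getOpEnabled(/*IsSqrt=*/true,  f64,   Override) -> Unspecified (no match)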
2137
2138/// For the input attribute string, return the customized refinement step count
2139/// for this operation on the specified data type. If the step count does not
2140/// exist, return the ReciprocalEstimate enum value for unspecified.
2141static int getOpRefinementSteps(bool IsSqrt, EVT VT, StringRef Override) {
2142 if (Override.empty())
2143 return TargetLoweringBase::ReciprocalEstimate::Unspecified;
2144
2145 SmallVector<StringRef, 4> OverrideVector;
2146 Override.split(OverrideVector, ',');
2147 unsigned NumArgs = OverrideVector.size();
2148
2149 // Check if "all", "default", or "none" was specified.
2150 if (NumArgs == 1) {
2151 // Look for an optional setting of the number of refinement steps needed
2152 // for this type of reciprocal operation.
2153 size_t RefPos;
2154 uint8_t RefSteps;
2155 if (!parseRefinementStep(Override, RefPos, RefSteps))
2156 return TargetLoweringBase::ReciprocalEstimate::Unspecified;
2157
2158 // Split the string for further processing.
2159 Override = Override.substr(0, RefPos);
2160 assert(Override != "none" &&
2161 "Disabled reciprocals, but specified refinement steps?");
2162
2163 // If this is a general override, return the specified number of steps.
2164 if (Override == "all" || Override == "default")
2165 return RefSteps;
2166 }
2167
2168 // The attribute string may omit the size suffix ('f'/'d').
2169 std::string VTName = getReciprocalOpName(IsSqrt, VT);
2170 std::string VTNameNoSize = VTName;
2171 VTNameNoSize.pop_back();
2172
2173 for (StringRef RecipType : OverrideVector) {
2174 size_t RefPos;
2175 uint8_t RefSteps;
2176 if (!parseRefinementStep(RecipType, RefPos, RefSteps))
2177 continue;
2178
2179 RecipType = RecipType.substr(0, RefPos);
2180 if (RecipType == VTName || RecipType == VTNameNoSize)
2181 return RefSteps;
2182 }
2183
2184 return TargetLoweringBase::ReciprocalEstimate::Unspecified;
2185}
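// Illustrative example (not from the original source): continuing the example
// above, "reciprocal-estimates"="!sqrtf,vec-divf:2" gives
//   getOpRefinementSteps(/*IsSqrt=*/false, v4f32, Override) == 2
// while entries without a ':' suffix leave the step count Unspecified.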
2186
2187 int TargetLoweringBase::getRecipEstimateSqrtEnabled(EVT VT,
2188 MachineFunction &MF) const {
2189 return getOpEnabled(true, VT, getRecipEstimateForFunc(MF));
2190}
2191
2192 int TargetLoweringBase::getRecipEstimateDivEnabled(EVT VT,
2193 MachineFunction &MF) const {
2194 return getOpEnabled(false, VT, getRecipEstimateForFunc(MF));
2195}
2196
2197 int TargetLoweringBase::getSqrtRefinementSteps(EVT VT,
2198 MachineFunction &MF) const {
2199 return getOpRefinementSteps(true, VT, getRecipEstimateForFunc(MF));
2200}
2201
2202 int TargetLoweringBase::getDivRefinementSteps(EVT VT,
2203 MachineFunction &MF) const {
2204 return getOpRefinementSteps(false, VT, getRecipEstimateForFunc(MF));
2205}
2206
2207 bool TargetLoweringBase::isLoadBitCastBeneficial(
2208 EVT LoadVT, EVT BitcastVT, const SelectionDAG &DAG,
2209 const MachineMemOperand &MMO) const {
2210 // Single-element vectors are scalarized, so we should generally avoid having
2211 // any memory operations on such types, as they would get scalarized too.
2212 if (LoadVT.isFixedLengthVector() && BitcastVT.isFixedLengthVector() &&
2213 BitcastVT.getVectorNumElements() == 1)
2214 return false;
2215
2216 // Don't do this if we could do an indexed load on the original type, but not on
2217 // the new one.
2218 if (!LoadVT.isSimple() || !BitcastVT.isSimple())
2219 return true;
2220
2221 MVT LoadMVT = LoadVT.getSimpleVT();
2222
2223 // Don't bother doing this if it's just going to be promoted again later, as
2224 // doing so might interfere with other combines.
2225 if (getOperationAction(ISD::LOAD, LoadMVT) == Promote &&
2226 getTypeToPromoteTo(ISD::LOAD, LoadMVT) == BitcastVT.getSimpleVT())
2227 return false;
2228
2229 unsigned Fast = 0;
2230 return allowsMemoryAccess(*DAG.getContext(), DAG.getDataLayout(), BitcastVT,
2231 MMO, &Fast) &&
2232 Fast;
2233}
2234
2235 void TargetLoweringBase::finalizeLowering(MachineFunction &MF) const {
2236 MF.getRegInfo().freezeReservedRegs();
2237}
2238
2239 MachineMemOperand::Flags TargetLoweringBase::getLoadMemOperandFlags(
2240 const LoadInst &LI, const DataLayout &DL, AssumptionCache *AC,
2241 const TargetLibraryInfo *LibInfo) const {
2242 MachineMemOperand::Flags Flags = MachineMemOperand::MOLoad;
2243 if (LI.isVolatile())
2244 Flags |= MachineMemOperand::MOVolatile;
2245
2246 if (LI.hasMetadata(LLVMContext::MD_nontemporal))
2247 Flags |= MachineMemOperand::MONonTemporal;
2248
2249 if (LI.hasMetadata(LLVMContext::MD_invariant_load))
2250 Flags |= MachineMemOperand::MOInvariant;
2251
2252 if (isDereferenceableAndAlignedPointer(LI.getPointerOperand(), LI.getType(),
2253 LI.getAlign(), DL, &LI, AC,
2254 /*DT=*/nullptr, LibInfo))
2255 Flags |= MachineMemOperand::MODereferenceable;
2256
2257 Flags |= getTargetMMOFlags(LI);
2258 return Flags;
2259}
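// Illustrative example (not from the original source): for a volatile load
// tagged with !nontemporal metadata, the flags computed above come out as
//   MOLoad | MOVolatile | MONonTemporal
// plus whatever getTargetMMOFlags(LI) contributes for the target.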
2260
2261 MachineMemOperand::Flags
2262 TargetLoweringBase::getStoreMemOperandFlags(const StoreInst &SI,
2263 const DataLayout &DL) const {
2264 MachineMemOperand::Flags Flags = MachineMemOperand::MOStore;
2265
2266 if (SI.isVolatile())
2267 Flags |= MachineMemOperand::MOVolatile;
2268
2269 if (SI.hasMetadata(LLVMContext::MD_nontemporal))
2270 Flags |= MachineMemOperand::MONonTemporal;
2271
2272 // FIXME: Not preserving dereferenceable
2273 Flags |= getTargetMMOFlags(SI);
2274 return Flags;
2275}
2276
2277 MachineMemOperand::Flags
2278 TargetLoweringBase::getAtomicMemOperandFlags(const Instruction &AI,
2279 const DataLayout &DL) const {
2280 auto Flags = MachineMemOperand::MOLoad | MachineMemOperand::MOStore;
2281
2282 if (const AtomicRMWInst *RMW = dyn_cast<AtomicRMWInst>(&AI)) {
2283 if (RMW->isVolatile())
2284 Flags |= MachineMemOperand::MOVolatile;
2285 } else if (const AtomicCmpXchgInst *CmpX = dyn_cast<AtomicCmpXchgInst>(&AI)) {
2286 if (CmpX->isVolatile())
2287 Flags |= MachineMemOperand::MOVolatile;
2288 } else
2289 llvm_unreachable("not an atomic instruction");
2290
2291 // FIXME: Not preserving dereferenceable
2292 Flags |= getTargetMMOFlags(AI);
2293 return Flags;
2294}
2295
2296 Instruction *TargetLoweringBase::emitLeadingFence(IRBuilderBase &Builder,
2297 Instruction *Inst,
2298 AtomicOrdering Ord) const {
2299 if (isReleaseOrStronger(Ord) && Inst->hasAtomicStore())
2300 return Builder.CreateFence(Ord);
2301 else
2302 return nullptr;
2303}
2304
2305 Instruction *TargetLoweringBase::emitTrailingFence(IRBuilderBase &Builder,
2306 Instruction *Inst,
2307 AtomicOrdering Ord) const {
2308 if (isAcquireOrStronger(Ord))
2309 return Builder.CreateFence(Ord);
2310 else
2311 return nullptr;
2312}
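// Illustrative example (not from the original source): for targets that
// insert IR-level fences around atomics, a seq_cst store would be bracketed
// roughly as
//   fence seq_cst            ; emitLeadingFence (release or stronger, stores)
//   store atomic i32 ..., align 4
//   fence seq_cst            ; emitTrailingFence (acquire or stronger)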
2313
2314//===----------------------------------------------------------------------===//
2315// GlobalISel Hooks
2316//===----------------------------------------------------------------------===//
2317
2318 bool TargetLoweringBase::shouldLocalize(const MachineInstr &MI,
2319 const TargetTransformInfo *TTI) const {
2320 auto &MF = *MI.getMF();
2321 auto &MRI = MF.getRegInfo();
2322 // Assuming a spill and reload of a value has a cost of 1 instruction each,
2323 // this helper function computes the maximum number of uses we should consider
2324 // for remat. E.g. on arm64 global addresses take 2 insts to materialize. We
2325 // break even in terms of code size when the original MI has 2 users vs
2326 // choosing to potentially spill. Any more than 2 users and we have a net code
2327 // size increase. This doesn't take into account register pressure though.
2328 auto maxUses = [](unsigned RematCost) {
2329 // A cost of 1 means remats are basically free.
2330 if (RematCost == 1)
2331 return std::numeric_limits<unsigned>::max();
2332 if (RematCost == 2)
2333 return 2U;
2334
2335 // Remat is too expensive, only sink if there's one user.
2336 if (RematCost > 2)
2337 return 1U;
2338 llvm_unreachable("Unexpected remat cost");
2339 };
2340
2341 switch (MI.getOpcode()) {
2342 default:
2343 return false;
2344 // Constant-like instructions should be close to their users.
2345 // We don't want long live-ranges for them.
2346 case TargetOpcode::G_CONSTANT:
2347 case TargetOpcode::G_FCONSTANT:
2348 case TargetOpcode::G_FRAME_INDEX:
2349 case TargetOpcode::G_INTTOPTR:
2350 return true;
2351 case TargetOpcode::G_GLOBAL_VALUE: {
2352 unsigned RematCost = TTI->getGISelRematGlobalCost();
2353 Register Reg = MI.getOperand(0).getReg();
2354 unsigned MaxUses = maxUses(RematCost);
2355 if (MaxUses == UINT_MAX)
2356 return true; // Remats are "free" so always localize.
2357 return MRI.hasAtMostUserInstrs(Reg, MaxUses);
2358 }
2359 }
2360}
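// Illustrative example (not from the original source): with
// getGISelRematGlobalCost() == 2 (e.g. a 2-instruction global-address
// materialization), a G_GLOBAL_VALUE is localized only while it has at most
// two using instructions; with three or more users the rematerialized copies
// would cost more code size than a single spill/reload pair.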
unsigned const MachineRegisterInfo * MRI
AMDGPU Register Bank Select
Rewrite undef for PHI
MachineBasicBlock & MBB
MachineBasicBlock MachineBasicBlock::iterator DebugLoc DL
This file contains the simple types necessary to represent the attributes associated with functions a...
This file implements the BitVector class.
return RetTy
std::string Name
IRTranslator LLVM IR MI
Module.h This file contains the declarations for the Module class.
#define F(x, y, z)
Definition: MD5.cpp:55
#define I(x, y, z)
Definition: MD5.cpp:58
#define G(x, y, z)
Definition: MD5.cpp:56
unsigned const TargetRegisterInfo * TRI
ConstantRange Range(APInt(BitWidth, Low), APInt(BitWidth, High))
static bool isDigit(const char C)
assert(ImpDefSCC.getReg()==AMDGPU::SCC &&ImpDefSCC.isDef())
This file contains some templates that are useful if you are working with the STL at all.
This file defines the SmallVector class.
This file contains some functions that are useful when dealing with strings.
static cl::opt< bool > JumpIsExpensiveOverride("jump-is-expensive", cl::init(false), cl::desc("Do not create extra branches to split comparison logic."), cl::Hidden)
#define OP_TO_LIBCALL(Name, Enum)
static cl::opt< unsigned > MinimumJumpTableEntries("min-jump-table-entries", cl::init(4), cl::Hidden, cl::desc("Set minimum number of entries to use a jump table."))
static cl::opt< bool > DisableStrictNodeMutation("disable-strictnode-mutation", cl::desc("Don't mutate strict-float node to a legalize node"), cl::init(false), cl::Hidden)
static bool parseRefinementStep(StringRef In, size_t &Position, uint8_t &Value)
Return the character position and value (a single numeric character) of a customized refinement opera...
static cl::opt< unsigned > MaximumJumpTableSize("max-jump-table-size", cl::init(UINT_MAX), cl::Hidden, cl::desc("Set maximum size of jump tables."))
static cl::opt< unsigned > JumpTableDensity("jump-table-density", cl::init(10), cl::Hidden, cl::desc("Minimum density for building a jump table in " "a normal function"))
Minimum jump table density for normal functions.
static unsigned getVectorTypeBreakdownMVT(MVT VT, MVT &IntermediateVT, unsigned &NumIntermediates, MVT &RegisterVT, TargetLoweringBase *TLI)
static std::string getReciprocalOpName(bool IsSqrt, EVT VT)
Construct a string for the given reciprocal operation of the given type.
#define LCALL5(A)
static int getOpRefinementSteps(bool IsSqrt, EVT VT, StringRef Override)
For the input attribute string, return the customized refinement step count for this operation on the...
static int getOpEnabled(bool IsSqrt, EVT VT, StringRef Override)
For the input attribute string, return one of the ReciprocalEstimate enum status values (enabled,...
static StringRef getRecipEstimateForFunc(MachineFunction &MF)
Get the reciprocal estimate attribute string for a function that will override the target defaults.
static cl::opt< unsigned > OptsizeJumpTableDensity("optsize-jump-table-density", cl::init(40), cl::Hidden, cl::desc("Minimum density for building a jump table in " "an optsize function"))
Minimum jump table density for -Os or -Oz functions.
This file describes how to lower LLVM code to machine code.
This pass exposes codegen information to IR-level passes.
Class for arbitrary precision integers.
Definition: APInt.h:78
A cache of @llvm.assume calls within a function.
An instruction that atomically checks whether a specified value is in a memory location,...
Definition: Instructions.h:501
an instruction that atomically reads a memory location, combines it with another value,...
Definition: Instructions.h:704
bool hasRetAttr(Attribute::AttrKind Kind) const
Return true if the attribute exists for the return value.
Definition: Attributes.h:848
const Function * getParent() const
Return the enclosing method, or null if none.
Definition: BasicBlock.h:219
void setBitsInMask(const uint32_t *Mask, unsigned MaskWords=~0u)
setBitsInMask - Add '1' bits from Mask to this vector.
Definition: BitVector.h:707
iterator_range< const_set_bits_iterator > set_bits() const
Definition: BitVector.h:140
BlockFrequencyInfo pass uses BlockFrequencyInfoImpl implementation to estimate IR basic block frequen...
This class represents a range of values.
Definition: ConstantRange.h:47
unsigned getActiveBits() const
Compute the maximal number of active bits needed to represent every value in this range.
ConstantRange umul_sat(const ConstantRange &Other) const
Perform an unsigned saturating multiplication of two constant ranges.
ConstantRange subtract(const APInt &CI) const
Subtract the specified constant from the endpoints of this constant range.
This is an important base class in LLVM.
Definition: Constant.h:42
This class represents an Operation in the Expression.
A parsed version of the target data layout string in and methods for querying it.
Definition: DataLayout.h:63
unsigned getPointerSize(unsigned AS=0) const
Layout pointer size in bytes, rounded up to a whole number of bytes.
Definition: DataLayout.cpp:739
static constexpr ElementCount getScalable(ScalarTy MinVal)
Definition: TypeSize.h:314
static constexpr ElementCount getFixed(ScalarTy MinVal)
Definition: TypeSize.h:311
constexpr bool isScalar() const
Exactly one element.
Definition: TypeSize.h:322
A handy container for a FunctionType+Callee-pointer pair, which can be passed around as a single enti...
Definition: DerivedTypes.h:170
Module * getParent()
Get the module that this global value is contained inside of...
Definition: GlobalValue.h:657
@ HiddenVisibility
The GV is hidden.
Definition: GlobalValue.h:68
@ ExternalLinkage
Externally visible function.
Definition: GlobalValue.h:52
Common base class shared among various IRBuilders.
Definition: IRBuilder.h:113
FenceInst * CreateFence(AtomicOrdering Ordering, SyncScope::ID SSID=SyncScope::System, const Twine &Name="")
Definition: IRBuilder.h:1842
BasicBlock * GetInsertBlock() const
Definition: IRBuilder.h:193
CallInst * CreateCall(FunctionType *FTy, Value *Callee, ArrayRef< Value * > Args={}, const Twine &Name="", MDNode *FPMathTag=nullptr)
Definition: IRBuilder.h:2449
bool hasAtomicStore() const LLVM_READONLY
Return true if this atomic instruction stores to memory.
bool hasMetadata() const
Return true if this instruction has any metadata attached to it.
Definition: Instruction.h:368
@ MAX_INT_BITS
Maximum number of bits that can be specified.
Definition: DerivedTypes.h:54
This is an important class for using LLVM in a threaded context.
Definition: LLVMContext.h:67
An instruction for reading from memory.
Definition: Instructions.h:176
Value * getPointerOperand()
Definition: Instructions.h:255
bool isVolatile() const
Return true if this is a load from a volatile memory location.
Definition: Instructions.h:205
Align getAlign() const
Return the alignment of the access that is being performed.
Definition: Instructions.h:211
Machine Value Type.
SimpleValueType SimpleTy
uint64_t getScalarSizeInBits() const
bool isVector() const
Return true if this is a vector value type.
bool isScalableVector() const
Return true if this is a vector value type where the runtime length is machine dependent.
static auto all_valuetypes()
SimpleValueType Iteration.
TypeSize getSizeInBits() const
Returns the size of the specified MVT in bits.
uint64_t getFixedSizeInBits() const
Return the size of the specified fixed width value type in bits.
ElementCount getVectorElementCount() const
bool isScalarInteger() const
Return true if this is an integer, not including vectors.
static MVT getVectorVT(MVT VT, unsigned NumElements)
MVT getVectorElementType() const
bool isValid() const
Return true if this is a valid simple valuetype.
static MVT getIntegerVT(unsigned BitWidth)
static auto fp_valuetypes()
MVT getPow2VectorType() const
Widens the length of the given vector MVT up to the nearest power of 2 and returns that type.
instr_iterator insert(instr_iterator I, MachineInstr *M)
Insert MI into the instruction list before I, possibly inside a bundle.
The MachineFrameInfo class represents an abstract stack frame until prolog/epilog code is inserted.
bool isStatepointSpillSlotObjectIndex(int ObjectIdx) const
Align getObjectAlign(int ObjectIdx) const
Return the alignment of the specified stack object.
int64_t getObjectSize(int ObjectIdx) const
Return the size of the specified object.
int64_t getObjectOffset(int ObjectIdx) const
Return the assigned stack offset of the specified object from the incoming stack pointer.
MachineMemOperand * getMachineMemOperand(MachinePointerInfo PtrInfo, MachineMemOperand::Flags f, LLT MemTy, Align base_alignment, const AAMDNodes &AAInfo=AAMDNodes(), const MDNode *Ranges=nullptr, SyncScope::ID SSID=SyncScope::System, AtomicOrdering Ordering=AtomicOrdering::NotAtomic, AtomicOrdering FailureOrdering=AtomicOrdering::NotAtomic)
getMachineMemOperand - Allocate a new MachineMemOperand.
MachineFrameInfo & getFrameInfo()
getFrameInfo - Return the frame info object for the current function.
MachineRegisterInfo & getRegInfo()
getRegInfo - Return information about the registers currently in use.
const DataLayout & getDataLayout() const
Return the DataLayout attached to the Module associated to this MF.
Function & getFunction()
Return the LLVM function that this machine code represents.
const MachineInstrBuilder & addImm(int64_t Val) const
Add a new immediate operand.
const MachineInstrBuilder & add(const MachineOperand &MO) const
const MachineInstrBuilder & cloneMemRefs(const MachineInstr &OtherMI) const
Representation of each machine instruction.
Definition: MachineInstr.h:69
unsigned getNumOperands() const
Retuns the total number of operands.
Definition: MachineInstr.h:578
bool mayLoad(QueryType Type=AnyInBundle) const
Return true if this instruction could possibly read memory.
void tieOperands(unsigned DefIdx, unsigned UseIdx)
Add a tie between the register operands at DefIdx and UseIdx.
void addMemOperand(MachineFunction &MF, MachineMemOperand *MO)
Add a MachineMemOperand to the machine instruction.
A description of a memory reference used in the backend.
unsigned getAddrSpace() const
Flags
Flags values. These may be or'd together.
@ MOVolatile
The memory access is volatile.
@ MODereferenceable
The memory access is dereferenceable (i.e., doesn't trap).
@ MOLoad
The memory access reads data.
@ MONonTemporal
The memory access is non-temporal.
@ MOInvariant
The memory access always returns the same value (or traps).
@ MOStore
The memory access writes data.
Flags getFlags() const
Return the raw flags of the source value,.
Align getAlign() const
Return the minimum known alignment in bytes of the actual memory reference.
MachineOperand class - Representation of each machine instruction operand.
bool isReg() const
isReg - Tests if this is a MO_Register operand.
bool isFI() const
isFI - Tests if this is a MO_FrameIndex operand.
void freezeReservedRegs()
freezeReservedRegs - Called by the register allocator to freeze the set of reserved registers before ...
A Module instance is used to store all the information related to an LLVM module.
Definition: Module.h:65
Class to represent pointers.
Definition: DerivedTypes.h:670
static PointerType * getUnqual(Type *ElementType)
This constructs a pointer to an object of the specified type in the default address space (address sp...
Definition: DerivedTypes.h:686
Analysis providing profile information.
Wrapper class representing virtual and physical registers.
Definition: Register.h:19
This is used to represent a portion of an LLVM function in a low-level Data Dependence DAG representa...
Definition: SelectionDAG.h:228
const DataLayout & getDataLayout() const
Definition: SelectionDAG.h:497
LLVMContext * getContext() const
Definition: SelectionDAG.h:510
size_t size() const
Definition: SmallVector.h:78
This class consists of common code factored out of the SmallVector class to reduce code duplication b...
Definition: SmallVector.h:573
void push_back(const T &Elt)
Definition: SmallVector.h:413
This is a 'vector' (really, a variable-sized array), optimized for the case when the array is small.
Definition: SmallVector.h:1196
An instruction for storing to memory.
Definition: Instructions.h:292
StringRef - Represent a constant reference to a string, i.e.
Definition: StringRef.h:51
std::pair< StringRef, StringRef > split(char Separator) const
Split into two substrings around the first occurrence of a separator character.
Definition: StringRef.h:700
constexpr StringRef substr(size_t Start, size_t N=npos) const
Return a reference to the substring from [Start, Start + N).
Definition: StringRef.h:571
constexpr bool empty() const
empty - Check if the string is empty.
Definition: StringRef.h:147
constexpr size_t size() const
size - Get the string size.
Definition: StringRef.h:150
static constexpr size_t npos
Definition: StringRef.h:53
bool isValid() const
Returns true if this iterator is still pointing at a valid entry.
Multiway switch.
Provides information about what library functions are available for the current target.
LegalizeTypeAction getTypeAction(MVT VT) const
void setTypeAction(MVT VT, LegalizeTypeAction Action)
This base class for TargetLowering contains the SelectionDAG-independent parts that can be used from ...
virtual Align getByValTypeAlignment(Type *Ty, const DataLayout &DL) const
Returns the desired alignment for ByVal or InAlloca aggregate function arguments in the caller parame...
int InstructionOpcodeToISD(unsigned Opcode) const
Get the ISD node that corresponds to the Instruction class opcode.
void setOperationAction(unsigned Op, MVT VT, LegalizeAction Action)
Indicate that the specified operation does not work with the specified type and indicate what to do a...
virtual void finalizeLowering(MachineFunction &MF) const
Execute target specific actions to finalize target lowering.
void initActions()
Initialize all of the actions to default values.
bool PredictableSelectIsExpensive
Tells the code generator that select is more expensive than a branch if the branch is usually predict...
unsigned MaxStoresPerMemcpyOptSize
Likewise for functions with the OptSize attribute.
MachineBasicBlock * emitPatchPoint(MachineInstr &MI, MachineBasicBlock *MBB) const
Replace/modify any TargetFrameIndex operands with a targte-dependent sequence of memory operands that...
virtual Value * getSafeStackPointerLocation(IRBuilderBase &IRB) const
Returns the target-specific address of the unsafe stack pointer.
int getRecipEstimateSqrtEnabled(EVT VT, MachineFunction &MF) const
Return a ReciprocalEstimate enum value for a square root of the given type based on the function's at...
virtual bool canOpTrap(unsigned Op, EVT VT) const
Returns true if the operation can trap for the value type.
virtual bool shouldLocalize(const MachineInstr &MI, const TargetTransformInfo *TTI) const
Check whether or not MI needs to be moved close to its uses.
virtual unsigned getMaxPermittedBytesForAlignment(MachineBasicBlock *MBB) const
Return the maximum amount of bytes allowed to be emitted when padding for alignment.
void setMaximumJumpTableSize(unsigned)
Indicate the maximum number of entries in jump tables.
virtual unsigned getMinimumJumpTableEntries() const
Return lower limit for number of blocks in a jump table.
const TargetMachine & getTargetMachine() const
unsigned MaxLoadsPerMemcmp
Specify maximum number of load instructions per memcmp call.
virtual unsigned getNumRegistersForCallingConv(LLVMContext &Context, CallingConv::ID CC, EVT VT) const
Certain targets require unusual breakdowns of certain types.
virtual MachineMemOperand::Flags getTargetMMOFlags(const Instruction &I) const
This callback is used to inspect load/store instructions and add target-specific MachineMemOperand fl...
unsigned MaxGluedStoresPerMemcpy
Specify max number of store instructions to glue in inlined memcpy.
virtual MVT getRegisterTypeForCallingConv(LLVMContext &Context, CallingConv::ID CC, EVT VT) const
Certain combinations of ABIs, Targets and features require that types are legal for some operations a...
LegalizeTypeAction
This enum indicates whether a types are legal for a target, and if not, what action should be used to...
virtual bool isSuitableForJumpTable(const SwitchInst *SI, uint64_t NumCases, uint64_t Range, ProfileSummaryInfo *PSI, BlockFrequencyInfo *BFI) const
Return true if lowering to a jump table is suitable for a set of case clusters which may contain NumC...
void setIndexedMaskedLoadAction(unsigned IdxMode, MVT VT, LegalizeAction Action)
Indicate that the specified indexed masked load does or does not work with the specified type and ind...
virtual Value * getSDagStackGuard(const Module &M) const
Return the variable that's previously inserted by insertSSPDeclarations, if any, otherwise return nul...
virtual bool useFPRegsForHalfType() const
virtual bool isLoadBitCastBeneficial(EVT LoadVT, EVT BitcastVT, const SelectionDAG &DAG, const MachineMemOperand &MMO) const
Return true if the following transform is beneficial: fold (conv (load x)) -> (load (conv*)x) On arch...
void setIndexedLoadAction(ArrayRef< unsigned > IdxModes, MVT VT, LegalizeAction Action)
Indicate that the specified indexed load does or does not work with the specified type and indicate w...
virtual bool softPromoteHalfType() const
unsigned getMaximumJumpTableSize() const
Return upper limit for number of entries in a jump table.
virtual MVT::SimpleValueType getCmpLibcallReturnType() const
Return the ValueType for comparison libcalls.
unsigned getBitWidthForCttzElements(Type *RetTy, ElementCount EC, bool ZeroIsPoison, const ConstantRange *VScaleRange) const
Return the minimum number of bits required to hold the maximum possible number of trailing zero vecto...
bool isLegalRC(const TargetRegisterInfo &TRI, const TargetRegisterClass &RC) const
Return true if the value types that can be represented by the specified register class are all legal.
virtual TargetLoweringBase::LegalizeTypeAction getPreferredVectorAction(MVT VT) const
Return the preferred vector type legalization action.
void setAtomicLoadExtAction(unsigned ExtType, MVT ValVT, MVT MemVT, LegalizeAction Action)
Let target indicate that an extending atomic load of the specified type is legal.
Value * getDefaultSafeStackPointerLocation(IRBuilderBase &IRB, bool UseTLS) const
virtual Function * getSSPStackGuardCheck(const Module &M) const
If the target has a standard stack protection check function that performs validation and error handl...
MachineMemOperand::Flags getAtomicMemOperandFlags(const Instruction &AI, const DataLayout &DL) const
virtual bool allowsMisalignedMemoryAccesses(EVT, unsigned AddrSpace=0, Align Alignment=Align(1), MachineMemOperand::Flags Flags=MachineMemOperand::MONone, unsigned *=nullptr) const
Determine if the target supports unaligned memory accesses.
unsigned MaxStoresPerMemsetOptSize
Likewise for functions with the OptSize attribute.
EVT getShiftAmountTy(EVT LHSTy, const DataLayout &DL) const
Returns the type for the shift amount of a shift opcode.
unsigned MaxStoresPerMemmove
Specify maximum number of store instructions per memmove call.
virtual Align getPrefLoopAlignment(MachineLoop *ML=nullptr) const
Return the preferred loop alignment.
void computeRegisterProperties(const TargetRegisterInfo *TRI)
Once all of the register classes are added, this allows us to compute derived properties we expose.
int getDivRefinementSteps(EVT VT, MachineFunction &MF) const
Return the refinement step count for a division of the given type based on the function's attributes.
virtual EVT getSetCCResultType(const DataLayout &DL, LLVMContext &Context, EVT VT) const
Return the ValueType of the result of SETCC operations.
MachineMemOperand::Flags getLoadMemOperandFlags(const LoadInst &LI, const DataLayout &DL, AssumptionCache *AC=nullptr, const TargetLibraryInfo *LibInfo=nullptr) const
virtual EVT getTypeToTransformTo(LLVMContext &Context, EVT VT) const
For types supported by the target, this is an identity function.
unsigned MaxStoresPerMemmoveOptSize
Likewise for functions with the OptSize attribute.
virtual Value * getIRStackGuard(IRBuilderBase &IRB) const
If the target has a standard location for the stack protector guard, returns the address of that loca...
virtual MVT getPreferredSwitchConditionType(LLVMContext &Context, EVT ConditionVT) const
Returns preferred type for switch condition.
bool isTypeLegal(EVT VT) const
Return true if the target has native support for the specified value type.
int getRecipEstimateDivEnabled(EVT VT, MachineFunction &MF) const
Return a ReciprocalEstimate enum value for a division of the given type based on the function's attri...
void setIndexedStoreAction(ArrayRef< unsigned > IdxModes, MVT VT, LegalizeAction Action)
Indicate that the specified indexed store does or does not work with the specified type and indicate ...
virtual bool isJumpTableRelative() const
virtual MVT getScalarShiftAmountTy(const DataLayout &, EVT) const
Return the type to use for a scalar shift opcode, given the shifted amount type.
virtual MVT getPointerTy(const DataLayout &DL, uint32_t AS=0) const
Return the pointer type for the given address space, defaults to the pointer type from the data layou...
virtual bool isFreeAddrSpaceCast(unsigned SrcAS, unsigned DestAS) const
Returns true if a cast from SrcAS to DestAS is "cheap", such that e.g.
void setIndexedMaskedStoreAction(unsigned IdxMode, MVT VT, LegalizeAction Action)
Indicate that the specified indexed masked store does or does not work with the specified type and in...
unsigned MaxStoresPerMemset
Specify maximum number of store instructions per memset call.
void setMinimumJumpTableEntries(unsigned Val)
Indicate the minimum number of blocks to generate jump tables.
void setTruncStoreAction(MVT ValVT, MVT MemVT, LegalizeAction Action)
Indicate that the specified truncating store does not work with the specified type and indicate what ...
virtual bool allowsMemoryAccess(LLVMContext &Context, const DataLayout &DL, EVT VT, unsigned AddrSpace=0, Align Alignment=Align(1), MachineMemOperand::Flags Flags=MachineMemOperand::MONone, unsigned *Fast=nullptr) const
Return true if the target supports a memory access of this type for the given address space and align...
unsigned MaxLoadsPerMemcmpOptSize
Likewise for functions with the OptSize attribute.
MachineMemOperand::Flags getStoreMemOperandFlags(const StoreInst &SI, const DataLayout &DL) const
void AddPromotedToType(unsigned Opc, MVT OrigVT, MVT DestVT)
If Opc/OrigVT is specified as being promoted, the promotion code defaults to trying a larger integer/...
unsigned getMinimumJumpTableDensity(bool OptForSize) const
Return lower limit of the density in a jump table.
virtual std::pair< const TargetRegisterClass *, uint8_t > findRepresentativeClass(const TargetRegisterInfo *TRI, MVT VT) const
Return the largest legal super-reg register class of the register class for the specified type and it...
TargetLoweringBase(const TargetMachine &TM)
NOTE: The TargetMachine owns TLOF.
LegalizeKind getTypeConversion(LLVMContext &Context, EVT VT) const
Return pair that represents the legalization kind (first) that needs to happen to EVT (second) in ord...
void setLoadExtAction(unsigned ExtType, MVT ValVT, MVT MemVT, LegalizeAction Action)
Indicate that the specified load with extension does not work with the specified type and indicate wh...
unsigned GatherAllAliasesMaxDepth
Depth that GatherAllAliases should continue looking for chain dependencies when trying to find a more...
LegalizeTypeAction getTypeAction(LLVMContext &Context, EVT VT) const
Return how we should legalize values of this type, either it is already legal (return 'Legal') or we ...
int getSqrtRefinementSteps(EVT VT, MachineFunction &MF) const
Return the refinement step count for a square root of the given type based on the function's attribut...
bool allowsMemoryAccessForAlignment(LLVMContext &Context, const DataLayout &DL, EVT VT, unsigned AddrSpace=0, Align Alignment=Align(1), MachineMemOperand::Flags Flags=MachineMemOperand::MONone, unsigned *Fast=nullptr) const
This function returns true if the memory access is aligned or if the target allows this specific unal...
virtual Instruction * emitTrailingFence(IRBuilderBase &Builder, Instruction *Inst, AtomicOrdering Ord) const
virtual Instruction * emitLeadingFence(IRBuilderBase &Builder, Instruction *Inst, AtomicOrdering Ord) const
Inserts in the IR a target-specific intrinsic specifying a fence.
unsigned MaxStoresPerMemcpy
Specify maximum number of store instructions per memcpy call.
MVT getRegisterType(MVT VT) const
Return the type of registers that this ValueType will eventually require.
virtual void insertSSPDeclarations(Module &M) const
Inserts necessary declarations for SSP (stack protection) purpose.
void setJumpIsExpensive(bool isExpensive=true)
Tells the code generator not to expand logic operations on comparison predicates into separate sequen...
LegalizeAction getOperationAction(unsigned Op, EVT VT) const
Return how this operation should be treated: either it is legal, needs to be promoted to a larger siz...
MVT getTypeToPromoteTo(unsigned Op, MVT VT) const
If the action for this operation is to promote, this method returns the ValueType to promote to.
virtual bool isLegalAddressingMode(const DataLayout &DL, const AddrMode &AM, Type *Ty, unsigned AddrSpace, Instruction *I=nullptr) const
Return true if the addressing mode represented by AM is legal for this target, for a load/store of th...
unsigned getVectorTypeBreakdown(LLVMContext &Context, EVT VT, EVT &IntermediateVT, unsigned &NumIntermediates, MVT &RegisterVT) const
Vector types are broken down into some number of legal first class types.
std::pair< LegalizeTypeAction, EVT > LegalizeKind
LegalizeKind holds the legalization kind that needs to happen to EVT in order to type-legalize it.
This class defines information used to lower LLVM code to legal SelectionDAG operators that the targe...
virtual EVT getTypeForExtReturn(LLVMContext &Context, EVT VT, ISD::NodeType) const
Return the type that should be used to zero or sign extend a zeroext/signext integer return value.
Primary interface to the complete machine description for the target machine.
Definition: TargetMachine.h:77
bool isPositionIndependent() const
virtual bool isNoopAddrSpaceCast(unsigned SrcAS, unsigned DestAS) const
Returns true if a cast between SrcAS and DestAS is a noop.
const Triple & getTargetTriple() const
Reloc::Model getRelocationModel() const
Returns the code generation relocation model.
TargetOptions Options
unsigned LoopAlignment
If greater than 0, override TargetLoweringBase::PrefLoopAlignment.
TargetRegisterInfo base class - We assume that the target defines a static array of TargetRegisterDes...
This pass provides access to the codegen interfaces that are needed for IR-level transformations.
unsigned getGISelRematGlobalCost() const
bool isWindowsGNUEnvironment() const
Definition: Triple.h:685
bool isAndroid() const
Tests whether the target is Android.
Definition: Triple.h:797
bool isOSFreeBSD() const
Definition: Triple.h:611
bool isPPC64() const
Tests whether the target is 64-bit PowerPC (little and big endian).
Definition: Triple.h:1001
bool isOSDarwin() const
Is this a "Darwin" OS (macOS, iOS, tvOS, watchOS, XROS, or DriverKit).
Definition: Triple.h:585
Twine - A lightweight data structure for efficiently representing the concatenation of temporary valu...
Definition: Twine.h:81
The instances of the Type class are immutable: once they are created, they are never changed.
Definition: Type.h:45
LLVM Value Representation.
Definition: Value.h:74
Type * getType() const
All values are typed, get the type of this value.
Definition: Value.h:255
int getNumOccurrences() const
Definition: CommandLine.h:399
constexpr LeafTy coefficientNextPowerOf2() const
Definition: TypeSize.h:262
constexpr bool isScalable() const
Returns whether the quantity is scaled by a runtime quantity (vscale).
Definition: TypeSize.h:171
constexpr ScalarTy getKnownMinValue() const
Returns the minimum value this quantity can represent.
Definition: TypeSize.h:168
constexpr LeafTy divideCoefficientBy(ScalarTy RHS) const
We do not provide the '/' operator here because division for polynomial types does not work in the sa...
Definition: TypeSize.h:254
#define llvm_unreachable(msg)
Marks that the current location is not supposed to be reachable.
@ Fast
Attempts to make calls as fast as possible (e.g.
Definition: CallingConv.h:41
@ C
The default llvm calling convention, compatible with C.
Definition: CallingConv.h:34
NodeType
ISD::NodeType enum - This enum defines the target-independent operators for a SelectionDAG.
Definition: ISDOpcodes.h:40
@ SETCC
SetCC operator - This evaluates to a true value iff the condition is true.
Definition: ISDOpcodes.h:780
@ MERGE_VALUES
MERGE_VALUES - This node takes multiple discrete operands and returns them all as its individual resu...
Definition: ISDOpcodes.h:243
@ CTLZ_ZERO_UNDEF
Definition: ISDOpcodes.h:753
@ DELETED_NODE
DELETED_NODE - This is an illegal value that is used to catch errors.
Definition: ISDOpcodes.h:44
@ SET_FPENV
Sets the current floating-point environment.
Definition: ISDOpcodes.h:1069
@ VECREDUCE_SEQ_FADD
Generic reduction nodes.
Definition: ISDOpcodes.h:1417
@ VECREDUCE_SMIN
Definition: ISDOpcodes.h:1450
@ FGETSIGN
INT = FGETSIGN(FP) - Return the sign bit of the specified floating point value as an integer 0/1 valu...
Definition: ISDOpcodes.h:512
@ ATOMIC_LOAD_NAND
Definition: ISDOpcodes.h:1340
@ SMULFIX
RESULT = [US]MULFIX(LHS, RHS, SCALE) - Perform fixed point multiplication on 2 integers with the same...
Definition: ISDOpcodes.h:374
@ ConstantFP
Definition: ISDOpcodes.h:77
@ ATOMIC_LOAD_MAX
Definition: ISDOpcodes.h:1342
@ ATOMIC_LOAD_UMIN
Definition: ISDOpcodes.h:1343
@ ADDC
Carry-setting nodes for multiple precision addition and subtraction.
Definition: ISDOpcodes.h:276
@ RESET_FPENV
Set floating-point environment to default state.
Definition: ISDOpcodes.h:1073
@ FMAD
FMAD - Perform a * b + c, while getting the same result as the separately rounded operations.
Definition: ISDOpcodes.h:502
@ ADD
Simple integer binary arithmetic operators.
Definition: ISDOpcodes.h:246
@ LOAD
LOAD and STORE have token chains as their first operand, then the same operands as an LLVM load/store...
Definition: ISDOpcodes.h:1102
@ SMULFIXSAT
Same as the corresponding unsaturated fixed point instructions, but the result is clamped between the...
Definition: ISDOpcodes.h:380
@ SET_FPMODE
Sets the current dynamic floating-point control modes.
Definition: ISDOpcodes.h:1092
@ ANY_EXTEND
ANY_EXTEND - Used for integer types. The high bits are undefined.
Definition: ISDOpcodes.h:814
@ VECTOR_FIND_LAST_ACTIVE
Definition: ISDOpcodes.h:1485
@ FATAN2
FATAN2 - atan2, inspired by libm.
Definition: ISDOpcodes.h:999
@ ATOMIC_CMP_SWAP_WITH_SUCCESS
Val, Success, OUTCHAIN = ATOMIC_CMP_SWAP_WITH_SUCCESS(INCHAIN, ptr, cmp, swap) N.b.
Definition: ISDOpcodes.h:1325
@ SINT_TO_FP
[SU]INT_TO_FP - These operators convert integers (whose interpreted sign depends on the first letter)...
Definition: ISDOpcodes.h:841
@ CONCAT_VECTORS
CONCAT_VECTORS(VECTOR0, VECTOR1, ...) - Given a number of values of vector type with the same length ...
Definition: ISDOpcodes.h:558
@ VECREDUCE_FMAX
FMIN/FMAX nodes can have flags, for NaN/NoNaN variants.
Definition: ISDOpcodes.h:1435
@ FADD
Simple binary floating point operators.
Definition: ISDOpcodes.h:397
@ VECREDUCE_FMAXIMUM
FMINIMUM/FMAXIMUM nodes propatate NaNs and signed zeroes using the llvm.minimum and llvm....
Definition: ISDOpcodes.h:1439
@ ABS
ABS - Determine the unsigned absolute value of a signed integer value of the same bitwidth.
Definition: ISDOpcodes.h:717
@ RESET_FPMODE
Sets default dynamic floating-point control modes.
Definition: ISDOpcodes.h:1096
@ SIGN_EXTEND_VECTOR_INREG
SIGN_EXTEND_VECTOR_INREG(Vector) - This operator represents an in-register sign-extension of the low ...
Definition: ISDOpcodes.h:871
@ VECREDUCE_SMAX
Definition: ISDOpcodes.h:1449
@ ATOMIC_LOAD_OR
Definition: ISDOpcodes.h:1338
@ BITCAST
BITCAST - This operator converts between integer, vector and FP values, as if the value was stored to...
Definition: ISDOpcodes.h:954
@ ATOMIC_LOAD_XOR
Definition: ISDOpcodes.h:1339
@ FLDEXP
FLDEXP - ldexp, inspired by libm (op0 * 2**op1).
Definition: ISDOpcodes.h:997
@ SDIVFIX
RESULT = [US]DIVFIX(LHS, RHS, SCALE) - Perform fixed point division on 2 integers with the same width...
Definition: ISDOpcodes.h:387
@ BUILTIN_OP_END
BUILTIN_OP_END - This must be the last enum value in this list.
Definition: ISDOpcodes.h:1494
@ SIGN_EXTEND
Conversion operators.
Definition: ISDOpcodes.h:805
@ AVGCEILS
AVGCEILS/AVGCEILU - Rounding averaging add - Add two integers using an integer of type i[N+2],...
Definition: ISDOpcodes.h:685
@ READSTEADYCOUNTER
READSTEADYCOUNTER - This corresponds to the readfixedcounter intrinsic.
Definition: ISDOpcodes.h:1259
@ VECREDUCE_FADD
These reductions have relaxed evaluation order semantics, and have a single vector operand.
Definition: ISDOpcodes.h:1432
@ CTTZ_ZERO_UNDEF
Bit counting operators with an undefined result for zero inputs.
Definition: ISDOpcodes.h:752
@ PREFETCH
PREFETCH - This corresponds to a prefetch intrinsic.
Definition: ISDOpcodes.h:1292
@ TRUNCATE_SSAT_U
Definition: ISDOpcodes.h:834
@ VECREDUCE_FMIN
Definition: ISDOpcodes.h:1436
@ FSINCOS
FSINCOS - Compute both fsin and fcos as a single operation.
Definition: ISDOpcodes.h:1059
@ SETCCCARRY
Like SetCC, ops #0 and #1 are the LHS and RHS operands to compare, but op #2 is a boolean indicating ...
Definition: ISDOpcodes.h:788
@ FNEG
Perform various unary floating-point operations inspired by libm.
Definition: ISDOpcodes.h:981
@ SSUBO
Same for subtraction.
Definition: ISDOpcodes.h:334
@ ATOMIC_LOAD_MIN
Definition: ISDOpcodes.h:1341
@ IS_FPCLASS
Performs a check of floating point class property, defined by IEEE-754.
Definition: ISDOpcodes.h:522
@ SSUBSAT
RESULT = [US]SUBSAT(LHS, RHS) - Perform saturation subtraction on 2 integers with the same bit width ...
Definition: ISDOpcodes.h:356
@ SELECT
Select(COND, TRUEVAL, FALSEVAL).
Definition: ISDOpcodes.h:757
@ VECREDUCE_UMAX
Definition: ISDOpcodes.h:1451
@ SPLAT_VECTOR
SPLAT_VECTOR(VAL) - Returns a vector with the scalar value VAL duplicated in all lanes.
Definition: ISDOpcodes.h:642
@ SADDO
RESULT, BOOL = [SU]ADDO(LHS, RHS) - Overflow-aware nodes for addition.
Definition: ISDOpcodes.h:330
@ VECREDUCE_ADD
Integer reductions may have a result type larger than the vector element type.
Definition: ISDOpcodes.h:1444
@ GET_FPMODE
Reads the current dynamic floating-point control modes.
Definition: ISDOpcodes.h:1087
@ GET_FPENV
Gets the current floating-point environment.
Definition: ISDOpcodes.h:1064
@ SHL
Shift and rotation operations.
Definition: ISDOpcodes.h:735
@ ATOMIC_LOAD_CLR
Definition: ISDOpcodes.h:1337
@ VECTOR_SHUFFLE
VECTOR_SHUFFLE(VEC1, VEC2) - Returns a vector, of the same type as VEC1/VEC2.
Definition: ISDOpcodes.h:615
@ ATOMIC_LOAD_AND
Definition: ISDOpcodes.h:1336
@ FMINNUM_IEEE
FMINNUM_IEEE/FMAXNUM_IEEE - Perform floating-point minimumNumber or maximumNumber on two values,...
Definition: ISDOpcodes.h:1044
@ EXTRACT_VECTOR_ELT
EXTRACT_VECTOR_ELT(VECTOR, IDX) - Returns a single element from VECTOR identified by the (potentially...
Definition: ISDOpcodes.h:550
@ ZERO_EXTEND
ZERO_EXTEND - Used for integer types, zeroing the new bits.
Definition: ISDOpcodes.h:811
@ DEBUGTRAP
DEBUGTRAP - Trap intended to get the attention of a debugger.
Definition: ISDOpcodes.h:1282
@ FP_TO_UINT_SAT
Definition: ISDOpcodes.h:907
@ ATOMIC_CMP_SWAP
Val, OUTCHAIN = ATOMIC_CMP_SWAP(INCHAIN, ptr, cmp, swap) For double-word atomic operations: ValLo,...
Definition: ISDOpcodes.h:1319
@ ATOMIC_LOAD_UMAX
Definition: ISDOpcodes.h:1344
@ FMINNUM
FMINNUM/FMAXNUM - Perform floating-point minimum or maximum on two values.
Definition: ISDOpcodes.h:1031
@ UBSANTRAP
UBSANTRAP - Trap with an immediate describing the kind of sanitizer failure.
Definition: ISDOpcodes.h:1286
@ SSHLSAT
RESULT = [US]SHLSAT(LHS, RHS) - Perform saturation left shift.
Definition: ISDOpcodes.h:366
@ SMULO
Same for multiplication.
Definition: ISDOpcodes.h:338
@ ANY_EXTEND_VECTOR_INREG
ANY_EXTEND_VECTOR_INREG(Vector) - This operator represents an in-register any-extension of the low la...
Definition: ISDOpcodes.h:860
@ SIGN_EXTEND_INREG
SIGN_EXTEND_INREG - This operator atomically performs a SHL/SRA pair to sign extend a small value in ...
Definition: ISDOpcodes.h:849
@ SMIN
[US]{MIN/MAX} - Binary minimum or maximum of signed or unsigned integers.
Definition: ISDOpcodes.h:697
@ SDIVFIXSAT
Same as the corresponding unsaturated fixed point instructions, but the result is clamped between the...
Definition: ISDOpcodes.h:393
@ FP_EXTEND
X = FP_EXTEND(Y) - Extend a smaller FP type into a larger FP type.
Definition: ISDOpcodes.h:939
@ UADDO_CARRY
Carry-using nodes for multiple precision addition and subtraction.
Definition: ISDOpcodes.h:310
@ VECREDUCE_UMIN
Definition: ISDOpcodes.h:1452
@ ATOMIC_LOAD_ADD
Definition: ISDOpcodes.h:1334
@ FMINIMUM
FMINIMUM/FMAXIMUM - NaN-propagating minimum/maximum that also treat -0.0 as less than 0....
Definition: ISDOpcodes.h:1050
@ ATOMIC_LOAD_SUB
Definition: ISDOpcodes.h:1335
@ FP_TO_SINT
FP_TO_[US]INT - Convert a floating point value to a signed or unsigned integer.
Definition: ISDOpcodes.h:887
@ READCYCLECOUNTER
READCYCLECOUNTER - This corresponds to the readcyclecounter intrinsic.
Definition: ISDOpcodes.h:1253
@ AND
Bitwise operators - logical and, logical or, logical xor.
Definition: ISDOpcodes.h:709
@ TRAP
TRAP - Trapping instruction.
Definition: ISDOpcodes.h:1279
@ GET_FPENV_MEM
Gets the current floating-point environment.
Definition: ISDOpcodes.h:1078
@ SCMP
[US]CMP - 3-way comparison of signed or unsigned integers.
Definition: ISDOpcodes.h:705
@ AVGFLOORS
AVGFLOORS/AVGFLOORU - Averaging add - Add two integers using an integer of type i[N+1],...
Definition: ISDOpcodes.h:680
@ VECREDUCE_FMUL
Definition: ISDOpcodes.h:1433
@ ADDE
Carry-using nodes for multiple precision addition and subtraction.
Definition: ISDOpcodes.h:286
@ FREEZE
FREEZE - FREEZE(VAL) returns an arbitrary value if VAL is UNDEF (or is evaluated to UNDEF),...
Definition: ISDOpcodes.h:223
@ INSERT_VECTOR_ELT
INSERT_VECTOR_ELT(VECTOR, VAL, IDX) - Returns VECTOR with the element at IDX replaced with VAL.
Definition: ISDOpcodes.h:539
@ VECTOR_SPLICE
VECTOR_SPLICE(VEC1, VEC2, IMM) - Returns a subvector of the same type as VEC1/VEC2 from CONCAT_VECTOR...
Definition: ISDOpcodes.h:627
@ ATOMIC_SWAP
Val, OUTCHAIN = ATOMIC_SWAP(INCHAIN, ptr, amt) Val, OUTCHAIN = ATOMIC_LOAD_[OpName](INCHAIN,...
Definition: ISDOpcodes.h:1333
@ FFREXP
FFREXP - frexp, extract fractional and exponent component of a floating-point value.
Definition: ISDOpcodes.h:1004
@ FP_ROUND
X = FP_ROUND(Y, TRUNC) - Rounding 'Y' from a larger floating point type down to the precision of the ...
Definition: ISDOpcodes.h:920
@ VECTOR_COMPRESS
VECTOR_COMPRESS(Vec, Mask, Passthru) consecutively place vector elements based on mask e....
Definition: ISDOpcodes.h:669
@ ZERO_EXTEND_VECTOR_INREG
ZERO_EXTEND_VECTOR_INREG(Vector) - This operator represents an in-register zero-extension of the low ...
Definition: ISDOpcodes.h:882
@ ADDRSPACECAST
ADDRSPACECAST - This operator converts between pointers of different address spaces.
Definition: ISDOpcodes.h:958
@ FP_TO_SINT_SAT
FP_TO_[US]INT_SAT - Convert floating point value in operand 0 to a signed or unsigned scalar integer ...
Definition: ISDOpcodes.h:906
@ VECREDUCE_FMINIMUM
Definition: ISDOpcodes.h:1440
@ TRUNCATE
TRUNCATE - Completely drop the high bits.
Definition: ISDOpcodes.h:817
@ VECREDUCE_SEQ_FMUL
Definition: ISDOpcodes.h:1418
@ FCOPYSIGN
FCOPYSIGN(X, Y) - Return the value of X with the sign of Y.
Definition: ISDOpcodes.h:508
@ SADDSAT
RESULT = [US]ADDSAT(LHS, RHS) - Perform saturation addition on 2 integers with the same bit width (W)...
Definition: ISDOpcodes.h:347
@ GET_DYNAMIC_AREA_OFFSET
GET_DYNAMIC_AREA_OFFSET - get offset from native SP to the address of the most recent dynamic alloca.
Definition: ISDOpcodes.h:1398
@ SET_FPENV_MEM
Sets the current floating point environment.
Definition: ISDOpcodes.h:1083
@ FMINIMUMNUM
FMINIMUMNUM/FMAXIMUMNUM - minimumnum/maximumnum that is same with FMINNUM_IEEE and FMAXNUM_IEEE besid...
Definition: ISDOpcodes.h:1055
@ TRUNCATE_SSAT_S
TRUNCATE_[SU]SAT_[SU] - Truncate for saturated operand [SU] located in middle, prefix for SAT means i...
Definition: ISDOpcodes.h:832
@ ABDS
ABDS/ABDU - Absolute difference - Return the absolute difference between two numbers interpreted as s...
Definition: ISDOpcodes.h:692
@ TRUNCATE_USAT_U
Definition: ISDOpcodes.h:836
@ SADDO_CARRY
Carry-using overflow-aware nodes for multiple precision addition and subtraction.
Definition: ISDOpcodes.h:320
CondCode
ISD::CondCode enum - These are ordered carefully to make the bitfields below work out,...
Definition: ISDOpcodes.h:1610
static const int LAST_INDEXED_MODE
Definition: ISDOpcodes.h:1561
Libcall getFSINCOS(EVT RetVT)
getFSINCOS - Return the FSINCOS_* value for the given types, or UNKNOWN_LIBCALL if there is none.
Libcall getPOWI(EVT RetVT)
getPOWI - Return the POWI_* value for the given types, or UNKNOWN_LIBCALL if there is none.
Libcall getSINTTOFP(EVT OpVT, EVT RetVT)
getSINTTOFP - Return the SINTTOFP_*_* value for the given types, or UNKNOWN_LIBCALL if there is none.
void initCmpLibcallCCs(ISD::CondCode *CmpLibcallCCs)
Initialize the default condition codes for the comparison libcalls.
Libcall getSYNC(unsigned Opc, MVT VT)
Return the SYNC_FETCH_AND_* value for the given opcode and type, or UNKNOWN_LIBCALL if there is none.
Libcall getLDEXP(EVT RetVT)
getLDEXP - Return the LDEXP_* value for the given types, or UNKNOWN_LIBCALL if there is none.
Libcall getUINTTOFP(EVT OpVT, EVT RetVT)
getUINTTOFP - Return the UINTTOFP_*_* value for the given types, or UNKNOWN_LIBCALL if there is none.
Libcall getFREXP(EVT RetVT)
getFREXP - Return the FREXP_* value for the given types, or UNKNOWN_LIBCALL if there is none.
Libcall
RTLIB::Libcall enum - This enum defines all of the runtime library calls the backend can emit.
Libcall getMEMCPY_ELEMENT_UNORDERED_ATOMIC(uint64_t ElementSize)
getMEMCPY_ELEMENT_UNORDERED_ATOMIC - Return MEMCPY_ELEMENT_UNORDERED_ATOMIC_* value for the given ele...
Libcall getFPTOUINT(EVT OpVT, EVT RetVT)
getFPTOUINT - Return the FPTOUINT_*_* value for the given types, or UNKNOWN_LIBCALL if there is none.
Libcall getFPTOSINT(EVT OpVT, EVT RetVT)
getFPTOSINT - Return the FPTOSINT_*_* value for the given types, or UNKNOWN_LIBCALL if there is none.
Libcall getOUTLINE_ATOMIC(unsigned Opc, AtomicOrdering Order, MVT VT)
Return the outline atomics value for the given opcode, atomic ordering and type, or UNKNOWN_LIBCALL i...
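For illustration, a minimal usage sketch (not from this file; the wrapper is hypothetical, and the header paths follow recent trees, where these helpers have moved between releases): querying the outline-atomic table for a 32-bit atomic swap. On UNKNOWN_LIBCALL a target would fall back to getSYNC or an inline LL/SC loop.
#include "llvm/CodeGen/ISDOpcodes.h"
#include "llvm/CodeGen/RuntimeLibcallUtil.h"
#include "llvm/CodeGenTypes/MachineValueType.h"
#include "llvm/Support/AtomicOrdering.h"
using namespace llvm;
// Hypothetical helper: e.g. on AArch64 with +outline-atomics this names
// the __aarch64_swp4_acq_rel runtime routine.
static RTLIB::Libcall getSwapLibcall() {
  return RTLIB::getOUTLINE_ATOMIC(ISD::ATOMIC_SWAP,
                                  AtomicOrdering::AcquireRelease, MVT::i32);
}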
Libcall getFPEXT(EVT OpVT, EVT RetVT)
getFPEXT - Return the FPEXT_*_* value for the given types, or UNKNOWN_LIBCALL if there is none.
Libcall getFPROUND(EVT OpVT, EVT RetVT)
getFPROUND - Return the FPROUND_*_* value for the given types, or UNKNOWN_LIBCALL if there is none.
Libcall getMEMSET_ELEMENT_UNORDERED_ATOMIC(uint64_t ElementSize)
getMEMSET_ELEMENT_UNORDERED_ATOMIC - Return MEMSET_ELEMENT_UNORDERED_ATOMIC_* value for the given ele...
Libcall getOutlineAtomicHelper(const Libcall(&LC)[5][4], AtomicOrdering Order, uint64_t MemSize)
Return the outline atomics value for the given atomic ordering, access size and set of libcalls for a...
Libcall getFPLibCall(EVT VT, Libcall Call_F32, Libcall Call_F64, Libcall Call_F80, Libcall Call_F128, Libcall Call_PPCF128)
GetFPLibCall - Helper to return the right libcall for the given floating point type,...
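A hedged sketch of the dispatch pattern (the wrapper function is hypothetical; the SQRT_* libcalls are real RTLIB entries):
#include "llvm/CodeGen/RuntimeLibcallUtil.h"
#include "llvm/CodeGen/ValueTypes.h"
using namespace llvm;
// Maps an FSQRT operand type to its libcall; types with no entry in the
// five-way table (e.g. f16 here) come back as RTLIB::UNKNOWN_LIBCALL.
static RTLIB::Libcall getSqrtLibcall(EVT VT) {
  return RTLIB::getFPLibCall(VT, RTLIB::SQRT_F32, RTLIB::SQRT_F64,
                             RTLIB::SQRT_F80, RTLIB::SQRT_F128,
                             RTLIB::SQRT_PPCF128);
}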
Libcall getMEMMOVE_ELEMENT_UNORDERED_ATOMIC(uint64_t ElementSize)
getMEMMOVE_ELEMENT_UNORDERED_ATOMIC - Return MEMMOVE_ELEMENT_UNORDERED_ATOMIC_* value for the given e...
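The three element-atomic queries above (MEMCPY/MEMMOVE/MEMSET) share one shape; a sketch, assuming the caller rejects unsupported sizes (the runtime provides variants for 1, 2, 4, 8 and 16 byte elements):
#include <cstdint>
#include "llvm/CodeGen/RuntimeLibcallUtil.h"
#include "llvm/Support/ErrorHandling.h"
using namespace llvm;
static RTLIB::Libcall getAtomicMemcpyLibcall(uint64_t ElementSize) {
  // Element size 4 selects MEMCPY_ELEMENT_UNORDERED_ATOMIC_4, and so on;
  // any size without a runtime variant yields UNKNOWN_LIBCALL.
  RTLIB::Libcall LC = RTLIB::getMEMCPY_ELEMENT_UNORDERED_ATOMIC(ElementSize);
  if (LC == RTLIB::UNKNOWN_LIBCALL)
    report_fatal_error("unsupported element size for atomic memcpy");
  return LC;
}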
initializer< Ty > init(const Ty &Val)
Definition: CommandLine.h:443
This is an optimization pass for GlobalISel generic memory operations.
Definition: AddressRanges.h:18
unsigned Log2_32_Ceil(uint32_t Value)
Return the ceil log base 2 of the specified value, or 32 if the value is zero.
Definition: MathExtras.h:355
void GetReturnInfo(CallingConv::ID CC, Type *ReturnType, AttributeList attr, SmallVectorImpl< ISD::OutputArg > &Outs, const TargetLowering &TLI, const DataLayout &DL)
Given an LLVM IR type and return type attributes, compute the return value EVTs and flags,...
MachineInstrBuilder BuildMI(MachineFunction &MF, const MIMetadata &MIMD, const MCInstrDesc &MCID)
Builder interface. Specify how to create the initial instruction itself.
auto enum_seq(EnumT Begin, EnumT End)
Iterate over an enum type from Begin up to - but not including - End.
Definition: Sequence.h:337
bool isDereferenceableAndAlignedPointer(const Value *V, Type *Ty, Align Alignment, const DataLayout &DL, const Instruction *CtxI=nullptr, AssumptionCache *AC=nullptr, const DominatorTree *DT=nullptr, const TargetLibraryInfo *TLI=nullptr)
Returns true if V is always a dereferenceable pointer with alignment greater than or equal to the requested alignment.
Definition: Loads.cpp:216
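A sketch of a typical query (the helper name is made up; the optional AC/DT/TLI analyses are left at their nullptr defaults, which only makes the answer more conservative):
#include "llvm/Analysis/Loads.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/Instruction.h"
#include "llvm/IR/Type.h"
#include "llvm/IR/Value.h"
#include "llvm/Support/Alignment.h"
using namespace llvm;
// Is it safe to speculate a 4-byte-aligned i32 load of Ptr at point CtxI?
static bool canSpeculateI32Load(const Value *Ptr, const DataLayout &DL,
                                const Instruction *CtxI) {
  Type *I32 = Type::getInt32Ty(Ptr->getContext());
  return isDereferenceableAndAlignedPointer(Ptr, I32, Align(4), DL, CtxI);
}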
bool shouldOptimizeForSize(const MachineFunction *MF, ProfileSummaryInfo *PSI, const MachineBlockFrequencyInfo *BFI, PGSOQueryType QueryType=PGSOQueryType::Other)
Returns true if machine function MF should be optimized for size, based on profile information.
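A sketch of the intended call pattern (the policy function is hypothetical; PSI and MBFI come from the caller's analyses):
#include "llvm/Analysis/ProfileSummaryInfo.h"
#include "llvm/CodeGen/MachineBlockFrequencyInfo.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineSizeOpts.h"
using namespace llvm;
// Prefer the smaller lowering (e.g. a jump table over a branch tree) when
// the function is cold under PGSO or is marked optsize/minsize.
static bool preferCompactLowering(const MachineFunction &MF,
                                  ProfileSummaryInfo *PSI,
                                  const MachineBlockFrequencyInfo *MBFI) {
  return shouldOptimizeForSize(&MF, PSI, MBFI, PGSOQueryType::Other);
}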
constexpr force_iteration_on_noniterable_enum_t force_iteration_on_noniterable_enum
Definition: Sequence.h:108
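A self-contained sketch with a made-up enum showing how enum_seq and the force_iteration tag combine:
#include "llvm/ADT/Sequence.h"
// enum_seq yields the half-open range [Begin, End); Stage does not declare
// itself iterable, so the extra tag argument opts in explicitly.
enum class Stage { Parse, Lower, Emit, NumStages };
static void visitStages() {
  for (Stage S : llvm::enum_seq(Stage::Parse, Stage::NumStages,
                                llvm::force_iteration_on_noniterable_enum)) {
    (void)S; // ... handle one stage ...
  }
}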
T bit_ceil(T Value)
Returns the smallest integral power of two no smaller than Value if Value is nonzero.
Definition: bit.h:342
bool isReleaseOrStronger(AtomicOrdering AO)
constexpr bool isPowerOf2_32(uint32_t Value)
Return true if the argument is a power of two > 0.
Definition: MathExtras.h:293
bool none_of(R &&Range, UnaryPredicate P)
Provide wrappers to std::none_of which take ranges instead of having to pass begin/end explicitly.
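A sketch of the range form (the example predicate is ours):
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallVector.h"
using namespace llvm;
// True when no element is negative; none_of takes the range directly
// instead of an explicit begin()/end() pair.
static bool allNonNegative(const SmallVectorImpl<int> &Vals) {
  return none_of(Vals, [](int V) { return V < 0; });
}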
Definition: STLExtras.h:1753
void report_fatal_error(Error Err, bool gen_crash_diag=true)
Report a serious error, calling any installed error handler.
Definition: Error.cpp:167
AtomicOrdering
Atomic ordering for LLVM's memory model.
EVT getApproximateEVTForLLT(LLT Ty, LLVMContext &Ctx)
constexpr T divideCeil(U Numerator, V Denominator)
Returns the integer ceil(Numerator / Denominator).
Definition: MathExtras.h:405
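A sketch tying together the integer helpers documented above (Log2_32_Ceil, bit_ceil, isPowerOf2_32, divideCeil); the example values are ours:
#include <cassert>
#include "llvm/ADT/bit.h"
#include "llvm/Support/MathExtras.h"
static void integerHelpersDemo() {
  assert(llvm::Log2_32_Ceil(5) == 3);     // ceil(log2(5))
  assert(llvm::bit_ceil(5u) == 8u);       // smallest power of two >= 5
  assert(llvm::isPowerOf2_32(8));         // 8 == 2^3 > 0
  assert(llvm::divideCeil(10u, 4u) == 3); // ceil(10 / 4)
}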
@ Or
Bitwise or logical OR of integers.
@ Mul
Product of integers.
@ Xor
Bitwise or logical XOR of integers.
@ FMul
Product of floats.
@ And
Bitwise or logical AND of integers.
@ Add
Sum of integers.
@ FAdd
Sum of floats.
void ComputeValueVTs(const TargetLowering &TLI, const DataLayout &DL, Type *Ty, SmallVectorImpl< EVT > &ValueVTs, SmallVectorImpl< EVT > *MemVTs, SmallVectorImpl< TypeSize > *Offsets=nullptr, TypeSize StartingOffset=TypeSize::getZero())
ComputeValueVTs - Given an LLVM IR type, compute a sequence of EVTs that represent all the individual...
Definition: Analysis.cpp:79
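A sketch of the common calling pattern (the wrapper is hypothetical; this uses the overload without MemVTs/Offsets):
#include "llvm/ADT/SmallVector.h"
#include "llvm/CodeGen/Analysis.h"
#include "llvm/CodeGen/TargetLowering.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/Type.h"
using namespace llvm;
// For a first-class aggregate such as { i32, double }, this appends one EVT
// per scalar leaf (i32, then f64) in memory order; a scalar yields one entry.
static void splitIntoPieces(const TargetLowering &TLI, const DataLayout &DL,
                            Type *Ty, SmallVectorImpl<EVT> &Pieces) {
  ComputeValueVTs(TLI, DL, Ty, Pieces);
}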
bool isAcquireOrStronger(AtomicOrdering AO)
InstructionCost Cost
This struct is a compact representation of a valid (non-zero power of two) alignment.
Definition: Alignment.h:39
Extended Value Type.
Definition: ValueTypes.h:35
EVT getPow2VectorType(LLVMContext &Context) const
Widens the length of the given vector EVT up to the nearest power of 2 and returns that type.
Definition: ValueTypes.h:472
bool isSimple() const
Test if the given EVT is simple (as opposed to being extended).
Definition: ValueTypes.h:137
static EVT getVectorVT(LLVMContext &Context, EVT VT, unsigned NumElements, bool IsScalable=false)
Returns the EVT that represents a vector NumElements in length, where each element is of type VT.
Definition: ValueTypes.h:74
ElementCount getVectorElementCount() const
Definition: ValueTypes.h:345
TypeSize getSizeInBits() const
Return the size of the specified value type in bits.
Definition: ValueTypes.h:368
bool isPow2VectorType() const
Returns true if the given vector type has a power-of-2 number of elements.
Definition: ValueTypes.h:465
MVT getSimpleVT() const
Return the SimpleValueType held in the specified simple EVT.
Definition: ValueTypes.h:311
static EVT getIntegerVT(LLVMContext &Context, unsigned BitWidth)
Returns the EVT that represents an integer with the given number of bits.
Definition: ValueTypes.h:65
bool isFixedLengthVector() const
Definition: ValueTypes.h:181
EVT getRoundIntegerType(LLVMContext &Context) const
Rounds the bit-width of the given integer EVT up to the nearest power of two (and at least to eight),...
Definition: ValueTypes.h:414
bool isVector() const
Return true if this is a vector value type.
Definition: ValueTypes.h:168
EVT getScalarType() const
If this is a vector type, return the element type, otherwise return this.
Definition: ValueTypes.h:318
Type * getTypeForEVT(LLVMContext &Context) const
This method returns an LLVM type corresponding to the specified EVT.
Definition: ValueTypes.cpp:210
EVT getVectorElementType() const
Given a vector type, return the type of each element.
Definition: ValueTypes.h:323
unsigned getVectorNumElements() const
Given a vector type, return the number of elements it contains.
Definition: ValueTypes.h:331
bool isZeroSized() const
Test if the given EVT has zero size; this will fail if called on a scalable type.
Definition: ValueTypes.h:132
EVT getHalfNumVectorElementsVT(LLVMContext &Context) const
Definition: ValueTypes.h:448
bool isInteger() const
Return true if this is an integer or a vector integer type.
Definition: ValueTypes.h:152
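A sketch exercising several of the EVT helpers above (the context and values are illustrative):
#include <cassert>
#include "llvm/CodeGen/ValueTypes.h"
#include "llvm/IR/LLVMContext.h"
using namespace llvm;
static void evtDemo(LLVMContext &Ctx) {
  // i20 has no MVT, so it is not "simple"; rounding its width up gives i32.
  EVT I20 = EVT::getIntegerVT(Ctx, 20);
  assert(!I20.isSimple() && I20.isInteger());
  assert(I20.getRoundIntegerType(Ctx).getSizeInBits() == 32);
  // <3 x i32> is not a power-of-2 vector; widening it yields <4 x i32>.
  EVT V3I32 = EVT::getVectorVT(Ctx, MVT::i32, 3);
  assert(!V3I32.isPow2VectorType());
  assert(V3I32.getPow2VectorType(Ctx).getVectorNumElements() == 4);
}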
OutputArg - This struct carries flags and a value for a single outgoing (actual) argument or outgoing...
static MachinePointerInfo getFixedStack(MachineFunction &MF, int FI, int64_t Offset=0)
Return a MachinePointerInfo record that refers to the specified FrameIndex.
This represents an addressing mode of: BaseGV + BaseOffs + BaseReg + Scale*ScaleReg + ScalableOffset*...