TargetLoweringBase.cpp
1//===- TargetLoweringBase.cpp - Implement the TargetLoweringBase class ----===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8//
9// This implements the TargetLoweringBase class.
10//
11//===----------------------------------------------------------------------===//
12
13#include "llvm/ADT/BitVector.h"
14#include "llvm/ADT/DenseMap.h"
15#include "llvm/ADT/STLExtras.h"
18#include "llvm/ADT/StringRef.h"
19#include "llvm/ADT/Twine.h"
20#include "llvm/Analysis/Loads.h"
39#include "llvm/IR/Attributes.h"
40#include "llvm/IR/CallingConv.h"
41#include "llvm/IR/DataLayout.h"
43#include "llvm/IR/Function.h"
44#include "llvm/IR/GlobalValue.h"
46#include "llvm/IR/IRBuilder.h"
47#include "llvm/IR/Module.h"
48#include "llvm/IR/Type.h"
58#include <algorithm>
59#include <cassert>
60#include <cstdint>
61#include <cstring>
62#include <string>
63#include <tuple>
64#include <utility>
65
66using namespace llvm;
67
 68static cl::opt<bool> JumpIsExpensiveOverride(
 69 "jump-is-expensive", cl::init(false),
 70 cl::desc("Do not create extra branches to split comparison logic."),
 71 cl::Hidden);
 72
 73static cl::opt<int> MinimumJumpTableEntries
 74 ("min-jump-table-entries", cl::init(4), cl::Hidden,
 75 cl::desc("Set minimum number of entries to use a jump table."));
76
 77static cl::opt<unsigned> MaximumJumpTableSize
 78 ("max-jump-table-size", cl::init(UINT_MAX), cl::Hidden,
79 cl::desc("Set maximum size of jump tables."));
80
81/// Minimum jump table density for normal functions.
 82static cl::opt<unsigned>
 83 JumpTableDensity("jump-table-density", cl::init(10), cl::Hidden,
84 cl::desc("Minimum density for building a jump table in "
85 "a normal function"));
86
87/// Minimum jump table density for -Os or -Oz functions.
 88static cl::opt<unsigned> OptsizeJumpTableDensity(
 89 "optsize-jump-table-density", cl::init(40), cl::Hidden,
90 cl::desc("Minimum density for building a jump table in "
91 "an optsize function"));
92
 93static cl::opt<unsigned> MinimumBitTestCmpsOverride(
 94 "min-bit-test-cmps", cl::init(2), cl::Hidden,
95 cl::desc("Set minimum of largest number of comparisons "
96 "to use bit test for switch."));
97
99 "max-store-memset", cl::init(0), cl::Hidden,
100 cl::desc("Override target's MaxStoresPerMemset and "
101 "MaxStoresPerMemsetOptSize. "
102 "Set to 0 to use the target default."));
103
105 "max-store-memcpy", cl::init(0), cl::Hidden,
106 cl::desc("Override target's MaxStoresPerMemcpy and "
107 "MaxStoresPerMemcpyOptSize. "
108 "Set to 0 to use the target default."));
109
111 "max-store-memmove", cl::init(0), cl::Hidden,
112 cl::desc("Override target's MaxStoresPerMemmove and "
113 "MaxStoresPerMemmoveOptSize. "
114 "Set to 0 to use the target default."));
115
 116// FIXME: This option exists only to test whether strict fp operations are
 117// processed correctly, by preventing mutation of strict fp operations into
 118// normal fp operations during development. Once all backends support strict
 119// float operations, this option will be meaningless.
120static cl::opt<bool> DisableStrictNodeMutation("disable-strictnode-mutation",
121 cl::desc("Don't mutate strict-float node to a legalize node"),
122 cl::init(false), cl::Hidden);
123
124LLVM_ABI RTLIB::Libcall RTLIB::getSHL(EVT VT) {
125 if (VT == MVT::i16)
126 return RTLIB::SHL_I16;
127 if (VT == MVT::i32)
128 return RTLIB::SHL_I32;
129 if (VT == MVT::i64)
130 return RTLIB::SHL_I64;
131 if (VT == MVT::i128)
132 return RTLIB::SHL_I128;
133
134 return RTLIB::UNKNOWN_LIBCALL;
135}
136
137LLVM_ABI RTLIB::Libcall RTLIB::getSRL(EVT VT) {
138 if (VT == MVT::i16)
139 return RTLIB::SRL_I16;
140 if (VT == MVT::i32)
141 return RTLIB::SRL_I32;
142 if (VT == MVT::i64)
143 return RTLIB::SRL_I64;
144 if (VT == MVT::i128)
145 return RTLIB::SRL_I128;
146
147 return RTLIB::UNKNOWN_LIBCALL;
148}
149
150LLVM_ABI RTLIB::Libcall RTLIB::getSRA(EVT VT) {
151 if (VT == MVT::i16)
152 return RTLIB::SRA_I16;
153 if (VT == MVT::i32)
154 return RTLIB::SRA_I32;
155 if (VT == MVT::i64)
156 return RTLIB::SRA_I64;
157 if (VT == MVT::i128)
158 return RTLIB::SRA_I128;
159
160 return RTLIB::UNKNOWN_LIBCALL;
161}
162
163LLVM_ABI RTLIB::Libcall RTLIB::getMUL(EVT VT) {
164 if (VT == MVT::i16)
165 return RTLIB::MUL_I16;
166 if (VT == MVT::i32)
167 return RTLIB::MUL_I32;
168 if (VT == MVT::i64)
169 return RTLIB::MUL_I64;
170 if (VT == MVT::i128)
171 return RTLIB::MUL_I128;
172 return RTLIB::UNKNOWN_LIBCALL;
173}
174
175LLVM_ABI RTLIB::Libcall RTLIB::getMULO(EVT VT) {
176 if (VT == MVT::i32)
177 return RTLIB::MULO_I32;
178 if (VT == MVT::i64)
179 return RTLIB::MULO_I64;
180 if (VT == MVT::i128)
181 return RTLIB::MULO_I128;
182 return RTLIB::UNKNOWN_LIBCALL;
183}
184
185LLVM_ABI RTLIB::Libcall RTLIB::getSDIV(EVT VT) {
186 if (VT == MVT::i16)
187 return RTLIB::SDIV_I16;
188 if (VT == MVT::i32)
189 return RTLIB::SDIV_I32;
190 if (VT == MVT::i64)
191 return RTLIB::SDIV_I64;
192 if (VT == MVT::i128)
193 return RTLIB::SDIV_I128;
194 return RTLIB::UNKNOWN_LIBCALL;
195}
196
197LLVM_ABI RTLIB::Libcall RTLIB::getUDIV(EVT VT) {
198 if (VT == MVT::i16)
199 return RTLIB::UDIV_I16;
200 if (VT == MVT::i32)
201 return RTLIB::UDIV_I32;
202 if (VT == MVT::i64)
203 return RTLIB::UDIV_I64;
204 if (VT == MVT::i128)
205 return RTLIB::UDIV_I128;
206 return RTLIB::UNKNOWN_LIBCALL;
207}
208
209LLVM_ABI RTLIB::Libcall RTLIB::getSREM(EVT VT) {
210 if (VT == MVT::i16)
211 return RTLIB::SREM_I16;
212 if (VT == MVT::i32)
213 return RTLIB::SREM_I32;
214 if (VT == MVT::i64)
215 return RTLIB::SREM_I64;
216 if (VT == MVT::i128)
217 return RTLIB::SREM_I128;
218 return RTLIB::UNKNOWN_LIBCALL;
219}
220
221LLVM_ABI RTLIB::Libcall RTLIB::getUREM(EVT VT) {
222 if (VT == MVT::i16)
223 return RTLIB::UREM_I16;
224 if (VT == MVT::i32)
225 return RTLIB::UREM_I32;
226 if (VT == MVT::i64)
227 return RTLIB::UREM_I64;
228 if (VT == MVT::i128)
229 return RTLIB::UREM_I128;
230 return RTLIB::UNKNOWN_LIBCALL;
231}
232
233LLVM_ABI RTLIB::Libcall RTLIB::getCTPOP(EVT VT) {
234 if (VT == MVT::i32)
235 return RTLIB::CTPOP_I32;
236 if (VT == MVT::i64)
237 return RTLIB::CTPOP_I64;
238 if (VT == MVT::i128)
239 return RTLIB::CTPOP_I128;
240 return RTLIB::UNKNOWN_LIBCALL;
241}
242
 243/// getFPLibCall - Helper to return the right libcall for the given floating
244/// point type, or UNKNOWN_LIBCALL if there is none.
245RTLIB::Libcall RTLIB::getFPLibCall(EVT VT,
246 RTLIB::Libcall Call_F32,
247 RTLIB::Libcall Call_F64,
248 RTLIB::Libcall Call_F80,
249 RTLIB::Libcall Call_F128,
250 RTLIB::Libcall Call_PPCF128) {
251 return
252 VT == MVT::f32 ? Call_F32 :
253 VT == MVT::f64 ? Call_F64 :
254 VT == MVT::f80 ? Call_F80 :
255 VT == MVT::f128 ? Call_F128 :
256 VT == MVT::ppcf128 ? Call_PPCF128 :
257 RTLIB::UNKNOWN_LIBCALL;
258}
259
260/// getFPEXT - Return the FPEXT_*_* value for the given types, or
261/// UNKNOWN_LIBCALL if there is none.
262RTLIB::Libcall RTLIB::getFPEXT(EVT OpVT, EVT RetVT) {
263 if (OpVT == MVT::f16) {
264 if (RetVT == MVT::f32)
265 return FPEXT_F16_F32;
266 if (RetVT == MVT::f64)
267 return FPEXT_F16_F64;
268 if (RetVT == MVT::f80)
269 return FPEXT_F16_F80;
270 if (RetVT == MVT::f128)
271 return FPEXT_F16_F128;
272 } else if (OpVT == MVT::f32) {
273 if (RetVT == MVT::f64)
274 return FPEXT_F32_F64;
275 if (RetVT == MVT::f128)
276 return FPEXT_F32_F128;
277 if (RetVT == MVT::ppcf128)
278 return FPEXT_F32_PPCF128;
279 } else if (OpVT == MVT::f64) {
280 if (RetVT == MVT::f128)
281 return FPEXT_F64_F128;
282 else if (RetVT == MVT::ppcf128)
283 return FPEXT_F64_PPCF128;
284 } else if (OpVT == MVT::f80) {
285 if (RetVT == MVT::f128)
286 return FPEXT_F80_F128;
287 } else if (OpVT == MVT::bf16) {
288 if (RetVT == MVT::f32)
289 return FPEXT_BF16_F32;
290 }
291
292 return UNKNOWN_LIBCALL;
293}
294
295/// getFPROUND - Return the FPROUND_*_* value for the given types, or
296/// UNKNOWN_LIBCALL if there is none.
297RTLIB::Libcall RTLIB::getFPROUND(EVT OpVT, EVT RetVT) {
298 if (RetVT == MVT::f16) {
299 if (OpVT == MVT::f32)
300 return FPROUND_F32_F16;
301 if (OpVT == MVT::f64)
302 return FPROUND_F64_F16;
303 if (OpVT == MVT::f80)
304 return FPROUND_F80_F16;
305 if (OpVT == MVT::f128)
306 return FPROUND_F128_F16;
307 if (OpVT == MVT::ppcf128)
308 return FPROUND_PPCF128_F16;
309 } else if (RetVT == MVT::bf16) {
310 if (OpVT == MVT::f32)
311 return FPROUND_F32_BF16;
312 if (OpVT == MVT::f64)
313 return FPROUND_F64_BF16;
314 if (OpVT == MVT::f80)
315 return FPROUND_F80_BF16;
316 if (OpVT == MVT::f128)
317 return FPROUND_F128_BF16;
318 } else if (RetVT == MVT::f32) {
319 if (OpVT == MVT::f64)
320 return FPROUND_F64_F32;
321 if (OpVT == MVT::f80)
322 return FPROUND_F80_F32;
323 if (OpVT == MVT::f128)
324 return FPROUND_F128_F32;
325 if (OpVT == MVT::ppcf128)
326 return FPROUND_PPCF128_F32;
327 } else if (RetVT == MVT::f64) {
328 if (OpVT == MVT::f80)
329 return FPROUND_F80_F64;
330 if (OpVT == MVT::f128)
331 return FPROUND_F128_F64;
332 if (OpVT == MVT::ppcf128)
333 return FPROUND_PPCF128_F64;
334 } else if (RetVT == MVT::f80) {
335 if (OpVT == MVT::f128)
336 return FPROUND_F128_F80;
337 }
338
339 return UNKNOWN_LIBCALL;
340}
341
342/// getFPTOSINT - Return the FPTOSINT_*_* value for the given types, or
343/// UNKNOWN_LIBCALL if there is none.
344RTLIB::Libcall RTLIB::getFPTOSINT(EVT OpVT, EVT RetVT) {
345 if (OpVT == MVT::f16) {
346 if (RetVT == MVT::i32)
347 return FPTOSINT_F16_I32;
348 if (RetVT == MVT::i64)
349 return FPTOSINT_F16_I64;
350 if (RetVT == MVT::i128)
351 return FPTOSINT_F16_I128;
352 } else if (OpVT == MVT::f32) {
353 if (RetVT == MVT::i32)
354 return FPTOSINT_F32_I32;
355 if (RetVT == MVT::i64)
356 return FPTOSINT_F32_I64;
357 if (RetVT == MVT::i128)
358 return FPTOSINT_F32_I128;
359 } else if (OpVT == MVT::f64) {
360 if (RetVT == MVT::i32)
361 return FPTOSINT_F64_I32;
362 if (RetVT == MVT::i64)
363 return FPTOSINT_F64_I64;
364 if (RetVT == MVT::i128)
365 return FPTOSINT_F64_I128;
366 } else if (OpVT == MVT::f80) {
367 if (RetVT == MVT::i32)
368 return FPTOSINT_F80_I32;
369 if (RetVT == MVT::i64)
370 return FPTOSINT_F80_I64;
371 if (RetVT == MVT::i128)
372 return FPTOSINT_F80_I128;
373 } else if (OpVT == MVT::f128) {
374 if (RetVT == MVT::i32)
375 return FPTOSINT_F128_I32;
376 if (RetVT == MVT::i64)
377 return FPTOSINT_F128_I64;
378 if (RetVT == MVT::i128)
379 return FPTOSINT_F128_I128;
380 } else if (OpVT == MVT::ppcf128) {
381 if (RetVT == MVT::i32)
382 return FPTOSINT_PPCF128_I32;
383 if (RetVT == MVT::i64)
384 return FPTOSINT_PPCF128_I64;
385 if (RetVT == MVT::i128)
386 return FPTOSINT_PPCF128_I128;
387 }
388 return UNKNOWN_LIBCALL;
389}
390
391/// getFPTOUINT - Return the FPTOUINT_*_* value for the given types, or
392/// UNKNOWN_LIBCALL if there is none.
393RTLIB::Libcall RTLIB::getFPTOUINT(EVT OpVT, EVT RetVT) {
394 if (OpVT == MVT::f16) {
395 if (RetVT == MVT::i32)
396 return FPTOUINT_F16_I32;
397 if (RetVT == MVT::i64)
398 return FPTOUINT_F16_I64;
399 if (RetVT == MVT::i128)
400 return FPTOUINT_F16_I128;
401 } else if (OpVT == MVT::f32) {
402 if (RetVT == MVT::i32)
403 return FPTOUINT_F32_I32;
404 if (RetVT == MVT::i64)
405 return FPTOUINT_F32_I64;
406 if (RetVT == MVT::i128)
407 return FPTOUINT_F32_I128;
408 } else if (OpVT == MVT::f64) {
409 if (RetVT == MVT::i32)
410 return FPTOUINT_F64_I32;
411 if (RetVT == MVT::i64)
412 return FPTOUINT_F64_I64;
413 if (RetVT == MVT::i128)
414 return FPTOUINT_F64_I128;
415 } else if (OpVT == MVT::f80) {
416 if (RetVT == MVT::i32)
417 return FPTOUINT_F80_I32;
418 if (RetVT == MVT::i64)
419 return FPTOUINT_F80_I64;
420 if (RetVT == MVT::i128)
421 return FPTOUINT_F80_I128;
422 } else if (OpVT == MVT::f128) {
423 if (RetVT == MVT::i32)
424 return FPTOUINT_F128_I32;
425 if (RetVT == MVT::i64)
426 return FPTOUINT_F128_I64;
427 if (RetVT == MVT::i128)
428 return FPTOUINT_F128_I128;
429 } else if (OpVT == MVT::ppcf128) {
430 if (RetVT == MVT::i32)
431 return FPTOUINT_PPCF128_I32;
432 if (RetVT == MVT::i64)
433 return FPTOUINT_PPCF128_I64;
434 if (RetVT == MVT::i128)
435 return FPTOUINT_PPCF128_I128;
436 }
437 return UNKNOWN_LIBCALL;
438}
439
440/// getSINTTOFP - Return the SINTTOFP_*_* value for the given types, or
441/// UNKNOWN_LIBCALL if there is none.
442RTLIB::Libcall RTLIB::getSINTTOFP(EVT OpVT, EVT RetVT) {
443 if (OpVT == MVT::i32) {
444 if (RetVT == MVT::f16)
445 return SINTTOFP_I32_F16;
446 if (RetVT == MVT::f32)
447 return SINTTOFP_I32_F32;
448 if (RetVT == MVT::f64)
449 return SINTTOFP_I32_F64;
450 if (RetVT == MVT::f80)
451 return SINTTOFP_I32_F80;
452 if (RetVT == MVT::f128)
453 return SINTTOFP_I32_F128;
454 if (RetVT == MVT::ppcf128)
455 return SINTTOFP_I32_PPCF128;
456 } else if (OpVT == MVT::i64) {
457 if (RetVT == MVT::bf16)
458 return SINTTOFP_I64_BF16;
459 if (RetVT == MVT::f16)
460 return SINTTOFP_I64_F16;
461 if (RetVT == MVT::f32)
462 return SINTTOFP_I64_F32;
463 if (RetVT == MVT::f64)
464 return SINTTOFP_I64_F64;
465 if (RetVT == MVT::f80)
466 return SINTTOFP_I64_F80;
467 if (RetVT == MVT::f128)
468 return SINTTOFP_I64_F128;
469 if (RetVT == MVT::ppcf128)
470 return SINTTOFP_I64_PPCF128;
471 } else if (OpVT == MVT::i128) {
472 if (RetVT == MVT::f16)
473 return SINTTOFP_I128_F16;
474 if (RetVT == MVT::f32)
475 return SINTTOFP_I128_F32;
476 if (RetVT == MVT::f64)
477 return SINTTOFP_I128_F64;
478 if (RetVT == MVT::f80)
479 return SINTTOFP_I128_F80;
480 if (RetVT == MVT::f128)
481 return SINTTOFP_I128_F128;
482 if (RetVT == MVT::ppcf128)
483 return SINTTOFP_I128_PPCF128;
484 }
485 return UNKNOWN_LIBCALL;
486}
487
488/// getUINTTOFP - Return the UINTTOFP_*_* value for the given types, or
489/// UNKNOWN_LIBCALL if there is none.
490RTLIB::Libcall RTLIB::getUINTTOFP(EVT OpVT, EVT RetVT) {
491 if (OpVT == MVT::i32) {
492 if (RetVT == MVT::f16)
493 return UINTTOFP_I32_F16;
494 if (RetVT == MVT::f32)
495 return UINTTOFP_I32_F32;
496 if (RetVT == MVT::f64)
497 return UINTTOFP_I32_F64;
498 if (RetVT == MVT::f80)
499 return UINTTOFP_I32_F80;
500 if (RetVT == MVT::f128)
501 return UINTTOFP_I32_F128;
502 if (RetVT == MVT::ppcf128)
503 return UINTTOFP_I32_PPCF128;
504 } else if (OpVT == MVT::i64) {
505 if (RetVT == MVT::bf16)
506 return UINTTOFP_I64_BF16;
507 if (RetVT == MVT::f16)
508 return UINTTOFP_I64_F16;
509 if (RetVT == MVT::f32)
510 return UINTTOFP_I64_F32;
511 if (RetVT == MVT::f64)
512 return UINTTOFP_I64_F64;
513 if (RetVT == MVT::f80)
514 return UINTTOFP_I64_F80;
515 if (RetVT == MVT::f128)
516 return UINTTOFP_I64_F128;
517 if (RetVT == MVT::ppcf128)
518 return UINTTOFP_I64_PPCF128;
519 } else if (OpVT == MVT::i128) {
520 if (RetVT == MVT::f16)
521 return UINTTOFP_I128_F16;
522 if (RetVT == MVT::f32)
523 return UINTTOFP_I128_F32;
524 if (RetVT == MVT::f64)
525 return UINTTOFP_I128_F64;
526 if (RetVT == MVT::f80)
527 return UINTTOFP_I128_F80;
528 if (RetVT == MVT::f128)
529 return UINTTOFP_I128_F128;
530 if (RetVT == MVT::ppcf128)
531 return UINTTOFP_I128_PPCF128;
532 }
533 return UNKNOWN_LIBCALL;
534}
535
536RTLIB::Libcall RTLIB::getPOWI(EVT RetVT) {
537 return getFPLibCall(RetVT, POWI_F32, POWI_F64, POWI_F80, POWI_F128,
538 POWI_PPCF128);
539}
540
541RTLIB::Libcall RTLIB::getPOW(EVT RetVT) {
542 // TODO: Tablegen should generate this function
543 if (RetVT.isVector()) {
544 if (!RetVT.isSimple())
545 return RTLIB::UNKNOWN_LIBCALL;
546 switch (RetVT.getSimpleVT().SimpleTy) {
547 case MVT::v4f32:
548 return RTLIB::POW_V4F32;
549 case MVT::v2f64:
550 return RTLIB::POW_V2F64;
551 case MVT::nxv4f32:
552 return RTLIB::POW_NXV4F32;
553 case MVT::nxv2f64:
554 return RTLIB::POW_NXV2F64;
555 default:
556 return RTLIB::UNKNOWN_LIBCALL;
557 }
558 }
559
560 return getFPLibCall(RetVT, POW_F32, POW_F64, POW_F80, POW_F128, POW_PPCF128);
561}
562
563RTLIB::Libcall RTLIB::getLDEXP(EVT RetVT) {
564 return getFPLibCall(RetVT, LDEXP_F32, LDEXP_F64, LDEXP_F80, LDEXP_F128,
565 LDEXP_PPCF128);
566}
567
568RTLIB::Libcall RTLIB::getFREXP(EVT RetVT) {
569 return getFPLibCall(RetVT, FREXP_F32, FREXP_F64, FREXP_F80, FREXP_F128,
570 FREXP_PPCF128);
571}
572
573RTLIB::Libcall RTLIB::getSIN(EVT RetVT) {
574 return getFPLibCall(RetVT, SIN_F32, SIN_F64, SIN_F80, SIN_F128, SIN_PPCF128);
575}
576
577RTLIB::Libcall RTLIB::getCOS(EVT RetVT) {
578 return getFPLibCall(RetVT, COS_F32, COS_F64, COS_F80, COS_F128, COS_PPCF128);
579}
580
581RTLIB::Libcall RTLIB::getSINCOS(EVT RetVT) {
582 // TODO: Tablegen should generate this function
583 if (RetVT.isVector()) {
584 if (!RetVT.isSimple())
585 return RTLIB::UNKNOWN_LIBCALL;
586 switch (RetVT.getSimpleVT().SimpleTy) {
587 case MVT::v4f32:
588 return RTLIB::SINCOS_V4F32;
589 case MVT::v8f32:
590 return RTLIB::SINCOS_V8F32;
591 case MVT::v16f32:
592 return RTLIB::SINCOS_V16F32;
593 case MVT::v2f64:
594 return RTLIB::SINCOS_V2F64;
595 case MVT::v4f64:
596 return RTLIB::SINCOS_V4F64;
597 case MVT::v8f64:
598 return RTLIB::SINCOS_V8F64;
599 case MVT::nxv4f32:
600 return RTLIB::SINCOS_NXV4F32;
601 case MVT::nxv2f64:
602 return RTLIB::SINCOS_NXV2F64;
603 default:
604 return RTLIB::UNKNOWN_LIBCALL;
605 }
606 }
607
608 return getFPLibCall(RetVT, SINCOS_F32, SINCOS_F64, SINCOS_F80, SINCOS_F128,
609 SINCOS_PPCF128);
610}
611
612RTLIB::Libcall RTLIB::getSINCOSPI(EVT RetVT) {
613 // TODO: Tablegen should generate this function
614 if (RetVT.isVector()) {
615 if (!RetVT.isSimple())
616 return RTLIB::UNKNOWN_LIBCALL;
617 switch (RetVT.getSimpleVT().SimpleTy) {
618 case MVT::v4f32:
619 return RTLIB::SINCOSPI_V4F32;
620 case MVT::v2f64:
621 return RTLIB::SINCOSPI_V2F64;
622 case MVT::nxv4f32:
623 return RTLIB::SINCOSPI_NXV4F32;
624 case MVT::nxv2f64:
625 return RTLIB::SINCOSPI_NXV2F64;
626 default:
627 return RTLIB::UNKNOWN_LIBCALL;
628 }
629 }
630
631 return getFPLibCall(RetVT, SINCOSPI_F32, SINCOSPI_F64, SINCOSPI_F80,
632 SINCOSPI_F128, SINCOSPI_PPCF128);
633}
634
635RTLIB::Libcall RTLIB::getSINCOS_STRET(EVT RetVT) {
636 return getFPLibCall(RetVT, SINCOS_STRET_F32, SINCOS_STRET_F64,
637 UNKNOWN_LIBCALL, UNKNOWN_LIBCALL, UNKNOWN_LIBCALL);
638}
639
640RTLIB::Libcall RTLIB::getREM(EVT VT) {
641 // TODO: Tablegen should generate this function
642 if (VT.isVector()) {
643 if (!VT.isSimple())
644 return RTLIB::UNKNOWN_LIBCALL;
645 switch (VT.getSimpleVT().SimpleTy) {
646 case MVT::v4f32:
647 return RTLIB::REM_V4F32;
648 case MVT::v2f64:
649 return RTLIB::REM_V2F64;
650 case MVT::nxv4f32:
651 return RTLIB::REM_NXV4F32;
652 case MVT::nxv2f64:
653 return RTLIB::REM_NXV2F64;
654 default:
655 return RTLIB::UNKNOWN_LIBCALL;
656 }
657 }
658
659 return getFPLibCall(VT, REM_F32, REM_F64, REM_F80, REM_F128, REM_PPCF128);
660}
661
662RTLIB::Libcall RTLIB::getCBRT(EVT VT) {
663 // TODO: Tablegen should generate this function
664 if (VT.isVector()) {
665 if (!VT.isSimple())
666 return RTLIB::UNKNOWN_LIBCALL;
667 switch (VT.getSimpleVT().SimpleTy) {
668 case MVT::v4f32:
669 return RTLIB::CBRT_V4F32;
670 case MVT::v2f64:
671 return RTLIB::CBRT_V2F64;
672 case MVT::nxv4f32:
673 return RTLIB::CBRT_NXV4F32;
674 case MVT::nxv2f64:
675 return RTLIB::CBRT_NXV2F64;
676 default:
677 return RTLIB::UNKNOWN_LIBCALL;
678 }
679 }
680
681 return getFPLibCall(VT, CBRT_F32, CBRT_F64, CBRT_F80, CBRT_F128,
682 CBRT_PPCF128);
683}
684
685RTLIB::Libcall RTLIB::getMODF(EVT RetVT) {
686 // TODO: Tablegen should generate this function
687 if (RetVT.isVector()) {
688 if (!RetVT.isSimple())
689 return RTLIB::UNKNOWN_LIBCALL;
690 switch (RetVT.getSimpleVT().SimpleTy) {
691 case MVT::v4f32:
692 return RTLIB::MODF_V4F32;
693 case MVT::v2f64:
694 return RTLIB::MODF_V2F64;
695 case MVT::nxv4f32:
696 return RTLIB::MODF_NXV4F32;
697 case MVT::nxv2f64:
698 return RTLIB::MODF_NXV2F64;
699 default:
700 return RTLIB::UNKNOWN_LIBCALL;
701 }
702 }
703
704 return getFPLibCall(RetVT, MODF_F32, MODF_F64, MODF_F80, MODF_F128,
705 MODF_PPCF128);
706}
707
708RTLIB::Libcall RTLIB::getLROUND(EVT VT) {
709 if (VT == MVT::f32)
710 return RTLIB::LROUND_F32;
711 if (VT == MVT::f64)
712 return RTLIB::LROUND_F64;
713 if (VT == MVT::f80)
714 return RTLIB::LROUND_F80;
715 if (VT == MVT::f128)
716 return RTLIB::LROUND_F128;
717 if (VT == MVT::ppcf128)
718 return RTLIB::LROUND_PPCF128;
719
720 return RTLIB::UNKNOWN_LIBCALL;
721}
722
723RTLIB::Libcall RTLIB::getLLROUND(EVT VT) {
724 if (VT == MVT::f32)
725 return RTLIB::LLROUND_F32;
726 if (VT == MVT::f64)
727 return RTLIB::LLROUND_F64;
728 if (VT == MVT::f80)
729 return RTLIB::LLROUND_F80;
730 if (VT == MVT::f128)
731 return RTLIB::LLROUND_F128;
732 if (VT == MVT::ppcf128)
733 return RTLIB::LLROUND_PPCF128;
734
735 return RTLIB::UNKNOWN_LIBCALL;
736}
737
738RTLIB::Libcall RTLIB::getLRINT(EVT VT) {
739 if (VT == MVT::f32)
740 return RTLIB::LRINT_F32;
741 if (VT == MVT::f64)
742 return RTLIB::LRINT_F64;
743 if (VT == MVT::f80)
744 return RTLIB::LRINT_F80;
745 if (VT == MVT::f128)
746 return RTLIB::LRINT_F128;
747 if (VT == MVT::ppcf128)
748 return RTLIB::LRINT_PPCF128;
749 return RTLIB::UNKNOWN_LIBCALL;
750}
751
752RTLIB::Libcall RTLIB::getLLRINT(EVT VT) {
753 if (VT == MVT::f32)
754 return RTLIB::LLRINT_F32;
755 if (VT == MVT::f64)
756 return RTLIB::LLRINT_F64;
757 if (VT == MVT::f80)
758 return RTLIB::LLRINT_F80;
759 if (VT == MVT::f128)
760 return RTLIB::LLRINT_F128;
761 if (VT == MVT::ppcf128)
762 return RTLIB::LLRINT_PPCF128;
763 return RTLIB::UNKNOWN_LIBCALL;
764}
765
766RTLIB::Libcall RTLIB::getOutlineAtomicHelper(const Libcall (&LC)[5][4],
767 AtomicOrdering Order,
768 uint64_t MemSize) {
769 unsigned ModeN, ModelN;
770 switch (MemSize) {
771 case 1:
772 ModeN = 0;
773 break;
774 case 2:
775 ModeN = 1;
776 break;
777 case 4:
778 ModeN = 2;
779 break;
780 case 8:
781 ModeN = 3;
782 break;
783 case 16:
784 ModeN = 4;
785 break;
786 default:
787 return RTLIB::UNKNOWN_LIBCALL;
788 }
789
 790 switch (Order) {
 791 case AtomicOrdering::Monotonic:
 792 ModelN = 0;
 793 break;
 794 case AtomicOrdering::Acquire:
 795 ModelN = 1;
 796 break;
 797 case AtomicOrdering::Release:
 798 ModelN = 2;
 799 break;
 800 case AtomicOrdering::AcquireRelease:
 801 case AtomicOrdering::SequentiallyConsistent:
 802 ModelN = 3;
 803 break;
804 default:
805 return UNKNOWN_LIBCALL;
806 }
807
808 return LC[ModeN][ModelN];
809}
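// Illustrative note (not part of the original source): callers index the 5x4
// table as LC[ModeN][ModelN], where ModeN encodes the access size
// (1/2/4/8/16 bytes -> 0..4) and ModelN the ordering
// (relaxed/acquire/release/acq_rel -> 0..3). For example, with a CAS table
// built from the LCALL5 macro below:
//   RTLIB::Libcall LC =
//       RTLIB::getOutlineAtomicHelper(CASTable, AtomicOrdering::Acquire, 4);
//   // Selects CASTable[2][1], i.e. the 4-byte "_ACQ" variant.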
810
811RTLIB::Libcall RTLIB::getOUTLINE_ATOMIC(unsigned Opc, AtomicOrdering Order,
812 MVT VT) {
813 if (!VT.isScalarInteger())
814 return UNKNOWN_LIBCALL;
815 uint64_t MemSize = VT.getScalarSizeInBits() / 8;
816
817#define LCALLS(A, B) \
818 { A##B##_RELAX, A##B##_ACQ, A##B##_REL, A##B##_ACQ_REL }
819#define LCALL5(A) \
820 LCALLS(A, 1), LCALLS(A, 2), LCALLS(A, 4), LCALLS(A, 8), LCALLS(A, 16)
821 switch (Opc) {
 822 case ISD::ATOMIC_CMP_SWAP: {
 823 const Libcall LC[5][4] = {LCALL5(OUTLINE_ATOMIC_CAS)};
824 return getOutlineAtomicHelper(LC, Order, MemSize);
825 }
826 case ISD::ATOMIC_SWAP: {
827 const Libcall LC[5][4] = {LCALL5(OUTLINE_ATOMIC_SWP)};
828 return getOutlineAtomicHelper(LC, Order, MemSize);
829 }
 830 case ISD::ATOMIC_LOAD_ADD: {
 831 const Libcall LC[5][4] = {LCALL5(OUTLINE_ATOMIC_LDADD)};
832 return getOutlineAtomicHelper(LC, Order, MemSize);
833 }
834 case ISD::ATOMIC_LOAD_OR: {
835 const Libcall LC[5][4] = {LCALL5(OUTLINE_ATOMIC_LDSET)};
836 return getOutlineAtomicHelper(LC, Order, MemSize);
837 }
 838 case ISD::ATOMIC_LOAD_AND: {
 839 const Libcall LC[5][4] = {LCALL5(OUTLINE_ATOMIC_LDCLR)};
840 return getOutlineAtomicHelper(LC, Order, MemSize);
841 }
 842 case ISD::ATOMIC_LOAD_XOR: {
 843 const Libcall LC[5][4] = {LCALL5(OUTLINE_ATOMIC_LDEOR)};
844 return getOutlineAtomicHelper(LC, Order, MemSize);
845 }
846 default:
847 return UNKNOWN_LIBCALL;
848 }
849#undef LCALLS
850#undef LCALL5
851}
852
853RTLIB::Libcall RTLIB::getSYNC(unsigned Opc, MVT VT) {
854#define OP_TO_LIBCALL(Name, Enum) \
855 case Name: \
856 switch (VT.SimpleTy) { \
857 default: \
858 return UNKNOWN_LIBCALL; \
859 case MVT::i8: \
860 return Enum##_1; \
861 case MVT::i16: \
862 return Enum##_2; \
863 case MVT::i32: \
864 return Enum##_4; \
865 case MVT::i64: \
866 return Enum##_8; \
867 case MVT::i128: \
868 return Enum##_16; \
869 }
870
871 switch (Opc) {
872 OP_TO_LIBCALL(ISD::ATOMIC_SWAP, SYNC_LOCK_TEST_AND_SET)
873 OP_TO_LIBCALL(ISD::ATOMIC_CMP_SWAP, SYNC_VAL_COMPARE_AND_SWAP)
874 OP_TO_LIBCALL(ISD::ATOMIC_LOAD_ADD, SYNC_FETCH_AND_ADD)
875 OP_TO_LIBCALL(ISD::ATOMIC_LOAD_SUB, SYNC_FETCH_AND_SUB)
876 OP_TO_LIBCALL(ISD::ATOMIC_LOAD_AND, SYNC_FETCH_AND_AND)
877 OP_TO_LIBCALL(ISD::ATOMIC_LOAD_OR, SYNC_FETCH_AND_OR)
878 OP_TO_LIBCALL(ISD::ATOMIC_LOAD_XOR, SYNC_FETCH_AND_XOR)
879 OP_TO_LIBCALL(ISD::ATOMIC_LOAD_NAND, SYNC_FETCH_AND_NAND)
880 OP_TO_LIBCALL(ISD::ATOMIC_LOAD_MAX, SYNC_FETCH_AND_MAX)
881 OP_TO_LIBCALL(ISD::ATOMIC_LOAD_UMAX, SYNC_FETCH_AND_UMAX)
882 OP_TO_LIBCALL(ISD::ATOMIC_LOAD_MIN, SYNC_FETCH_AND_MIN)
883 OP_TO_LIBCALL(ISD::ATOMIC_LOAD_UMIN, SYNC_FETCH_AND_UMIN)
884 }
885
886#undef OP_TO_LIBCALL
887
888 return UNKNOWN_LIBCALL;
889}
890
 891RTLIB::Libcall RTLIB::getMEMCPY_ELEMENT_UNORDERED_ATOMIC(uint64_t ElementSize) {
 892 switch (ElementSize) {
893 case 1:
894 return MEMCPY_ELEMENT_UNORDERED_ATOMIC_1;
895 case 2:
896 return MEMCPY_ELEMENT_UNORDERED_ATOMIC_2;
897 case 4:
898 return MEMCPY_ELEMENT_UNORDERED_ATOMIC_4;
899 case 8:
900 return MEMCPY_ELEMENT_UNORDERED_ATOMIC_8;
901 case 16:
902 return MEMCPY_ELEMENT_UNORDERED_ATOMIC_16;
903 default:
904 return UNKNOWN_LIBCALL;
905 }
906}
907
 908RTLIB::Libcall RTLIB::getMEMMOVE_ELEMENT_UNORDERED_ATOMIC(uint64_t ElementSize) {
 909 switch (ElementSize) {
910 case 1:
911 return MEMMOVE_ELEMENT_UNORDERED_ATOMIC_1;
912 case 2:
913 return MEMMOVE_ELEMENT_UNORDERED_ATOMIC_2;
914 case 4:
915 return MEMMOVE_ELEMENT_UNORDERED_ATOMIC_4;
916 case 8:
917 return MEMMOVE_ELEMENT_UNORDERED_ATOMIC_8;
918 case 16:
919 return MEMMOVE_ELEMENT_UNORDERED_ATOMIC_16;
920 default:
921 return UNKNOWN_LIBCALL;
922 }
923}
924
 925RTLIB::Libcall RTLIB::getMEMSET_ELEMENT_UNORDERED_ATOMIC(uint64_t ElementSize) {
 926 switch (ElementSize) {
927 case 1:
928 return MEMSET_ELEMENT_UNORDERED_ATOMIC_1;
929 case 2:
930 return MEMSET_ELEMENT_UNORDERED_ATOMIC_2;
931 case 4:
932 return MEMSET_ELEMENT_UNORDERED_ATOMIC_4;
933 case 8:
934 return MEMSET_ELEMENT_UNORDERED_ATOMIC_8;
935 case 16:
936 return MEMSET_ELEMENT_UNORDERED_ATOMIC_16;
937 default:
938 return UNKNOWN_LIBCALL;
939 }
940}
941
 942ISD::CondCode TargetLoweringBase::getSoftFloatCmpLibcallPredicate(
 943 RTLIB::LibcallImpl Impl) const {
944 switch (Impl) {
945 case RTLIB::impl___aeabi_dcmpeq__une:
946 case RTLIB::impl___aeabi_fcmpeq__une:
947 // Usage in the eq case, so we have to invert the comparison.
948 return ISD::SETEQ;
949 case RTLIB::impl___aeabi_dcmpeq__oeq:
950 case RTLIB::impl___aeabi_fcmpeq__oeq:
951 // Normal comparison to boolean value.
952 return ISD::SETNE;
953 case RTLIB::impl___aeabi_dcmplt:
954 case RTLIB::impl___aeabi_dcmple:
955 case RTLIB::impl___aeabi_dcmpge:
956 case RTLIB::impl___aeabi_dcmpgt:
957 case RTLIB::impl___aeabi_dcmpun:
958 case RTLIB::impl___aeabi_fcmplt:
959 case RTLIB::impl___aeabi_fcmple:
960 case RTLIB::impl___aeabi_fcmpge:
961 case RTLIB::impl___aeabi_fcmpgt:
962 /// The AEABI versions return a typical boolean value, so we can compare
963 /// against the integer result as simply != 0.
964 return ISD::SETNE;
965 default:
966 break;
967 }
968
969 // Assume libgcc/compiler-rt behavior. Most of the cases are really aliases of
970 // each other, and return a 3-way comparison style result of -1, 0, or 1
971 // depending on lt/eq/gt.
972 //
973 // FIXME: It would be cleaner to directly express this as a 3-way comparison
974 // soft FP libcall instead of individual compares.
975 RTLIB::Libcall LC = RTLIB::RuntimeLibcallsInfo::getLibcallFromImpl(Impl);
976 switch (LC) {
977 case RTLIB::OEQ_F32:
978 case RTLIB::OEQ_F64:
979 case RTLIB::OEQ_F128:
980 case RTLIB::OEQ_PPCF128:
981 return ISD::SETEQ;
982 case RTLIB::UNE_F32:
983 case RTLIB::UNE_F64:
984 case RTLIB::UNE_F128:
985 case RTLIB::UNE_PPCF128:
986 return ISD::SETNE;
987 case RTLIB::OGE_F32:
988 case RTLIB::OGE_F64:
989 case RTLIB::OGE_F128:
990 case RTLIB::OGE_PPCF128:
991 return ISD::SETGE;
992 case RTLIB::OLT_F32:
993 case RTLIB::OLT_F64:
994 case RTLIB::OLT_F128:
995 case RTLIB::OLT_PPCF128:
996 return ISD::SETLT;
997 case RTLIB::OLE_F32:
998 case RTLIB::OLE_F64:
999 case RTLIB::OLE_F128:
1000 case RTLIB::OLE_PPCF128:
1001 return ISD::SETLE;
1002 case RTLIB::OGT_F32:
1003 case RTLIB::OGT_F64:
1004 case RTLIB::OGT_F128:
1005 case RTLIB::OGT_PPCF128:
1006 return ISD::SETGT;
1007 case RTLIB::UO_F32:
1008 case RTLIB::UO_F64:
1009 case RTLIB::UO_F128:
1010 case RTLIB::UO_PPCF128:
1011 return ISD::SETNE;
1012 default:
1013 llvm_unreachable("not a compare libcall");
1014 }
1015}
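// Illustrative note (not part of the original source): the condition code
// returned here is applied to the integer result of the soft-float comparison
// call. For example, an f64 ordered-less-than compare lowered through OLT_F64
// (__ltdf2 in libgcc/compiler-rt) becomes roughly:
//   int r = __ltdf2(a, b);   // 3-way style result, negative when a < b
//   bool lt = (r < 0);       // tested with the SETLT predicate returned here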
1016
1017/// NOTE: The TargetMachine owns TLOF.
 1018TargetLoweringBase::TargetLoweringBase(const TargetMachine &tm,
 1019 const TargetSubtargetInfo &STI)
1020 : TM(tm),
1021 RuntimeLibcallInfo(TM.getTargetTriple(), TM.Options.ExceptionModel,
1022 TM.Options.FloatABIType, TM.Options.EABIVersion,
1023 TM.Options.MCOptions.getABIName(), TM.Options.VecLib),
1024 Libcalls(RuntimeLibcallInfo, STI) {
1025 initActions();
1026
1027 // Perform these initializations only once.
1033 HasExtractBitsInsn = false;
1034 JumpIsExpensive = JumpIsExpensiveOverride;
1036 EnableExtLdPromotion = false;
1037 StackPointerRegisterToSaveRestore = 0;
1038 BooleanContents = UndefinedBooleanContent;
1039 BooleanFloatContents = UndefinedBooleanContent;
1040 BooleanVectorContents = UndefinedBooleanContent;
1041 SchedPreferenceInfo = Sched::ILP;
1044 MaxBytesForAlignment = 0;
1045 MaxAtomicSizeInBitsSupported = 0;
1046
1047 // Assume that even with libcalls, no target supports wider than 128 bit
1048 // division.
1049 MaxDivRemBitWidthSupported = 128;
1050
1051 MaxLargeFPConvertBitWidthSupported = 128;
1052
1053 MinCmpXchgSizeInBits = 0;
1054 SupportsUnalignedAtomics = false;
1055
1056 MinimumBitTestCmps = MinimumBitTestCmpsOverride;
1057}
1058
1059// Define the virtual destructor out-of-line to act as a key method to anchor
1060// debug info (see coding standards).
 1061TargetLoweringBase::~TargetLoweringBase() = default;
 1062
 1063void TargetLoweringBase::initActions() {
 1064 // All operations default to being supported.
1065 memset(OpActions, 0, sizeof(OpActions));
1066 memset(LoadExtActions, 0, sizeof(LoadExtActions));
1067 memset(AtomicLoadExtActions, 0, sizeof(AtomicLoadExtActions));
1068 memset(TruncStoreActions, 0, sizeof(TruncStoreActions));
1069 memset(IndexedModeActions, 0, sizeof(IndexedModeActions));
1070 memset(CondCodeActions, 0, sizeof(CondCodeActions));
1071 llvm::fill(RegClassForVT, nullptr);
1072 llvm::fill(TargetDAGCombineArray, 0);
1073
1074 // Let extending atomic loads be unsupported by default.
1075 for (MVT ValVT : MVT::all_valuetypes())
1076 for (MVT MemVT : MVT::all_valuetypes())
 1077 setAtomicLoadExtAction({ISD::SEXTLOAD, ISD::ZEXTLOAD}, ValVT, MemVT,
 1078 Expand);
1079
1080 // We're somewhat special casing MVT::i2 and MVT::i4. Ideally we want to
1081 // remove this and targets should individually set these types if not legal.
 1082 for (ISD::NodeType NT : enum_seq(ISD::DELETED_NODE, ISD::BUILTIN_OP_END,
 1083 force_iteration_on_noniterable_enum)) {
 1084 for (MVT VT : {MVT::i2, MVT::i4})
1085 OpActions[(unsigned)VT.SimpleTy][NT] = Expand;
1086 }
1087 for (MVT AVT : MVT::all_valuetypes()) {
1088 for (MVT VT : {MVT::i2, MVT::i4, MVT::v128i2, MVT::v64i4}) {
1089 setTruncStoreAction(AVT, VT, Expand);
1092 }
1093 }
1094 for (unsigned IM = (unsigned)ISD::PRE_INC;
1095 IM != (unsigned)ISD::LAST_INDEXED_MODE; ++IM) {
1096 for (MVT VT : {MVT::i2, MVT::i4}) {
1101 }
1102 }
1103
1104 for (MVT VT : MVT::fp_valuetypes()) {
1105 MVT IntVT = MVT::getIntegerVT(VT.getFixedSizeInBits());
1106 if (IntVT.isValid()) {
 1107 setOperationAction(ISD::ATOMIC_SWAP, VT, Promote);
 1108 AddPromotedToType(ISD::ATOMIC_SWAP, VT, IntVT);
 1109 }
1110 }
1111
1112 // If f16 fma is not natively supported, the value must be promoted to an f64
1113 // (and not to f32!) to prevent double rounding issues.
1114 AddPromotedToType(ISD::FMA, MVT::f16, MVT::f64);
1115 AddPromotedToType(ISD::STRICT_FMA, MVT::f16, MVT::f64);
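// Illustrative note (not part of the original source): AddPromotedToType
// records the type an operation is promoted to when its action is Promote, so
// a target that marks FMA/STRICT_FMA on f16 as Promote will go straight to
// f64 rather than the next wider type. Rounding an f16 fma through f32 can
// round twice (once to f32, once to f16) and differ from the correctly
// rounded f16 result, which is what the comment above is guarding against.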
1116
1117 // Set default actions for various operations.
1118 for (MVT VT : MVT::all_valuetypes()) {
1119 // Default all indexed load / store to expand.
1120 for (unsigned IM = (unsigned)ISD::PRE_INC;
1121 IM != (unsigned)ISD::LAST_INDEXED_MODE; ++IM) {
1126 }
1127
1128 // Most backends expect to see the node which just returns the value loaded.
1130
1131 // These operations default to expand.
1161 VT, Expand);
1162
1163 // Overflow operations default to expand
1166 VT, Expand);
1167
1168 // Carry-using overflow operations default to expand.
1171 VT, Expand);
1172
1173 // ADDC/ADDE/SUBC/SUBE default to expand.
1175 Expand);
1176
1177 // [US]CMP default to expand
1179
1180 // Halving adds
1183 Expand);
1184
1185 // Absolute difference
1187
1188 // Carry-less multiply
1190
1191 // Saturated trunc
1195
1196 // These default to Expand so they will be expanded to CTLZ/CTTZ by default.
1198 Expand);
1200
1202
1203 // These library functions default to expand.
1206 VT, Expand);
1207
1208 // These operations default to expand for vector types.
1209 if (VT.isVector())
1215 VT, Expand);
1216
1217 // Constrained floating-point operations default to expand.
1218#define DAG_INSTRUCTION(NAME, NARG, ROUND_MODE, INTRINSIC, DAGN) \
1219 setOperationAction(ISD::STRICT_##DAGN, VT, Expand);
1220#include "llvm/IR/ConstrainedOps.def"
1221
1222 // For most targets @llvm.get.dynamic.area.offset just returns 0.
1224
1225 // Vector reduction default to expand.
1233 VT, Expand);
1234
1235 // Named vector shuffles default to expand.
1237 Expand);
1238
1239 // Only some target support this vector operation. Most need to expand it.
1241
1242 // cttz.elts defaults to expand.
1244 Expand);
1245
1246 // VP operations default to expand.
1247#define BEGIN_REGISTER_VP_SDNODE(SDOPC, ...) \
1248 setOperationAction(ISD::SDOPC, VT, Expand);
1249#include "llvm/IR/VPIntrinsics.def"
1250
1251 // Masked vector extracts default to expand.
1253
1256
1257 // FP environment operations default to expand.
1261
1263
1268 }
1269
1270 // Most targets ignore the @llvm.prefetch intrinsic.
1272
1273 // Most targets also ignore the @llvm.readcyclecounter intrinsic.
1275
1276 // Most targets also ignore the @llvm.readsteadycounter intrinsic.
1278
1279 // ConstantFP nodes default to expand. Targets can either change this to
1280 // Legal, in which case all fp constants are legal, or use isFPImmLegal()
1281 // to optimize expansions for certain constants.
1283 {MVT::bf16, MVT::f16, MVT::f32, MVT::f64, MVT::f80, MVT::f128},
1284 Expand);
1285
1286 // Insert custom handling default for llvm.canonicalize.*.
1288 {MVT::f16, MVT::f32, MVT::f64, MVT::f128}, Expand);
1289
1290 // FIXME: Query RuntimeLibCalls to make the decision.
1292 {MVT::f32, MVT::f64, MVT::f128}, LibCall);
1293
1296 MVT::f16, Promote);
1297 // Default ISD::TRAP to expand (which turns it into abort).
1298 setOperationAction(ISD::TRAP, MVT::Other, Expand);
1299
1300 // On most systems, DEBUGTRAP and TRAP have no difference. The "Expand"
1301 // here is to inform DAG Legalizer to replace DEBUGTRAP with TRAP.
1303
1305
1308
1309 for (MVT VT : {MVT::i8, MVT::i16, MVT::i32, MVT::i64}) {
1312 }
1314
1315 // This one by default will call __clear_cache unless the target
1316 // wants something different.
1318
1319 // By default, STACKADDRESS nodes are expanded like STACKSAVE nodes.
1320 // On SPARC targets, custom lowering is required.
1322}
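// Illustrative note (not part of the original source): a target's own
// TargetLowering constructor runs after initActions() and overrides these
// defaults for the types it actually supports, for example:
//   setOperationAction(ISD::SDIV, MVT::i32, Expand);  // no native divide
//   setOperationAction(ISD::CTPOP, MVT::i64, Legal);  // native popcount
//   setLoadExtAction(ISD::SEXTLOAD, MVT::i32, MVT::i16, Legal);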
1323
 1324MVT TargetLoweringBase::getScalarShiftAmountTy(const DataLayout &DL,
 1325 EVT) const {
1326 return MVT::getIntegerVT(DL.getPointerSizeInBits(0));
1327}
1328
 1329MVT TargetLoweringBase::getShiftAmountTy(EVT LHSTy,
 1330 const DataLayout &DL) const {
1331 assert(LHSTy.isInteger() && "Shift amount is not an integer type!");
1332 if (LHSTy.isVector())
1333 return LHSTy;
1334 MVT ShiftVT = getScalarShiftAmountTy(DL, LHSTy);
 1335 // If any possible shift value won't fit in the preferred type, just use
1336 // something safe. Assume it will be legalized when the shift is expanded.
1337 if (ShiftVT.getSizeInBits() < Log2_32_Ceil(LHSTy.getSizeInBits()))
1338 ShiftVT = MVT::i32;
1339 assert(ShiftVT.getSizeInBits() >= Log2_32_Ceil(LHSTy.getSizeInBits()) &&
1340 "ShiftVT is still too small!");
1341 return ShiftVT;
1342}
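// Illustrative note (not part of the original source): for a 64-bit scalar
// LHS, Log2_32_Ceil(64) == 6, so any target-preferred shift-amount type of at
// least 6 bits (e.g. i8) is kept; only if the preferred type were narrower
// than that would the code fall back to MVT::i32.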
1343
1344bool TargetLoweringBase::canOpTrap(unsigned Op, EVT VT) const {
1345 assert(isTypeLegal(VT));
1346 switch (Op) {
1347 default:
1348 return false;
1349 case ISD::SDIV:
1350 case ISD::UDIV:
1351 case ISD::SREM:
1352 case ISD::UREM:
1353 return true;
1354 }
1355}
1356
 1357bool TargetLoweringBase::isNoopAddrSpaceCast(unsigned SrcAS,
 1358 unsigned DestAS) const {
1359 return TM.isNoopAddrSpaceCast(SrcAS, DestAS);
1360}
1361
 1362unsigned TargetLoweringBase::getBitWidthForCttzElements(
 1363 EVT RetVT, ElementCount EC, bool ZeroIsPoison,
1364 const ConstantRange *VScaleRange) const {
1365 // Find the smallest "sensible" element type to use for the expansion.
1366 ConstantRange CR(APInt(64, EC.getKnownMinValue()));
1367 if (EC.isScalable())
1368 CR = CR.umul_sat(*VScaleRange);
1369
1370 if (ZeroIsPoison)
1371 CR = CR.subtract(APInt(64, 1));
1372
1373 unsigned EltWidth = RetVT.getScalarSizeInBits();
1374 EltWidth = std::min(EltWidth, CR.getActiveBits());
1375 EltWidth = std::max(llvm::bit_ceil(EltWidth), (unsigned)8);
1376
1377 return EltWidth;
1378}
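// Illustrative note (not part of the original source): for a fixed 8-element
// vector with ZeroIsPoison == true, CR starts as the single value 8, becomes
// 7 after the subtraction, CR.getActiveBits() == 3, and the result is
// std::max(llvm::bit_ceil(3u), 8u) == 8 bits.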
1379
 1380void TargetLoweringBase::setJumpIsExpensive(bool isExpensive) {
 1381 // If the command-line option was specified, ignore this request.
1382 if (!JumpIsExpensiveOverride.getNumOccurrences())
1383 JumpIsExpensive = isExpensive;
1384}
1385
 1386TargetLoweringBase::LegalizeKind
 1387TargetLoweringBase::getTypeConversion(LLVMContext &Context, EVT VT) const {
 1388 // If this is a simple type, use the ComputeRegisterProp mechanism.
1389 if (VT.isSimple()) {
1390 MVT SVT = VT.getSimpleVT();
1391 assert((unsigned)SVT.SimpleTy < std::size(TransformToType));
1392 MVT NVT = TransformToType[SVT.SimpleTy];
1393 LegalizeTypeAction LA = ValueTypeActions.getTypeAction(SVT);
1394
1395 assert((LA == TypeLegal || LA == TypeSoftenFloat ||
1396 LA == TypeSoftPromoteHalf ||
1397 (NVT.isVector() ||
1398 ValueTypeActions.getTypeAction(NVT) != TypePromoteInteger)) &&
1399 "Promote may not follow Expand or Promote");
1400
1401 if (LA == TypeSplitVector)
1402 return LegalizeKind(LA, EVT(SVT).getHalfNumVectorElementsVT(Context));
1403 if (LA == TypeScalarizeVector)
1404 return LegalizeKind(LA, SVT.getVectorElementType());
1405 return LegalizeKind(LA, NVT);
1406 }
1407
1408 // Handle Extended Scalar Types.
1409 if (!VT.isVector()) {
1410 assert(VT.isInteger() && "Float types must be simple");
1411 unsigned BitSize = VT.getSizeInBits();
1412 // First promote to a power-of-two size, then expand if necessary.
1413 if (BitSize < 8 || !isPowerOf2_32(BitSize)) {
1414 EVT NVT = VT.getRoundIntegerType(Context);
1415 assert(NVT != VT && "Unable to round integer VT");
1416 LegalizeKind NextStep = getTypeConversion(Context, NVT);
1417 // Avoid multi-step promotion.
1418 if (NextStep.first == TypePromoteInteger)
1419 return NextStep;
1420 // Return rounded integer type.
1421 return LegalizeKind(TypePromoteInteger, NVT);
1422 }
1423
 1424 return LegalizeKind(TypeExpandInteger,
 1425 EVT::getIntegerVT(Context, VT.getSizeInBits() / 2));
1426 }
1427
1428 // Handle vector types.
1429 ElementCount NumElts = VT.getVectorElementCount();
1430 EVT EltVT = VT.getVectorElementType();
1431
1432 // Vectors with only one element are always scalarized.
1433 if (NumElts.isScalar())
1434 return LegalizeKind(TypeScalarizeVector, EltVT);
1435
1436 // Try to widen vector elements until the element type is a power of two and
1437 // promote it to a legal type later on, for example:
1438 // <3 x i8> -> <4 x i8> -> <4 x i32>
1439 if (EltVT.isInteger()) {
1440 // Vectors with a number of elements that is not a power of two are always
1441 // widened, for example <3 x i8> -> <4 x i8>.
1442 if (!VT.isPow2VectorType()) {
1443 NumElts = NumElts.coefficientNextPowerOf2();
1444 EVT NVT = EVT::getVectorVT(Context, EltVT, NumElts);
1445 return LegalizeKind(TypeWidenVector, NVT);
1446 }
1447
1448 // Examine the element type.
1449 LegalizeKind LK = getTypeConversion(Context, EltVT);
1450
1451 // If type is to be expanded, split the vector.
1452 // <4 x i140> -> <2 x i140>
1453 if (LK.first == TypeExpandInteger) {
1454 if (NumElts.isScalable() && NumElts.getKnownMinValue() == 1)
 1455 return LegalizeKind(TypeScalarizeScalableVector, EltVT);
 1456 return LegalizeKind(TypeSplitVector,
 1457 VT.getHalfNumVectorElementsVT(Context));
1458 }
1459
1460 // Promote the integer element types until a legal vector type is found
1461 // or until the element integer type is too big. If a legal type was not
1462 // found, fallback to the usual mechanism of widening/splitting the
1463 // vector.
1464 EVT OldEltVT = EltVT;
1465 while (true) {
1466 // Increase the bitwidth of the element to the next pow-of-two
1467 // (which is greater than 8 bits).
1468 EltVT = EVT::getIntegerVT(Context, 1 + EltVT.getSizeInBits())
1469 .getRoundIntegerType(Context);
1470
1471 // Stop trying when getting a non-simple element type.
1472 // Note that vector elements may be greater than legal vector element
1473 // types. Example: X86 XMM registers hold 64bit element on 32bit
1474 // systems.
1475 if (!EltVT.isSimple())
1476 break;
1477
1478 // Build a new vector type and check if it is legal.
1479 MVT NVT = MVT::getVectorVT(EltVT.getSimpleVT(), NumElts);
1480 // Found a legal promoted vector type.
1481 if (NVT != MVT() && ValueTypeActions.getTypeAction(NVT) == TypeLegal)
 1482 return LegalizeKind(TypePromoteInteger,
 1483 EVT::getVectorVT(Context, EltVT, NumElts));
1484 }
1485
1486 // Reset the type to the unexpanded type if we did not find a legal vector
1487 // type with a promoted vector element type.
1488 EltVT = OldEltVT;
1489 }
1490
1491 // Try to widen the vector until a legal type is found.
1492 // If there is no wider legal type, split the vector.
1493 while (true) {
1494 // Round up to the next power of 2.
1495 NumElts = NumElts.coefficientNextPowerOf2();
1496
1497 // If there is no simple vector type with this many elements then there
1498 // cannot be a larger legal vector type. Note that this assumes that
1499 // there are no skipped intermediate vector types in the simple types.
1500 if (!EltVT.isSimple())
1501 break;
1502 MVT LargerVector = MVT::getVectorVT(EltVT.getSimpleVT(), NumElts);
1503 if (LargerVector == MVT())
1504 break;
1505
1506 // If this type is legal then widen the vector.
1507 if (ValueTypeActions.getTypeAction(LargerVector) == TypeLegal)
1508 return LegalizeKind(TypeWidenVector, LargerVector);
1509 }
1510
1511 // Widen odd vectors to next power of two.
1512 if (!VT.isPow2VectorType()) {
1513 EVT NVT = VT.getPow2VectorType(Context);
1514 return LegalizeKind(TypeWidenVector, NVT);
1515 }
1516
1519
1520 // Vectors with illegal element types are expanded.
1521 EVT NVT = EVT::getVectorVT(Context, EltVT,
 1522 VT.getVectorElementCount().divideCoefficientBy(2));
 1523 return LegalizeKind(TypeSplitVector, NVT);
1524}
1525
1526static unsigned getVectorTypeBreakdownMVT(MVT VT, MVT &IntermediateVT,
1527 unsigned &NumIntermediates,
1528 MVT &RegisterVT,
1529 TargetLoweringBase *TLI) {
1530 // Figure out the right, legal destination reg to copy into.
 1531 ElementCount EC = VT.getVectorElementCount();
 1532 MVT EltTy = VT.getVectorElementType();
1533
1534 unsigned NumVectorRegs = 1;
1535
1536 // Scalable vectors cannot be scalarized, so splitting or widening is
1537 // required.
1538 if (VT.isScalableVector() && !isPowerOf2_32(EC.getKnownMinValue()))
 1539 report_fatal_error(
 1540 "Splitting or widening of non-power-of-2 MVTs is not implemented.");
1541
1542 // FIXME: We don't support non-power-of-2-sized vectors for now.
1543 // Ideally we could break down into LHS/RHS like LegalizeDAG does.
1544 if (!isPowerOf2_32(EC.getKnownMinValue())) {
1545 // Split EC to unit size (scalable property is preserved).
1546 NumVectorRegs = EC.getKnownMinValue();
1547 EC = ElementCount::getFixed(1);
1548 }
1549
1550 // Divide the input until we get to a supported size. This will
1551 // always end up with an EC that represent a scalar or a scalable
1552 // scalar.
1553 while (EC.getKnownMinValue() > 1 &&
1554 !TLI->isTypeLegal(MVT::getVectorVT(EltTy, EC))) {
1555 EC = EC.divideCoefficientBy(2);
1556 NumVectorRegs <<= 1;
1557 }
1558
1559 NumIntermediates = NumVectorRegs;
1560
1561 MVT NewVT = MVT::getVectorVT(EltTy, EC);
1562 if (!TLI->isTypeLegal(NewVT))
1563 NewVT = EltTy;
1564 IntermediateVT = NewVT;
1565
1566 unsigned LaneSizeInBits = NewVT.getScalarSizeInBits();
1567
1568 // Convert sizes such as i33 to i64.
1569 LaneSizeInBits = llvm::bit_ceil(LaneSizeInBits);
1570
1571 MVT DestVT = TLI->getRegisterType(NewVT);
1572 RegisterVT = DestVT;
1573 if (EVT(DestVT).bitsLT(NewVT)) // Value is expanded, e.g. i64 -> i16.
1574 return NumVectorRegs * (LaneSizeInBits / DestVT.getScalarSizeInBits());
1575
1576 // Otherwise, promotion or legal types use the same number of registers as
1577 // the vector decimated to the appropriate level.
1578 return NumVectorRegs;
1579}
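// Illustrative note (not part of the original source): on a target where
// v4f32 is legal but v8f32 is not, calling this helper with VT == MVT::v8f32
// halves the element count once (NumVectorRegs == 2), finds NewVT == v4f32
// legal, and reports two v4f32 intermediate/register parts -- matching the
// v8f32 example in the getVectorTypeBreakdown documentation further below.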
1580
1581/// isLegalRC - Return true if the value types that can be represented by the
1582/// specified register class are all legal.
 1583bool TargetLoweringBase::isLegalRC(const TargetRegisterInfo &TRI,
 1584 const TargetRegisterClass &RC) const {
1585 for (const auto *I = TRI.legalclasstypes_begin(RC); *I != MVT::Other; ++I)
1586 if (isTypeLegal(*I))
1587 return true;
1588 return false;
1589}
1590
 1591/// Replace/modify any TargetFrameIndex operands with a target-dependent
 1592/// sequence of memory operands that is recognized by PrologEpilogInserter.
 1593MachineBasicBlock *
 1594TargetLoweringBase::emitPatchPoint(MachineInstr &InitialMI,
 1595 MachineBasicBlock *MBB) const {
1596 MachineInstr *MI = &InitialMI;
1597 MachineFunction &MF = *MI->getMF();
1598 MachineFrameInfo &MFI = MF.getFrameInfo();
1599
1600 // We're handling multiple types of operands here:
1601 // PATCHPOINT MetaArgs - live-in, read only, direct
1602 // STATEPOINT Deopt Spill - live-through, read only, indirect
1603 // STATEPOINT Deopt Alloca - live-through, read only, direct
1604 // (We're currently conservative and mark the deopt slots read/write in
1605 // practice.)
1606 // STATEPOINT GC Spill - live-through, read/write, indirect
1607 // STATEPOINT GC Alloca - live-through, read/write, direct
1608 // The live-in vs live-through is handled already (the live through ones are
1609 // all stack slots), but we need to handle the different type of stackmap
1610 // operands and memory effects here.
1611
1612 if (llvm::none_of(MI->operands(),
1613 [](MachineOperand &Operand) { return Operand.isFI(); }))
1614 return MBB;
1615
1616 MachineInstrBuilder MIB = BuildMI(MF, MI->getDebugLoc(), MI->getDesc());
1617
1618 // Inherit previous memory operands.
1619 MIB.cloneMemRefs(*MI);
1620
1621 for (unsigned i = 0; i < MI->getNumOperands(); ++i) {
1622 MachineOperand &MO = MI->getOperand(i);
1623 if (!MO.isFI()) {
 1624 // Index of the Def operand this Use is tied to.
 1625 // Since Defs come before Uses, if a Use is tied, then the
 1626 // index of its Def must be smaller than the index of that Use.
 1627 // Also, Defs preserve their position in the new MI.
1628 unsigned TiedTo = i;
1629 if (MO.isReg() && MO.isTied())
1630 TiedTo = MI->findTiedOperandIdx(i);
1631 MIB.add(MO);
1632 if (TiedTo < i)
1633 MIB->tieOperands(TiedTo, MIB->getNumOperands() - 1);
1634 continue;
1635 }
1636
1637 // foldMemoryOperand builds a new MI after replacing a single FI operand
1638 // with the canonical set of five x86 addressing-mode operands.
1639 int FI = MO.getIndex();
1640
1641 // Add frame index operands recognized by stackmaps.cpp
 1642 if (MFI.isStatepointSpillSlotObjectIndex(FI)) {
 1643 // indirect-mem-ref tag, size, #FI, offset.
1644 // Used for spills inserted by StatepointLowering. This codepath is not
1645 // used for patchpoints/stackmaps at all, for these spilling is done via
1646 // foldMemoryOperand callback only.
1647 assert(MI->getOpcode() == TargetOpcode::STATEPOINT && "sanity");
1648 MIB.addImm(StackMaps::IndirectMemRefOp);
1649 MIB.addImm(MFI.getObjectSize(FI));
1650 MIB.add(MO);
1651 MIB.addImm(0);
1652 } else {
1653 // direct-mem-ref tag, #FI, offset.
1654 // Used by patchpoint, and direct alloca arguments to statepoints
1655 MIB.addImm(StackMaps::DirectMemRefOp);
1656 MIB.add(MO);
1657 MIB.addImm(0);
1658 }
1659
1660 assert(MIB->mayLoad() && "Folded a stackmap use to a non-load!");
1661
1662 // Add a new memory operand for this FI.
1663 assert(MFI.getObjectOffset(FI) != -1);
1664
1665 // Note: STATEPOINT MMOs are added during SelectionDAG. STACKMAP, and
1666 // PATCHPOINT should be updated to do the same. (TODO)
1667 if (MI->getOpcode() != TargetOpcode::STATEPOINT) {
1668 auto Flags = MachineMemOperand::MOLoad;
 1669 MachineMemOperand *MMO = MF.getMachineMemOperand(
 1670 MachinePointerInfo::getFixedStack(MF, FI), Flags,
 1671 MF.getDataLayout().getPointerSize(), MFI.getObjectAlign(FI));
 1672 MIB->addMemOperand(MF, MMO);
1673 }
1674 }
1675 MBB->insert(MachineBasicBlock::iterator(MI), MIB);
1676 MI->eraseFromParent();
1677 return MBB;
1678}
1679
1680/// findRepresentativeClass - Return the largest legal super-reg register class
1681/// of the register class for the specified type and its associated "cost".
1682// This function is in TargetLowering because it uses RegClassForVT which would
1683// need to be moved to TargetRegisterInfo and would necessitate moving
1684// isTypeLegal over as well - a massive change that would just require
1685// TargetLowering having a TargetRegisterInfo class member that it would use.
1686std::pair<const TargetRegisterClass *, uint8_t>
 1687TargetLoweringBase::findRepresentativeClass(const TargetRegisterInfo *TRI,
 1688 MVT VT) const {
1689 const TargetRegisterClass *RC = RegClassForVT[VT.SimpleTy];
1690 if (!RC)
1691 return std::make_pair(RC, 0);
1692
1693 // Compute the set of all super-register classes.
1694 BitVector SuperRegRC(TRI->getNumRegClasses());
1695 for (SuperRegClassIterator RCI(RC, TRI); RCI.isValid(); ++RCI)
1696 SuperRegRC.setBitsInMask(RCI.getMask());
1697
1698 // Find the first legal register class with the largest spill size.
1699 const TargetRegisterClass *BestRC = RC;
1700 for (unsigned i : SuperRegRC.set_bits()) {
1701 const TargetRegisterClass *SuperRC = TRI->getRegClass(i);
1702 // We want the largest possible spill size.
1703 if (TRI->getSpillSize(*SuperRC) <= TRI->getSpillSize(*BestRC))
1704 continue;
1705 if (!isLegalRC(*TRI, *SuperRC))
1706 continue;
1707 BestRC = SuperRC;
1708 }
1709 return std::make_pair(BestRC, 1);
1710}
1711
1712/// computeRegisterProperties - Once all of the register classes are added,
1713/// this allows us to compute derived properties we expose.
 1714void TargetLoweringBase::computeRegisterProperties(
 1715 const TargetRegisterInfo *TRI) {
1716 // Everything defaults to needing one register.
1717 for (unsigned i = 0; i != MVT::VALUETYPE_SIZE; ++i) {
1718 NumRegistersForVT[i] = 1;
1719 RegisterTypeForVT[i] = TransformToType[i] = (MVT::SimpleValueType)i;
1720 }
1721 // ...except isVoid, which doesn't need any registers.
1722 NumRegistersForVT[MVT::isVoid] = 0;
1723
1724 // Find the largest integer register class.
1725 unsigned LargestIntReg = MVT::LAST_INTEGER_VALUETYPE;
1726 for (; RegClassForVT[LargestIntReg] == nullptr; --LargestIntReg)
1727 assert(LargestIntReg != MVT::i1 && "No integer registers defined!");
1728
1729 // Every integer value type larger than this largest register takes twice as
1730 // many registers to represent as the previous ValueType.
1731 for (unsigned ExpandedReg = LargestIntReg + 1;
1732 ExpandedReg <= MVT::LAST_INTEGER_VALUETYPE; ++ExpandedReg) {
1733 NumRegistersForVT[ExpandedReg] = 2*NumRegistersForVT[ExpandedReg-1];
1734 RegisterTypeForVT[ExpandedReg] = (MVT::SimpleValueType)LargestIntReg;
1735 TransformToType[ExpandedReg] = (MVT::SimpleValueType)(ExpandedReg - 1);
1736 ValueTypeActions.setTypeAction((MVT::SimpleValueType)ExpandedReg,
 1737 TypeExpandInteger);
 1738 }
1739
1740 // Inspect all of the ValueType's smaller than the largest integer
1741 // register to see which ones need promotion.
1742 unsigned LegalIntReg = LargestIntReg;
1743 for (unsigned IntReg = LargestIntReg - 1;
1744 IntReg >= (unsigned)MVT::i1; --IntReg) {
1745 MVT IVT = (MVT::SimpleValueType)IntReg;
1746 if (isTypeLegal(IVT)) {
1747 LegalIntReg = IntReg;
1748 } else {
1749 RegisterTypeForVT[IntReg] = TransformToType[IntReg] =
1750 (MVT::SimpleValueType)LegalIntReg;
1751 ValueTypeActions.setTypeAction(IVT, TypePromoteInteger);
1752 }
1753 }
1754
1755 // ppcf128 type is really two f64's.
1756 if (!isTypeLegal(MVT::ppcf128)) {
1757 if (isTypeLegal(MVT::f64)) {
1758 NumRegistersForVT[MVT::ppcf128] = 2*NumRegistersForVT[MVT::f64];
1759 RegisterTypeForVT[MVT::ppcf128] = MVT::f64;
1760 TransformToType[MVT::ppcf128] = MVT::f64;
1761 ValueTypeActions.setTypeAction(MVT::ppcf128, TypeExpandFloat);
1762 } else {
1763 NumRegistersForVT[MVT::ppcf128] = NumRegistersForVT[MVT::i128];
1764 RegisterTypeForVT[MVT::ppcf128] = RegisterTypeForVT[MVT::i128];
1765 TransformToType[MVT::ppcf128] = MVT::i128;
1766 ValueTypeActions.setTypeAction(MVT::ppcf128, TypeSoftenFloat);
1767 }
1768 }
1769
1770 // Decide how to handle f128. If the target does not have native f128 support,
1771 // expand it to i128 and we will be generating soft float library calls.
1772 if (!isTypeLegal(MVT::f128)) {
1773 NumRegistersForVT[MVT::f128] = NumRegistersForVT[MVT::i128];
1774 RegisterTypeForVT[MVT::f128] = RegisterTypeForVT[MVT::i128];
1775 TransformToType[MVT::f128] = MVT::i128;
1776 ValueTypeActions.setTypeAction(MVT::f128, TypeSoftenFloat);
1777 }
1778
1779 // Decide how to handle f80. If the target does not have native f80 support,
1780 // expand it to i96 and we will be generating soft float library calls.
1781 if (!isTypeLegal(MVT::f80)) {
1782 NumRegistersForVT[MVT::f80] = 3*NumRegistersForVT[MVT::i32];
1783 RegisterTypeForVT[MVT::f80] = RegisterTypeForVT[MVT::i32];
1784 TransformToType[MVT::f80] = MVT::i32;
1785 ValueTypeActions.setTypeAction(MVT::f80, TypeSoftenFloat);
1786 }
1787
1788 // Decide how to handle f64. If the target does not have native f64 support,
1789 // expand it to i64 and we will be generating soft float library calls.
1790 if (!isTypeLegal(MVT::f64)) {
1791 NumRegistersForVT[MVT::f64] = NumRegistersForVT[MVT::i64];
1792 RegisterTypeForVT[MVT::f64] = RegisterTypeForVT[MVT::i64];
1793 TransformToType[MVT::f64] = MVT::i64;
1794 ValueTypeActions.setTypeAction(MVT::f64, TypeSoftenFloat);
1795 }
1796
1797 // Decide how to handle f32. If the target does not have native f32 support,
1798 // expand it to i32 and we will be generating soft float library calls.
1799 if (!isTypeLegal(MVT::f32)) {
1800 NumRegistersForVT[MVT::f32] = NumRegistersForVT[MVT::i32];
1801 RegisterTypeForVT[MVT::f32] = RegisterTypeForVT[MVT::i32];
1802 TransformToType[MVT::f32] = MVT::i32;
1803 ValueTypeActions.setTypeAction(MVT::f32, TypeSoftenFloat);
1804 }
1805
1806 // Decide how to handle f16. If the target does not have native f16 support,
1807 // promote it to f32, because there are no f16 library calls (except for
1808 // conversions).
1809 if (!isTypeLegal(MVT::f16)) {
1810 // Allow targets to control how we legalize half.
1811 bool UseFPRegsForHalfType = useFPRegsForHalfType();
1812
1813 if (!UseFPRegsForHalfType) {
1814 NumRegistersForVT[MVT::f16] = NumRegistersForVT[MVT::i16];
1815 RegisterTypeForVT[MVT::f16] = RegisterTypeForVT[MVT::i16];
1816 } else {
1817 NumRegistersForVT[MVT::f16] = NumRegistersForVT[MVT::f32];
1818 RegisterTypeForVT[MVT::f16] = RegisterTypeForVT[MVT::f32];
1819 }
1820 TransformToType[MVT::f16] = MVT::f32;
1821 ValueTypeActions.setTypeAction(MVT::f16, TypeSoftPromoteHalf);
1822 }
1823
1824 // Decide how to handle bf16. If the target does not have native bf16 support,
1825 // promote it to f32, because there are no bf16 library calls (except for
1826 // converting from f32 to bf16).
1827 if (!isTypeLegal(MVT::bf16)) {
1828 NumRegistersForVT[MVT::bf16] = NumRegistersForVT[MVT::f32];
1829 RegisterTypeForVT[MVT::bf16] = RegisterTypeForVT[MVT::f32];
1830 TransformToType[MVT::bf16] = MVT::f32;
1831 ValueTypeActions.setTypeAction(MVT::bf16, TypeSoftPromoteHalf);
1832 }
1833
1834 // Loop over all of the vector value types to see which need transformations.
1835 for (unsigned i = MVT::FIRST_VECTOR_VALUETYPE;
1836 i <= (unsigned)MVT::LAST_VECTOR_VALUETYPE; ++i) {
1837 MVT VT = (MVT::SimpleValueType) i;
1838 if (isTypeLegal(VT))
1839 continue;
1840
1841 MVT EltVT = VT.getVectorElementType();
 1842 ElementCount EC = VT.getVectorElementCount();
 1843 bool IsLegalWiderType = false;
1844 bool IsScalable = VT.isScalableVector();
1845 LegalizeTypeAction PreferredAction = getPreferredVectorAction(VT);
1846 switch (PreferredAction) {
1847 case TypePromoteInteger: {
1848 MVT::SimpleValueType EndVT = IsScalable ?
1849 MVT::LAST_INTEGER_SCALABLE_VECTOR_VALUETYPE :
1850 MVT::LAST_INTEGER_FIXEDLEN_VECTOR_VALUETYPE;
1851 // Try to promote the elements of integer vectors. If no legal
1852 // promotion was found, fall through to the widen-vector method.
1853 for (unsigned nVT = i + 1;
1854 (MVT::SimpleValueType)nVT <= EndVT; ++nVT) {
1855 MVT SVT = (MVT::SimpleValueType) nVT;
1856 // Promote vectors of integers to vectors with the same number
1857 // of elements, with a wider element type.
1858 if (SVT.getScalarSizeInBits() > EltVT.getFixedSizeInBits() &&
1859 SVT.getVectorElementCount() == EC && isTypeLegal(SVT)) {
1860 TransformToType[i] = SVT;
1861 RegisterTypeForVT[i] = SVT;
1862 NumRegistersForVT[i] = 1;
1863 ValueTypeActions.setTypeAction(VT, TypePromoteInteger);
1864 IsLegalWiderType = true;
1865 break;
1866 }
1867 }
1868 if (IsLegalWiderType)
1869 break;
1870 [[fallthrough]];
1871 }
1872
1873 case TypeWidenVector:
1874 if (isPowerOf2_32(EC.getKnownMinValue())) {
1875 // Try to widen the vector.
1876 for (unsigned nVT = i + 1; nVT <= MVT::LAST_VECTOR_VALUETYPE; ++nVT) {
1877 MVT SVT = (MVT::SimpleValueType) nVT;
1878 if (SVT.getVectorElementType() == EltVT &&
1879 SVT.isScalableVector() == IsScalable &&
 1880 SVT.getVectorMinNumElements() >=
 1881 EC.getKnownMinValue() &&
1882 isTypeLegal(SVT)) {
1883 TransformToType[i] = SVT;
1884 RegisterTypeForVT[i] = SVT;
1885 NumRegistersForVT[i] = 1;
1886 ValueTypeActions.setTypeAction(VT, TypeWidenVector);
1887 IsLegalWiderType = true;
1888 break;
1889 }
1890 }
1891 if (IsLegalWiderType)
1892 break;
1893 } else {
1894 // Only widen to the next power of 2 to keep consistency with EVT.
1895 MVT NVT = VT.getPow2VectorType();
1896 if (isTypeLegal(NVT)) {
1897 TransformToType[i] = NVT;
1898 ValueTypeActions.setTypeAction(VT, TypeWidenVector);
1899 RegisterTypeForVT[i] = NVT;
1900 NumRegistersForVT[i] = 1;
1901 break;
1902 }
1903 }
1904 [[fallthrough]];
1905
1906 case TypeSplitVector:
1907 case TypeScalarizeVector: {
1908 MVT IntermediateVT;
1909 MVT RegisterVT;
1910 unsigned NumIntermediates;
1911 unsigned NumRegisters = getVectorTypeBreakdownMVT(VT, IntermediateVT,
1912 NumIntermediates, RegisterVT, this);
1913 NumRegistersForVT[i] = NumRegisters;
1914 assert(NumRegistersForVT[i] == NumRegisters &&
1915 "NumRegistersForVT size cannot represent NumRegisters!");
1916 RegisterTypeForVT[i] = RegisterVT;
1917
1918 MVT NVT = VT.getPow2VectorType();
1919 if (NVT == VT) {
1920 // Type is already a power of 2. The default action is to split.
1921 TransformToType[i] = MVT::Other;
1922 if (PreferredAction == TypeScalarizeVector)
1923 ValueTypeActions.setTypeAction(VT, TypeScalarizeVector);
1924 else if (PreferredAction == TypeSplitVector)
1925 ValueTypeActions.setTypeAction(VT, TypeSplitVector);
1926 else if (EC.getKnownMinValue() > 1)
1927 ValueTypeActions.setTypeAction(VT, TypeSplitVector);
1928 else
1929         ValueTypeActions.setTypeAction(VT, EC.isScalable()
1930                                                ? TypeScalarizeScalableVector
1931                                                : TypeScalarizeVector);
1932 } else {
1933 TransformToType[i] = NVT;
1934 ValueTypeActions.setTypeAction(VT, TypeWidenVector);
1935 }
1936 break;
1937 }
1938 default:
1939 llvm_unreachable("Unknown vector legalization action!");
1940 }
1941 }
1942
1943 // Determine the 'representative' register class for each value type.
1944   // A representative register class is the largest (meaning one which is
1945   // not a sub-register class / subreg register class) legal register class for
1946   // a group of value types. For example, on i386 the representative of i8,
1947   // i16, and i32 would be GR32; on x86_64 it's GR64.
1948 for (unsigned i = 0; i != MVT::VALUETYPE_SIZE; ++i) {
1949 const TargetRegisterClass* RRC;
1950     uint8_t Cost;
1951     std::tie(RRC, Cost) = findRepresentativeClass(TRI, (MVT::SimpleValueType)i);
1952 RepRegClassForVT[i] = RRC;
1953 RepRegClassCostForVT[i] = Cost;
1954 }
1955
1956   // Compute the maximum legal store size, using the known minimum size for
1957   // scalable types.
1957 MaximumLegalStoreInBits = 0;
1958 for (MVT VT : MVT::all_valuetypes())
1959 if (VT != MVT::Other && isTypeLegal(VT) &&
1960 VT.getSizeInBits().getKnownMinValue() >= MaximumLegalStoreInBits)
1961 MaximumLegalStoreInBits = VT.getSizeInBits().getKnownMinValue();
1962}
1963
1964 EVT TargetLoweringBase::getSetCCResultType(const DataLayout &DL, LLVMContext &,
1965                                            EVT VT) const {
1966 assert(!VT.isVector() && "No default SetCC type for vectors!");
1967 return getPointerTy(DL).SimpleTy;
1968}
1969
1970/// getVectorTypeBreakdown - Vector types are broken down into some number of
1971/// legal first class types. For example, MVT::v8f32 maps to 2 MVT::v4f32
1972/// with Altivec or SSE1, or 8 promoted MVT::f64 values with the X86 FP stack.
1973/// Similarly, MVT::v2i64 turns into 4 MVT::i32 values with both PPC and X86.
1974///
1975/// This method returns the number of registers needed, and the VT for each
1976/// register. It also returns the VT and quantity of the intermediate values
1977/// before they are promoted/expanded.
1978 unsigned TargetLoweringBase::getVectorTypeBreakdown(LLVMContext &Context,
1979                                                     EVT VT, EVT &IntermediateVT,
1980 unsigned &NumIntermediates,
1981 MVT &RegisterVT) const {
1982 ElementCount EltCnt = VT.getVectorElementCount();
1983
1984 // If there is a wider vector type with the same element type as this one,
1985 // or a promoted vector type that has the same number of elements which
1986 // are wider, then we should convert to that legal vector type.
1987 // This handles things like <2 x float> -> <4 x float> and
1988 // <4 x i1> -> <4 x i32>.
1989 LegalizeTypeAction TA = getTypeAction(Context, VT);
1990 if (!EltCnt.isScalar() &&
1991 (TA == TypeWidenVector || TA == TypePromoteInteger)) {
1992 EVT RegisterEVT = getTypeToTransformTo(Context, VT);
1993 if (isTypeLegal(RegisterEVT)) {
1994 IntermediateVT = RegisterEVT;
1995 RegisterVT = RegisterEVT.getSimpleVT();
1996 NumIntermediates = 1;
1997 return 1;
1998 }
1999 }
2000
2001 // Figure out the right, legal destination reg to copy into.
2002 EVT EltTy = VT.getVectorElementType();
2003
2004 unsigned NumVectorRegs = 1;
2005
2006   // Scalable vectors cannot be scalarized, so handle the legalization of these
2007   // types as it is done elsewhere in SelectionDAG.
2008 if (EltCnt.isScalable()) {
2009 LegalizeKind LK;
2010 EVT PartVT = VT;
2011 do {
2012 // Iterate until we've found a legal (part) type to hold VT.
2013 LK = getTypeConversion(Context, PartVT);
2014 PartVT = LK.second;
2015 } while (LK.first != TypeLegal);
2016
2017 if (!PartVT.isVector()) {
2018       report_fatal_error(
2019           "Don't know how to legalize this scalable vector type");
2020 }
2021
2022 NumIntermediates =
2025 IntermediateVT = PartVT;
2026 RegisterVT = getRegisterType(Context, IntermediateVT);
2027 return NumIntermediates;
2028 }
2029
2030 // FIXME: We don't support non-power-of-2-sized vectors for now. Ideally
2031 // we could break down into LHS/RHS like LegalizeDAG does.
2032 if (!isPowerOf2_32(EltCnt.getKnownMinValue())) {
2033 NumVectorRegs = EltCnt.getKnownMinValue();
2034 EltCnt = ElementCount::getFixed(1);
2035 }
2036
2037 // Divide the input until we get to a supported size. This will always
2038 // end with a scalar if the target doesn't support vectors.
2039 while (EltCnt.getKnownMinValue() > 1 &&
2040 !isTypeLegal(EVT::getVectorVT(Context, EltTy, EltCnt))) {
2041 EltCnt = EltCnt.divideCoefficientBy(2);
2042 NumVectorRegs <<= 1;
2043 }
2044
2045 NumIntermediates = NumVectorRegs;
2046
2047 EVT NewVT = EVT::getVectorVT(Context, EltTy, EltCnt);
2048 if (!isTypeLegal(NewVT))
2049 NewVT = EltTy;
2050 IntermediateVT = NewVT;
2051
2052 MVT DestVT = getRegisterType(Context, NewVT);
2053 RegisterVT = DestVT;
2054
2055 if (EVT(DestVT).bitsLT(NewVT)) { // Value is expanded, e.g. i64 -> i16.
2056 TypeSize NewVTSize = NewVT.getSizeInBits();
2057 // Convert sizes such as i33 to i64.
2058     if (!isPowerOf2_32(NewVTSize.getKnownMinValue()))
2059       NewVTSize = NewVTSize.coefficientNextPowerOf2();
2060 return NumVectorRegs*(NewVTSize/DestVT.getSizeInBits());
2061 }
2062
2063 // Otherwise, promotion or legal types use the same number of registers as
2064 // the vector decimated to the appropriate level.
2065 return NumVectorRegs;
2066}
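// --- Illustrative sketch (not part of TargetLoweringBase.cpp) ---
// The loop above halves the element count until a legal vector type is found,
// after scalarizing non-power-of-2 vectors. The standalone helper below mirrors
// that arithmetic for an assumed target whose widest legal vector holds
// WidestLegalElts elements; the name and parameters are invented for the example.
static unsigned countIntermediates(unsigned NumElts, unsigned WidestLegalElts) {
  unsigned NumVectorRegs = 1;
  if ((NumElts & (NumElts - 1)) != 0) { // Not a power of 2: scalarize first.
    NumVectorRegs = NumElts;
    NumElts = 1;
  }
  while (NumElts > 1 && NumElts > WidestLegalElts) {
    NumElts /= 2;        // Split the vector in half...
    NumVectorRegs <<= 1; // ...and double the number of parts.
  }
  return NumVectorRegs;  // e.g. countIntermediates(8, 4) == 2, (3, 4) == 3.
}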
2067
2068 bool TargetLoweringBase::isSuitableForJumpTable(const SwitchInst *SI,
2069                                                 uint64_t NumCases,
2070                                                 uint64_t Range,
2071                                                 ProfileSummaryInfo *PSI,
2072 BlockFrequencyInfo *BFI) const {
2073   // FIXME: This function checks the maximum table size and density, but the
2074   // minimum size is not checked. It would be nice if the minimum size check
2075   // were also folded into this function. Currently, the minimum size check is
2076   // performed in findJumpTable() in SelectionDAGBuilder and
2077   // getEstimatedNumberOfCaseClusters() in BasicTTIImpl.
2078 const bool OptForSize =
2079 llvm::shouldOptimizeForSize(SI->getParent(), PSI, BFI);
2080 const unsigned MinDensity = getMinimumJumpTableDensity(OptForSize);
2081 const unsigned MaxJumpTableSize = getMaximumJumpTableSize();
2082
2083 // Check whether the number of cases is small enough and
2084 // the range is dense enough for a jump table.
2085 return (OptForSize || Range <= MaxJumpTableSize) &&
2086 (NumCases * 100 >= Range * MinDensity);
2087}
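// --- Illustrative sketch (not part of TargetLoweringBase.cpp) ---
// The return expression above is a cross-multiplied density test:
// NumCases * 100 >= Range * MinDensity avoids floating point while requiring
// that at least MinDensity percent of the case value range is actually covered.
// With the default density of 10 for normal functions, a switch spanning a
// range of 100 values needs 10 or more cases to qualify (size limit aside).
static bool denseEnough(uint64_t NumCases, uint64_t Range, unsigned MinDensity) {
  return NumCases * 100 >= Range * MinDensity;
}
// denseEnough(10, 100, 10) == true; denseEnough(5, 100, 10) == false.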
2088
2089 MVT TargetLoweringBase::getPreferredSwitchConditionType(LLVMContext &Context,
2090                                                          EVT ConditionVT) const {
2091 return getRegisterType(Context, ConditionVT);
2092}
2093
2094/// Get the EVTs and ArgFlags collections that represent the legalized return
2095/// type of the given function. This does not require a DAG or a return value,
2096/// and is suitable for use before any DAGs for the function are constructed.
2097/// TODO: Move this out of TargetLowering.cpp.
2098 void llvm::GetReturnInfo(CallingConv::ID CC, Type *ReturnType,
2099                          AttributeList attr,
2100                          SmallVectorImpl<ISD::OutputArg> &Outs,
2101                          const TargetLowering &TLI, const DataLayout &DL) {
2102   SmallVector<Type *, 4> Types;
2103   ComputeValueTypes(DL, ReturnType, Types);
2104 unsigned NumValues = Types.size();
2105 if (NumValues == 0) return;
2106
2107 for (Type *Ty : Types) {
2108 EVT VT = TLI.getValueType(DL, Ty);
2109 ISD::NodeType ExtendKind = ISD::ANY_EXTEND;
2110
2111 if (attr.hasRetAttr(Attribute::SExt))
2112 ExtendKind = ISD::SIGN_EXTEND;
2113 else if (attr.hasRetAttr(Attribute::ZExt))
2114 ExtendKind = ISD::ZERO_EXTEND;
2115
2116 if (ExtendKind != ISD::ANY_EXTEND && VT.isInteger())
2117 VT = TLI.getTypeForExtReturn(ReturnType->getContext(), VT, ExtendKind);
2118
2119 unsigned NumParts =
2120 TLI.getNumRegistersForCallingConv(ReturnType->getContext(), CC, VT);
2121 MVT PartVT =
2122 TLI.getRegisterTypeForCallingConv(ReturnType->getContext(), CC, VT);
2123
2124 // 'inreg' on function refers to return value
2125     ISD::ArgFlagsTy Flags = ISD::ArgFlagsTy();
2126     if (attr.hasRetAttr(Attribute::InReg))
2127 Flags.setInReg();
2128
2129 // Propagate extension type if any
2130 if (attr.hasRetAttr(Attribute::SExt))
2131 Flags.setSExt();
2132 else if (attr.hasRetAttr(Attribute::ZExt))
2133 Flags.setZExt();
2134
2135 for (unsigned i = 0; i < NumParts; ++i)
2136 Outs.push_back(ISD::OutputArg(Flags, PartVT, VT, Ty, 0, 0));
2137 }
2138}
2139
2140 Align TargetLoweringBase::getByValTypeAlignment(Type *Ty,
2141                                                 const DataLayout &DL) const {
2142 return DL.getABITypeAlign(Ty);
2143}
2144
2145 bool TargetLoweringBase::allowsMemoryAccessForAlignment(
2146     LLVMContext &Context, const DataLayout &DL, EVT VT, unsigned AddrSpace,
2147 Align Alignment, MachineMemOperand::Flags Flags, unsigned *Fast) const {
2148 // Check if the specified alignment is sufficient based on the data layout.
2149 // TODO: While using the data layout works in practice, a better solution
2150 // would be to implement this check directly (make this a virtual function).
2151 // For example, the ABI alignment may change based on software platform while
2152 // this function should only be affected by hardware implementation.
2153 Type *Ty = VT.getTypeForEVT(Context);
2154 if (VT.isZeroSized() || Alignment >= DL.getABITypeAlign(Ty)) {
2155 // Assume that an access that meets the ABI-specified alignment is fast.
2156 if (Fast != nullptr)
2157 *Fast = 1;
2158 return true;
2159 }
2160
2161 // This is a misaligned access.
2162 return allowsMisalignedMemoryAccesses(VT, AddrSpace, Alignment, Flags, Fast);
2163}
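// --- Illustrative usage sketch (not part of TargetLoweringBase.cpp) ---
// Callers typically combine the boolean result with the Fast out-parameter,
// much like isLoadBitCastBeneficial() further down does for allowsMemoryAccess:
//   unsigned Fast = 0;
//   bool OKAndFast =
//       TLI.allowsMemoryAccessForAlignment(Context, DL, VT, AddrSpace,
//                                          Alignment, Flags, &Fast) &&
//       Fast;
// Here TLI, Context, DL, VT, AddrSpace, Alignment and Flags are assumed to be
// in scope; only accesses that are both legal and fast are then emitted as-is.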
2164
2165 bool TargetLoweringBase::allowsMemoryAccessForAlignment(
2166     LLVMContext &Context, const DataLayout &DL, EVT VT,
2167 const MachineMemOperand &MMO, unsigned *Fast) const {
2168 return allowsMemoryAccessForAlignment(Context, DL, VT, MMO.getAddrSpace(),
2169 MMO.getAlign(), MMO.getFlags(), Fast);
2170}
2171
2172 bool TargetLoweringBase::allowsMemoryAccess(LLVMContext &Context,
2173                                             const DataLayout &DL, EVT VT,
2174                                             unsigned AddrSpace, Align Alignment,
2175                                             MachineMemOperand::Flags Flags,
2176                                             unsigned *Fast) const {
2177 return allowsMemoryAccessForAlignment(Context, DL, VT, AddrSpace, Alignment,
2178 Flags, Fast);
2179}
2180
2181 bool TargetLoweringBase::allowsMemoryAccess(LLVMContext &Context,
2182                                             const DataLayout &DL, EVT VT,
2183 const MachineMemOperand &MMO,
2184 unsigned *Fast) const {
2185 return allowsMemoryAccess(Context, DL, VT, MMO.getAddrSpace(), MMO.getAlign(),
2186 MMO.getFlags(), Fast);
2187}
2188
2189 bool TargetLoweringBase::allowsMemoryAccess(LLVMContext &Context,
2190                                             const DataLayout &DL, LLT Ty,
2191 const MachineMemOperand &MMO,
2192 unsigned *Fast) const {
2193 EVT VT = getApproximateEVTForLLT(Ty, Context);
2194 return allowsMemoryAccess(Context, DL, VT, MMO.getAddrSpace(), MMO.getAlign(),
2195 MMO.getFlags(), Fast);
2196}
2197
2198unsigned TargetLoweringBase::getMaxStoresPerMemset(bool OptSize) const {
2201
2203}
2204
2205unsigned TargetLoweringBase::getMaxStoresPerMemcpy(bool OptSize) const {
2208
2210}
2211
2215
2217}
2218
2219//===----------------------------------------------------------------------===//
2220// TargetTransformInfo Helpers
2221//===----------------------------------------------------------------------===//
2222
2223 int TargetLoweringBase::InstructionOpcodeToISD(unsigned Opcode) const {
2224   enum InstructionOpcodes {
2225#define HANDLE_INST(NUM, OPCODE, CLASS) OPCODE = NUM,
2226#define LAST_OTHER_INST(NUM) InstructionOpcodesCount = NUM
2227#include "llvm/IR/Instruction.def"
2228 };
2229 switch (static_cast<InstructionOpcodes>(Opcode)) {
2230 case Ret: return 0;
2231 case UncondBr: return 0;
2232 case CondBr: return 0;
2233 case Switch: return 0;
2234 case IndirectBr: return 0;
2235 case Invoke: return 0;
2236 case CallBr: return 0;
2237 case Resume: return 0;
2238 case Unreachable: return 0;
2239 case CleanupRet: return 0;
2240 case CatchRet: return 0;
2241 case CatchPad: return 0;
2242 case CatchSwitch: return 0;
2243 case CleanupPad: return 0;
2244 case FNeg: return ISD::FNEG;
2245 case Add: return ISD::ADD;
2246 case FAdd: return ISD::FADD;
2247 case Sub: return ISD::SUB;
2248 case FSub: return ISD::FSUB;
2249 case Mul: return ISD::MUL;
2250 case FMul: return ISD::FMUL;
2251 case UDiv: return ISD::UDIV;
2252 case SDiv: return ISD::SDIV;
2253 case FDiv: return ISD::FDIV;
2254 case URem: return ISD::UREM;
2255 case SRem: return ISD::SREM;
2256 case FRem: return ISD::FREM;
2257 case Shl: return ISD::SHL;
2258 case LShr: return ISD::SRL;
2259 case AShr: return ISD::SRA;
2260 case And: return ISD::AND;
2261 case Or: return ISD::OR;
2262 case Xor: return ISD::XOR;
2263 case Alloca: return 0;
2264 case Load: return ISD::LOAD;
2265 case Store: return ISD::STORE;
2266 case GetElementPtr: return 0;
2267 case Fence: return 0;
2268 case AtomicCmpXchg: return 0;
2269 case AtomicRMW: return 0;
2270 case Trunc: return ISD::TRUNCATE;
2271 case ZExt: return ISD::ZERO_EXTEND;
2272 case SExt: return ISD::SIGN_EXTEND;
2273 case FPToUI: return ISD::FP_TO_UINT;
2274 case FPToSI: return ISD::FP_TO_SINT;
2275 case UIToFP: return ISD::UINT_TO_FP;
2276 case SIToFP: return ISD::SINT_TO_FP;
2277 case FPTrunc: return ISD::FP_ROUND;
2278 case FPExt: return ISD::FP_EXTEND;
2279 case PtrToAddr: return ISD::BITCAST;
2280 case PtrToInt: return ISD::BITCAST;
2281 case IntToPtr: return ISD::BITCAST;
2282 case BitCast: return ISD::BITCAST;
2283 case AddrSpaceCast: return ISD::ADDRSPACECAST;
2284 case ICmp: return ISD::SETCC;
2285 case FCmp: return ISD::SETCC;
2286 case PHI: return 0;
2287 case Call: return 0;
2288 case Select: return ISD::SELECT;
2289 case UserOp1: return 0;
2290 case UserOp2: return 0;
2291 case VAArg: return 0;
2292 case ExtractElement: return ISD::EXTRACT_VECTOR_ELT;
2293 case InsertElement: return ISD::INSERT_VECTOR_ELT;
2294 case ShuffleVector: return ISD::VECTOR_SHUFFLE;
2295 case ExtractValue: return ISD::MERGE_VALUES;
2296 case InsertValue: return ISD::MERGE_VALUES;
2297 case LandingPad: return 0;
2298 case Freeze: return ISD::FREEZE;
2299 }
2300
2301 llvm_unreachable("Unknown instruction type encountered!");
2302}
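// --- Illustrative usage sketch (not part of TargetLoweringBase.cpp) ---
// Given some TargetLoweringBase reference TLI (assumed for the example), the
// mapping above turns IR opcodes into SelectionDAG opcodes, returning 0 for
// constructs such as branches, calls and PHIs that have no single ISD node:
//   int AddISD = TLI.InstructionOpcodeToISD(Instruction::Add); // ISD::ADD
//   int PhiISD = TLI.InstructionOpcodeToISD(Instruction::PHI); // 0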
2303
2304 int TargetLoweringBase::IntrinsicIDToISD(Intrinsic::ID ID) const {
2305   switch (ID) {
2306 case Intrinsic::acos:
2307 return ISD::FACOS;
2308 case Intrinsic::asin:
2309 return ISD::FASIN;
2310 case Intrinsic::atan:
2311 return ISD::FATAN;
2312 case Intrinsic::cos:
2313 return ISD::FCOS;
2314 case Intrinsic::cosh:
2315 return ISD::FCOSH;
2316 case Intrinsic::exp:
2317 return ISD::FEXP;
2318 case Intrinsic::exp2:
2319 return ISD::FEXP2;
2320 case Intrinsic::exp10:
2321 return ISD::FEXP10;
2322 case Intrinsic::log:
2323 return ISD::FLOG;
2324 case Intrinsic::log2:
2325 return ISD::FLOG2;
2326 case Intrinsic::log10:
2327 return ISD::FLOG10;
2328 case Intrinsic::sin:
2329 return ISD::FSIN;
2330 case Intrinsic::sinh:
2331 return ISD::FSINH;
2332 case Intrinsic::tan:
2333 return ISD::FTAN;
2334 case Intrinsic::tanh:
2335 return ISD::FTANH;
2336 default:
2337 return ISD::DELETED_NODE;
2338 }
2339}
2340
2341Value *
2342 TargetLoweringBase::getDefaultSafeStackPointerLocation(IRBuilderBase &IRB,
2343                                                        bool UseTLS) const {
2344 // compiler-rt provides a variable with a magic name. Targets that do not
2345 // link with compiler-rt may also provide such a variable.
2346 Module *M = IRB.GetInsertBlock()->getParent()->getParent();
2347
2348 RTLIB::LibcallImpl UnsafeStackPtrImpl =
2349 Libcalls.getLibcallImpl(RTLIB::SAFESTACK_UNSAFE_STACK_PTR);
2350 if (UnsafeStackPtrImpl == RTLIB::Unsupported)
2351 return nullptr;
2352
2353 StringRef UnsafeStackPtrVar =
2354       getLibcallImplName(UnsafeStackPtrImpl);
2355   auto UnsafeStackPtr =
2356 dyn_cast_or_null<GlobalVariable>(M->getNamedValue(UnsafeStackPtrVar));
2357
2358 const DataLayout &DL = M->getDataLayout();
2359 PointerType *StackPtrTy = DL.getAllocaPtrType(M->getContext());
2360
2361 if (!UnsafeStackPtr) {
2362 auto TLSModel = UseTLS ?
2364         GlobalValue::InitialExecTLSModel : GlobalValue::NotThreadLocal;
2365     // The global variable is not defined yet, define it ourselves.
2366 // We use the initial-exec TLS model because we do not support the
2367 // variable living anywhere other than in the main executable.
2368 UnsafeStackPtr = new GlobalVariable(
2369 *M, StackPtrTy, false, GlobalValue::ExternalLinkage, nullptr,
2370 UnsafeStackPtrVar, nullptr, TLSModel);
2371 } else {
2372 // The variable exists, check its type and attributes.
2373 //
2374 // FIXME: Move to IR verifier.
2375 if (UnsafeStackPtr->getValueType() != StackPtrTy)
2376 report_fatal_error(Twine(UnsafeStackPtrVar) + " must have void* type");
2377 if (UseTLS != UnsafeStackPtr->isThreadLocal())
2378 report_fatal_error(Twine(UnsafeStackPtrVar) + " must " +
2379 (UseTLS ? "" : "not ") + "be thread-local");
2380 }
2381 return UnsafeStackPtr;
2382}
2383
2384 Value *TargetLoweringBase::getSafeStackPointerLocation(
2385     IRBuilderBase &IRB, const LibcallLoweringInfo &Libcalls) const {
2386 RTLIB::LibcallImpl SafestackPointerAddressImpl =
2387 Libcalls.getLibcallImpl(RTLIB::SAFESTACK_POINTER_ADDRESS);
2388 if (SafestackPointerAddressImpl == RTLIB::Unsupported)
2389 return getDefaultSafeStackPointerLocation(IRB, true);
2390
2391 Module *M = IRB.GetInsertBlock()->getParent()->getParent();
2392 auto *PtrTy = PointerType::getUnqual(M->getContext());
2393
2394 // Android provides a libc function to retrieve the address of the current
2395 // thread's unsafe stack pointer.
2396 FunctionCallee Fn =
2397       M->getOrInsertFunction(getLibcallImplName(
2398                                  SafestackPointerAddressImpl),
2399 PtrTy);
2400 return IRB.CreateCall(Fn);
2401}
2402
2403//===----------------------------------------------------------------------===//
2404// Loop Strength Reduction hooks
2405//===----------------------------------------------------------------------===//
2406
2407/// isLegalAddressingMode - Return true if the addressing mode represented
2408/// by AM is legal for this target, for a load/store of the specified type.
2409 bool TargetLoweringBase::isLegalAddressingMode(const DataLayout &DL,
2410                                                const AddrMode &AM, Type *Ty,
2411 unsigned AS, Instruction *I) const {
2412 // The default implementation of this implements a conservative RISCy, r+r and
2413 // r+i addr mode.
2414
2415 // Scalable offsets not supported
2416 if (AM.ScalableOffset)
2417 return false;
2418
2419 // Allows a sign-extended 16-bit immediate field.
2420 if (AM.BaseOffs <= -(1LL << 16) || AM.BaseOffs >= (1LL << 16)-1)
2421 return false;
2422
2423 // No global is ever allowed as a base.
2424 if (AM.BaseGV)
2425 return false;
2426
2427 // Only support r+r,
2428 switch (AM.Scale) {
2429 case 0: // "r+i" or just "i", depending on HasBaseReg.
2430 break;
2431 case 1:
2432 if (AM.HasBaseReg && AM.BaseOffs) // "r+r+i" is not allowed.
2433 return false;
2434 // Otherwise we have r+r or r+i.
2435 break;
2436 case 2:
2437 if (AM.HasBaseReg || AM.BaseOffs) // 2*r+r or 2*r+i is not allowed.
2438 return false;
2439 // Allow 2*r as r+r.
2440 break;
2441 default: // Don't allow n * r
2442 return false;
2443 }
2444
2445 return true;
2446}
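// --- Illustrative sketch (not part of TargetLoweringBase.cpp) ---
// The standalone helper below restates the default rules above for a
// simplified addressing mode (global bases and scalable offsets omitted);
// the struct and function names are invented for the example.
struct SimpleAddrMode {
  long long BaseOffs = 0; // immediate offset
  bool HasBaseReg = false;
  long long Scale = 0;    // only 0, 1 or 2 accepted by the default rules
};
static bool defaultAddrModeIsLegal(const SimpleAddrMode &AM) {
  // Only a sign-extended 16-bit immediate field is allowed.
  if (AM.BaseOffs <= -(1LL << 16) || AM.BaseOffs >= (1LL << 16) - 1)
    return false;
  switch (AM.Scale) {
  case 0: return true;                            // "r+i" or just "i"
  case 1: return !(AM.HasBaseReg && AM.BaseOffs); // "r+r" or "r+i", not "r+r+i"
  case 2: return !AM.HasBaseReg && !AM.BaseOffs;  // "2*r" only
  default: return false;                          // no n*r
  }
}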
2447
2448//===----------------------------------------------------------------------===//
2449// Stack Protector
2450//===----------------------------------------------------------------------===//
2451
2452// For OpenBSD return its special guard variable. Otherwise return nullptr,
2453 // so that SelectionDAG handles SSP.
2454Value *
2455 TargetLoweringBase::getIRStackGuard(IRBuilderBase &IRB,
2456                                     const LibcallLoweringInfo &Libcalls) const {
2457 RTLIB::LibcallImpl GuardLocalImpl =
2458 Libcalls.getLibcallImpl(RTLIB::STACK_CHECK_GUARD);
2459 if (GuardLocalImpl != RTLIB::impl___guard_local)
2460 return nullptr;
2461
2462 Module &M = *IRB.GetInsertBlock()->getParent()->getParent();
2463 const DataLayout &DL = M.getDataLayout();
2464 PointerType *PtrTy =
2465 PointerType::get(M.getContext(), DL.getDefaultGlobalsAddressSpace());
2466 GlobalVariable *G =
2467 M.getOrInsertGlobal(getLibcallImplName(GuardLocalImpl), PtrTy);
2468 G->setVisibility(GlobalValue::HiddenVisibility);
2469 return G;
2470}
2471
2472// Currently only support "standard" __stack_chk_guard.
2473// TODO: add LOAD_STACK_GUARD support.
2474 void TargetLoweringBase::insertSSPDeclarations(
2475     Module &M, const LibcallLoweringInfo &Libcalls) const {
2476 RTLIB::LibcallImpl StackGuardImpl =
2477 Libcalls.getLibcallImpl(RTLIB::STACK_CHECK_GUARD);
2478 if (StackGuardImpl == RTLIB::Unsupported)
2479 return;
2480
2481 StringRef StackGuardVarName = getLibcallImplName(StackGuardImpl);
2482 M.getOrInsertGlobal(
2483 StackGuardVarName, PointerType::getUnqual(M.getContext()), [=, &M]() {
2484 auto *GV = new GlobalVariable(M, PointerType::getUnqual(M.getContext()),
2485 false, GlobalVariable::ExternalLinkage,
2486 nullptr, StackGuardVarName);
2487
2488 // FreeBSD has "__stack_chk_guard" defined externally on libc.so
2489 if (M.getDirectAccessExternalData() &&
2490 !TM.getTargetTriple().isOSCygMing() &&
2491 !(TM.getTargetTriple().isPPC64() &&
2492 TM.getTargetTriple().isOSFreeBSD()) &&
2493 (!TM.getTargetTriple().isOSDarwin() ||
2494 TM.getRelocationModel() == Reloc::Static))
2495 GV->setDSOLocal(true);
2496
2497 return GV;
2498 });
2499}
2500
2501// Currently only support "standard" __stack_chk_guard.
2502// TODO: add LOAD_STACK_GUARD support.
2503 Value *TargetLoweringBase::getSDagStackGuard(
2504     const Module &M, const LibcallLoweringInfo &Libcalls) const {
2505 RTLIB::LibcallImpl GuardVarImpl =
2506 Libcalls.getLibcallImpl(RTLIB::STACK_CHECK_GUARD);
2507 if (GuardVarImpl == RTLIB::Unsupported)
2508 return nullptr;
2509 return M.getNamedValue(getLibcallImplName(GuardVarImpl));
2510}
2511
2512 Function *TargetLoweringBase::getSSPStackGuardCheck(
2513     const Module &M, const LibcallLoweringInfo &Libcalls) const {
2514 // MSVC CRT has a function to validate security cookie.
2515 RTLIB::LibcallImpl SecurityCheckCookieLibcall =
2516 Libcalls.getLibcallImpl(RTLIB::SECURITY_CHECK_COOKIE);
2517 if (SecurityCheckCookieLibcall != RTLIB::Unsupported)
2518 return M.getFunction(getLibcallImplName(SecurityCheckCookieLibcall));
2519 return nullptr;
2520}
2521
2522 unsigned TargetLoweringBase::getMinimumJumpTableEntries() const {
2523   return MinimumJumpTableEntries;
2524 }
2525
2526 void TargetLoweringBase::setMinimumJumpTableEntries(unsigned Val) {
2527   MinimumJumpTableEntries = Val;
2528 }
2529
2530unsigned TargetLoweringBase::getMinimumJumpTableDensity(bool OptForSize) const {
2531 return OptForSize ? OptsizeJumpTableDensity : JumpTableDensity;
2532}
2533
2534 unsigned TargetLoweringBase::getMaximumJumpTableSize() const {
2535   return MaximumJumpTableSize;
2536 }
2537
2538 void TargetLoweringBase::setMaximumJumpTableSize(unsigned Val) {
2539   MaximumJumpTableSize = Val;
2540 }
2541
2542 bool TargetLoweringBase::isJumpTableRelative() const {
2543   return getTargetMachine().isPositionIndependent();
2544 }
2545
2546 unsigned TargetLoweringBase::getMinimumBitTestCmps() const {
2547   return MinimumBitTestCmps;
2548}
2549
2550 void TargetLoweringBase::setMinimumBitTestCmps(unsigned Val) {
2551   MinimumBitTestCmps = Val;
2552}
2553
2554 Align TargetLoweringBase::getPrefLoopAlignment(MachineLoop *ML) const {
2555   if (TM.Options.LoopAlignment)
2556 return Align(TM.Options.LoopAlignment);
2557 return PrefLoopAlignment;
2558}
2559
2560 unsigned TargetLoweringBase::getMaxPermittedBytesForAlignment(
2561     MachineBasicBlock *MBB) const {
2562 return MaxBytesForAlignment;
2563}
2564
2565//===----------------------------------------------------------------------===//
2566// Reciprocal Estimates
2567//===----------------------------------------------------------------------===//
2568
2569/// Get the reciprocal estimate attribute string for a function that will
2570/// override the target defaults.
2571 static StringRef getRecipEstimateForFunc(MachineFunction &MF) {
2572   const Function &F = MF.getFunction();
2573 return F.getFnAttribute("reciprocal-estimates").getValueAsString();
2574}
2575
2576/// Construct a string for the given reciprocal operation of the given type.
2577/// This string should match the corresponding option to the front-end's
2578/// "-mrecip" flag assuming those strings have been passed through in an
2579/// attribute string. For example, "vec-divf" for a division of a vXf32.
2580static std::string getReciprocalOpName(bool IsSqrt, EVT VT) {
2581 std::string Name = VT.isVector() ? "vec-" : "";
2582
2583 Name += IsSqrt ? "sqrt" : "div";
2584
2585 // TODO: Handle other float types?
2586 if (VT.getScalarType() == MVT::f64) {
2587 Name += "d";
2588 } else if (VT.getScalarType() == MVT::f16) {
2589 Name += "h";
2590 } else {
2591 assert(VT.getScalarType() == MVT::f32 &&
2592 "Unexpected FP type for reciprocal estimate");
2593 Name += "f";
2594 }
2595
2596 return Name;
2597}
2598
2599/// Return the character position and value (a single numeric character) of a
2600/// customized refinement operation in the input string if it exists. Return
2601/// false if there is no customized refinement step count.
2602static bool parseRefinementStep(StringRef In, size_t &Position,
2603 uint8_t &Value) {
2604 const char RefStepToken = ':';
2605 Position = In.find(RefStepToken);
2606 if (Position == StringRef::npos)
2607 return false;
2608
2609 StringRef RefStepString = In.substr(Position + 1);
2610 // Allow exactly one numeric character for the additional refinement
2611 // step parameter.
2612 if (RefStepString.size() == 1) {
2613 char RefStepChar = RefStepString[0];
2614 if (isDigit(RefStepChar)) {
2615 Value = RefStepChar - '0';
2616 return true;
2617 }
2618 }
2619 report_fatal_error("Invalid refinement step for -recip.");
2620}
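// --- Illustrative note (not part of TargetLoweringBase.cpp) ---
// In the "reciprocal-estimates" attribute string parsed here and below, each
// comma-separated entry is an operation name (e.g. "divf", "vec-sqrtd"),
// optionally followed by ':' and a single-digit refinement step count, and a
// leading '!' disables the entry. For example "vec-divf:2,!sqrtd" enables
// vector f32 division estimates with two refinement steps and disables scalar
// f64 square-root estimates, while "all:1" enables everything with one step.
//   size_t Pos; uint8_t Steps;
//   parseRefinementStep("divf:2", Pos, Steps); // true, Pos == 4, Steps == 2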
2621
2622/// For the input attribute string, return one of the ReciprocalEstimate enum
2623/// status values (enabled, disabled, or not specified) for this operation on
2624/// the specified data type.
2625static int getOpEnabled(bool IsSqrt, EVT VT, StringRef Override) {
2626 if (Override.empty())
2627     return TargetLoweringBase::ReciprocalEstimate::Unspecified;
2628
2629 SmallVector<StringRef, 4> OverrideVector;
2630 Override.split(OverrideVector, ',');
2631 unsigned NumArgs = OverrideVector.size();
2632
2633 // Check if "all", "none", or "default" was specified.
2634 if (NumArgs == 1) {
2635 // Look for an optional setting of the number of refinement steps needed
2636 // for this type of reciprocal operation.
2637 size_t RefPos;
2638 uint8_t RefSteps;
2639 if (parseRefinementStep(Override, RefPos, RefSteps)) {
2640 // Split the string for further processing.
2641 Override = Override.substr(0, RefPos);
2642 }
2643
2644 // All reciprocal types are enabled.
2645 if (Override == "all")
2646       return TargetLoweringBase::ReciprocalEstimate::Enabled;
2647
2648 // All reciprocal types are disabled.
2649 if (Override == "none")
2650       return TargetLoweringBase::ReciprocalEstimate::Disabled;
2651
2652 // Target defaults for enablement are used.
2653 if (Override == "default")
2654       return TargetLoweringBase::ReciprocalEstimate::Unspecified;
2655   }
2656
2657 // The attribute string may omit the size suffix ('f'/'d').
2658 std::string VTName = getReciprocalOpName(IsSqrt, VT);
2659 std::string VTNameNoSize = VTName;
2660 VTNameNoSize.pop_back();
2661 static const char DisabledPrefix = '!';
2662
2663 for (StringRef RecipType : OverrideVector) {
2664 size_t RefPos;
2665 uint8_t RefSteps;
2666 if (parseRefinementStep(RecipType, RefPos, RefSteps))
2667 RecipType = RecipType.substr(0, RefPos);
2668
2669 // Ignore the disablement token for string matching.
2670 bool IsDisabled = RecipType[0] == DisabledPrefix;
2671 if (IsDisabled)
2672 RecipType = RecipType.substr(1);
2673
2674 if (RecipType == VTName || RecipType == VTNameNoSize)
2675       return IsDisabled ? TargetLoweringBase::ReciprocalEstimate::Disabled
2676                         : TargetLoweringBase::ReciprocalEstimate::Enabled;
2677   }
2678
2679   return TargetLoweringBase::ReciprocalEstimate::Unspecified;
2680 }
2681
2682/// For the input attribute string, return the customized refinement step count
2683/// for this operation on the specified data type. If the step count does not
2684/// exist, return the ReciprocalEstimate enum value for unspecified.
2685static int getOpRefinementSteps(bool IsSqrt, EVT VT, StringRef Override) {
2686 if (Override.empty())
2687     return TargetLoweringBase::ReciprocalEstimate::Unspecified;
2688
2689 SmallVector<StringRef, 4> OverrideVector;
2690 Override.split(OverrideVector, ',');
2691 unsigned NumArgs = OverrideVector.size();
2692
2693 // Check if "all", "default", or "none" was specified.
2694 if (NumArgs == 1) {
2695 // Look for an optional setting of the number of refinement steps needed
2696 // for this type of reciprocal operation.
2697 size_t RefPos;
2698 uint8_t RefSteps;
2699 if (!parseRefinementStep(Override, RefPos, RefSteps))
2700       return TargetLoweringBase::ReciprocalEstimate::Unspecified;
2701
2702 // Split the string for further processing.
2703 Override = Override.substr(0, RefPos);
2704 assert(Override != "none" &&
2705          "Disabled reciprocals, but specified refinement steps?");
2706
2707 // If this is a general override, return the specified number of steps.
2708 if (Override == "all" || Override == "default")
2709 return RefSteps;
2710 }
2711
2712 // The attribute string may omit the size suffix ('f'/'d').
2713 std::string VTName = getReciprocalOpName(IsSqrt, VT);
2714 std::string VTNameNoSize = VTName;
2715 VTNameNoSize.pop_back();
2716
2717 for (StringRef RecipType : OverrideVector) {
2718 size_t RefPos;
2719 uint8_t RefSteps;
2720 if (!parseRefinementStep(RecipType, RefPos, RefSteps))
2721 continue;
2722
2723 RecipType = RecipType.substr(0, RefPos);
2724 if (RecipType == VTName || RecipType == VTNameNoSize)
2725 return RefSteps;
2726 }
2727
2728   return TargetLoweringBase::ReciprocalEstimate::Unspecified;
2729 }
2730
2731 int TargetLoweringBase::getRecipEstimateSqrtEnabled(EVT VT,
2732                                                     MachineFunction &MF) const {
2733   return getOpEnabled(true, VT, getRecipEstimateForFunc(MF));
2734 }
2735
2736 int TargetLoweringBase::getRecipEstimateDivEnabled(EVT VT,
2737                                                    MachineFunction &MF) const {
2738   return getOpEnabled(false, VT, getRecipEstimateForFunc(MF));
2739 }
2740
2741 int TargetLoweringBase::getSqrtRefinementSteps(EVT VT,
2742                                                MachineFunction &MF) const {
2743   return getOpRefinementSteps(true, VT, getRecipEstimateForFunc(MF));
2744 }
2745
2746 int TargetLoweringBase::getDivRefinementSteps(EVT VT,
2747                                               MachineFunction &MF) const {
2748   return getOpRefinementSteps(false, VT, getRecipEstimateForFunc(MF));
2749 }
2750
2751 bool TargetLoweringBase::isLoadBitCastBeneficial(
2752     EVT LoadVT, EVT BitcastVT, const SelectionDAG &DAG,
2753 const MachineMemOperand &MMO) const {
2754 // Single-element vectors are scalarized, so we should generally avoid having
2755 // any memory operations on such types, as they would get scalarized too.
2756 if (LoadVT.isFixedLengthVector() && BitcastVT.isFixedLengthVector() &&
2757 BitcastVT.getVectorNumElements() == 1)
2758 return false;
2759
2760 // Don't do if we could do an indexed load on the original type, but not on
2761 // the new one.
2762 if (!LoadVT.isSimple() || !BitcastVT.isSimple())
2763 return true;
2764
2765 MVT LoadMVT = LoadVT.getSimpleVT();
2766
2767 // Don't bother doing this if it's just going to be promoted again later, as
2768 // doing so might interfere with other combines.
2769 if (getOperationAction(ISD::LOAD, LoadMVT) == Promote &&
2770 getTypeToPromoteTo(ISD::LOAD, LoadMVT) == BitcastVT.getSimpleVT())
2771 return false;
2772
2773 unsigned Fast = 0;
2774 return allowsMemoryAccess(*DAG.getContext(), DAG.getDataLayout(), BitcastVT,
2775 MMO, &Fast) &&
2776 Fast;
2777}
2778
2779 void TargetLoweringBase::finalizeLowering(MachineFunction &MF) const {
2780   MF.getRegInfo().freezeReservedRegs();
2781 }
2782
2783 MachineMemOperand::Flags TargetLoweringBase::getLoadMemOperandFlags(
2784     const LoadInst &LI, const DataLayout &DL, AssumptionCache *AC,
2785 const TargetLibraryInfo *LibInfo, CodeGenOptLevel OptLevel) const {
2786   MachineMemOperand::Flags Flags = MachineMemOperand::MOLoad;
2787   if (LI.isVolatile())
2788     Flags |= MachineMemOperand::MOVolatile;
2789
2790   if (LI.hasMetadata(LLVMContext::MD_nontemporal))
2791     Flags |= MachineMemOperand::MONonTemporal;
2792
2793   if (LI.hasMetadata(LLVMContext::MD_invariant_load))
2794     Flags |= MachineMemOperand::MOInvariant;
2795
2796   // Dereferenceability analysis is expensive, skip at O0.
2797   if (OptLevel != CodeGenOptLevel::None &&
2798       isDereferenceableAndAlignedPointer(LI.getPointerOperand(), LI.getType(),
2799                                          LI.getAlign(), DL, &LI, AC,
2800                                          /*DT=*/nullptr, LibInfo)) {
2801     Flags |= MachineMemOperand::MODereferenceable;
2802   } else if (LI.hasMetadata(LLVMContext::MD_dereferenceable)) {
2803     Flags |= MachineMemOperand::MODereferenceable;
2804   }
2805
2806 Flags |= getTargetMMOFlags(LI);
2807 return Flags;
2808}
2809
2810 MachineMemOperand::Flags
2811 TargetLoweringBase::getStoreMemOperandFlags(const StoreInst &SI,
2812                                             const DataLayout &DL) const {
2813   MachineMemOperand::Flags Flags = MachineMemOperand::MOStore;
2814
2815   if (SI.isVolatile())
2816     Flags |= MachineMemOperand::MOVolatile;
2817
2818   if (SI.hasMetadata(LLVMContext::MD_nontemporal))
2819     Flags |= MachineMemOperand::MONonTemporal;
2820
2821 // FIXME: Not preserving dereferenceable
2822 Flags |= getTargetMMOFlags(SI);
2823 return Flags;
2824}
2825
2826 MachineMemOperand::Flags
2827 TargetLoweringBase::getAtomicMemOperandFlags(const Instruction &AI,
2828                                              const DataLayout &DL) const {
2829   MachineMemOperand::Flags Flags =
2830       MachineMemOperand::MOLoad | MachineMemOperand::MOStore;
2831   if (const AtomicRMWInst *RMW = dyn_cast<AtomicRMWInst>(&AI)) {
2832     if (RMW->isVolatile())
2833       Flags |= MachineMemOperand::MOVolatile;
2834   } else if (const AtomicCmpXchgInst *CmpX = dyn_cast<AtomicCmpXchgInst>(&AI)) {
2835     if (CmpX->isVolatile())
2836       Flags |= MachineMemOperand::MOVolatile;
2837   } else
2838 llvm_unreachable("not an atomic instruction");
2839
2840 // FIXME: Not preserving dereferenceable
2841 Flags |= getTargetMMOFlags(AI);
2842 return Flags;
2843}
2844
2845 MachineMemOperand::Flags TargetLoweringBase::getVPIntrinsicMemOperandFlags(
2846     const VPIntrinsic &VPIntrin) const {
2847   MachineMemOperand::Flags Flags = MachineMemOperand::MONone;
2848   Intrinsic::ID IntrinID = VPIntrin.getIntrinsicID();
2849
2850 switch (IntrinID) {
2851 default:
2852 llvm_unreachable("unexpected intrinsic. Existing code may be appropriate "
2853 "for it, but support must be explicitly enabled");
2854 case Intrinsic::vp_load:
2855 case Intrinsic::vp_gather:
2856 case Intrinsic::experimental_vp_strided_load:
2857     Flags |= MachineMemOperand::MOLoad;
2858     break;
2859 case Intrinsic::vp_store:
2860 case Intrinsic::vp_scatter:
2861 case Intrinsic::experimental_vp_strided_store:
2862     Flags |= MachineMemOperand::MOStore;
2863     break;
2864 }
2865
2866 if (VPIntrin.hasMetadata(LLVMContext::MD_nontemporal))
2867     Flags |= MachineMemOperand::MONonTemporal;
2868
2869 Flags |= getTargetMMOFlags(VPIntrin);
2870 return Flags;
2871}
2872
2873 Instruction *TargetLoweringBase::emitLeadingFence(IRBuilderBase &Builder,
2874                                                   Instruction *Inst,
2875 AtomicOrdering Ord) const {
2876 if (isReleaseOrStronger(Ord) && Inst->hasAtomicStore())
2877 return Builder.CreateFence(Ord);
2878 else
2879 return nullptr;
2880}
2881
2882 Instruction *TargetLoweringBase::emitTrailingFence(IRBuilderBase &Builder,
2883                                                    Instruction *Inst,
2884 AtomicOrdering Ord) const {
2885 if (isAcquireOrStronger(Ord))
2886 return Builder.CreateFence(Ord);
2887 else
2888 return nullptr;
2889}
2890
2891//===----------------------------------------------------------------------===//
2892// GlobalISel Hooks
2893//===----------------------------------------------------------------------===//
2894
2895 bool TargetLoweringBase::shouldLocalize(const MachineInstr &MI,
2896                                         const TargetTransformInfo *TTI) const {
2897 auto &MF = *MI.getMF();
2898 auto &MRI = MF.getRegInfo();
2899 // Assuming a spill and reload of a value has a cost of 1 instruction each,
2900 // this helper function computes the maximum number of uses we should consider
2901 // for remat. E.g. on arm64 global addresses take 2 insts to materialize. We
2902 // break even in terms of code size when the original MI has 2 users vs
2903   // choosing to potentially spill. Any more than 2 users and we have a net code
2904 // size increase. This doesn't take into account register pressure though.
2905 auto maxUses = [](unsigned RematCost) {
2906 // A cost of 1 means remats are basically free.
2907 if (RematCost == 1)
2908 return std::numeric_limits<unsigned>::max();
2909 if (RematCost == 2)
2910 return 2U;
2911
2912 // Remat is too expensive, only sink if there's one user.
2913 if (RematCost > 2)
2914 return 1U;
2915 llvm_unreachable("Unexpected remat cost");
2916 };
2917
2918 switch (MI.getOpcode()) {
2919 default:
2920 return false;
2921 // Constants-like instructions should be close to their users.
2922 // We don't want long live-ranges for them.
2923 case TargetOpcode::G_CONSTANT:
2924 case TargetOpcode::G_FCONSTANT:
2925 case TargetOpcode::G_FRAME_INDEX:
2926 case TargetOpcode::G_INTTOPTR:
2927 return true;
2928 case TargetOpcode::G_GLOBAL_VALUE: {
2929 unsigned RematCost = TTI->getGISelRematGlobalCost();
2930 Register Reg = MI.getOperand(0).getReg();
2931 unsigned MaxUses = maxUses(RematCost);
2932 if (MaxUses == UINT_MAX)
2933 return true; // Remats are "free" so always localize.
2934 return MRI.hasAtMostUserInstrs(Reg, MaxUses);
2935 }
2936 }
2937}
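// --- Illustrative sketch (not part of TargetLoweringBase.cpp) ---
// The maxUses lambda above can be read as a small budget function: the cheaper
// a G_GLOBAL_VALUE is to rematerialize, the more users it may have and still be
// localized next to them. The helper name below is invented for the example and
// assumes RematCost >= 1.
static unsigned rematUseBudget(unsigned RematCost) {
  if (RematCost == 1)
    return ~0u; // Remat is essentially free: always localize.
  if (RematCost == 2)
    return 2;   // Break-even point for code size.
  return 1;     // Expensive to remat: only worthwhile for a single user.
}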
static cl::opt< unsigned > MinimumBitTestCmpsOverride("min-bit-test-cmps", cl::init(2), cl::Hidden, cl::desc("Set minimum of largest number of comparisons " "to use bit test for switch."))
static cl::opt< bool > JumpIsExpensiveOverride("jump-is-expensive", cl::init(false), cl::desc("Do not create extra branches to split comparison logic."), cl::Hidden)
#define OP_TO_LIBCALL(Name, Enum)
static cl::opt< unsigned > MinimumJumpTableEntries("min-jump-table-entries", cl::init(4), cl::Hidden, cl::desc("Set minimum number of entries to use a jump table."))
static cl::opt< bool > DisableStrictNodeMutation("disable-strictnode-mutation", cl::desc("Don't mutate strict-float node to a legalize node"), cl::init(false), cl::Hidden)
static bool parseRefinementStep(StringRef In, size_t &Position, uint8_t &Value)
Return the character position and value (a single numeric character) of a customized refinement opera...
static cl::opt< unsigned > MaximumJumpTableSize("max-jump-table-size", cl::init(UINT_MAX), cl::Hidden, cl::desc("Set maximum size of jump tables."))
static cl::opt< unsigned > JumpTableDensity("jump-table-density", cl::init(10), cl::Hidden, cl::desc("Minimum density for building a jump table in " "a normal function"))
Minimum jump table density for normal functions.
static unsigned getVectorTypeBreakdownMVT(MVT VT, MVT &IntermediateVT, unsigned &NumIntermediates, MVT &RegisterVT, TargetLoweringBase *TLI)
static cl::opt< unsigned > MaxStoresPerMemmoveOverride("max-store-memmove", cl::init(0), cl::Hidden, cl::desc("Override target's MaxStoresPerMemmove and " "MaxStoresPerMemmoveOptSize. " "Set to 0 to use the target default."))
static std::string getReciprocalOpName(bool IsSqrt, EVT VT)
Construct a string for the given reciprocal operation of the given type.
#define LCALL5(A)
static cl::opt< unsigned > MaxStoresPerMemsetOverride("max-store-memset", cl::init(0), cl::Hidden, cl::desc("Override target's MaxStoresPerMemset and " "MaxStoresPerMemsetOptSize. " "Set to 0 to use the target default."))
static int getOpRefinementSteps(bool IsSqrt, EVT VT, StringRef Override)
For the input attribute string, return the customized refinement step count for this operation on the...
static int getOpEnabled(bool IsSqrt, EVT VT, StringRef Override)
For the input attribute string, return one of the ReciprocalEstimate enum status values (enabled,...
static StringRef getRecipEstimateForFunc(MachineFunction &MF)
Get the reciprocal estimate attribute string for a function that will override the target defaults.
static cl::opt< unsigned > MaxStoresPerMemcpyOverride("max-store-memcpy", cl::init(0), cl::Hidden, cl::desc("Override target's MaxStoresPerMemcpy and " "MaxStoresPerMemcpyOptSize. " "Set to 0 to use the target default."))
static cl::opt< unsigned > OptsizeJumpTableDensity("optsize-jump-table-density", cl::init(40), cl::Hidden, cl::desc("Minimum density for building a jump table in " "an optsize function"))
Minimum jump table density for -Os or -Oz functions.
This file describes how to lower LLVM code to machine code.
This pass exposes codegen information to IR-level passes.
Class for arbitrary precision integers.
Definition APInt.h:78
A cache of @llvm.assume calls within a function.
An instruction that atomically checks whether a specified value is in a memory location,...
an instruction that atomically reads a memory location, combines it with another value,...
const Function * getParent() const
Return the enclosing method, or null if none.
Definition BasicBlock.h:213
void setBitsInMask(const uint32_t *Mask, unsigned MaskWords=~0u)
setBitsInMask - Add '1' bits from Mask to this vector.
Definition BitVector.h:726
iterator_range< const_set_bits_iterator > set_bits() const
Definition BitVector.h:159
BlockFrequencyInfo pass uses BlockFrequencyInfoImpl implementation to estimate IR basic block frequen...
This class represents a range of values.
LLVM_ABI unsigned getActiveBits() const
Compute the maximal number of active bits needed to represent every value in this range.
LLVM_ABI ConstantRange umul_sat(const ConstantRange &Other) const
Perform an unsigned saturating multiplication of two constant ranges.
LLVM_ABI ConstantRange subtract(const APInt &CI) const
Subtract the specified constant from the endpoints of this constant range.
A parsed version of the target data layout string in and methods for querying it.
Definition DataLayout.h:64
LLVM_ABI unsigned getPointerSize(unsigned AS=0) const
The pointer representation size in bytes, rounded up to a whole number of bytes.
static constexpr ElementCount getScalable(ScalarTy MinVal)
Definition TypeSize.h:312
static constexpr ElementCount getFixed(ScalarTy MinVal)
Definition TypeSize.h:309
constexpr bool isScalar() const
Exactly one element.
Definition TypeSize.h:320
A handy container for a FunctionType+Callee-pointer pair, which can be passed around as a single enti...
const Function & getFunction() const
Definition Function.h:166
Module * getParent()
Get the module that this global value is contained inside of...
@ HiddenVisibility
The GV is hidden.
Definition GlobalValue.h:69
@ ExternalLinkage
Externally visible function.
Definition GlobalValue.h:53
Common base class shared among various IRBuilders.
Definition IRBuilder.h:114
BasicBlock * GetInsertBlock() const
Definition IRBuilder.h:201
CallInst * CreateCall(FunctionType *FTy, Value *Callee, ArrayRef< Value * > Args={}, const Twine &Name="", MDNode *FPMathTag=nullptr)
Definition IRBuilder.h:2546
LLVM_ABI bool hasAtomicStore() const LLVM_READONLY
Return true if this atomic instruction stores to memory.
bool hasMetadata() const
Return true if this instruction has any metadata attached to it.
Intrinsic::ID getIntrinsicID() const
Return the intrinsic ID of this intrinsic.
This is an important class for using LLVM in a threaded context.
Definition LLVMContext.h:68
Tracks which library functions to use for a particular subtarget.
An instruction for reading from memory.
Value * getPointerOperand()
bool isVolatile() const
Return true if this is a load from a volatile memory location.
Align getAlign() const
Return the alignment of the access that is being performed.
Machine Value Type.
SimpleValueType SimpleTy
uint64_t getScalarSizeInBits() const
bool isVector() const
Return true if this is a vector value type.
bool isScalableVector() const
Return true if this is a vector value type where the runtime length is machine dependent.
static auto all_valuetypes()
SimpleValueType Iteration.
TypeSize getSizeInBits() const
Returns the size of the specified MVT in bits.
uint64_t getFixedSizeInBits() const
Return the size of the specified fixed width value type in bits.
ElementCount getVectorElementCount() const
bool isScalarInteger() const
Return true if this is an integer, not including vectors.
static MVT getVectorVT(MVT VT, unsigned NumElements)
MVT getVectorElementType() const
bool isValid() const
Return true if this is a valid simple valuetype.
static MVT getIntegerVT(unsigned BitWidth)
static auto fp_valuetypes()
MVT getPow2VectorType() const
Widens the length of the given vector MVT up to the nearest power of 2 and returns that type.
MachineInstrBundleIterator< MachineInstr > iterator
The MachineFrameInfo class represents an abstract stack frame until prolog/epilog code is inserted.
bool isStatepointSpillSlotObjectIndex(int ObjectIdx) const
Align getObjectAlign(int ObjectIdx) const
Return the alignment of the specified stack object.
int64_t getObjectSize(int ObjectIdx) const
Return the size of the specified object.
int64_t getObjectOffset(int ObjectIdx) const
Return the assigned stack offset of the specified object from the incoming stack pointer.
MachineMemOperand * getMachineMemOperand(MachinePointerInfo PtrInfo, MachineMemOperand::Flags f, LLT MemTy, Align base_alignment, const AAMDNodes &AAInfo=AAMDNodes(), const MDNode *Ranges=nullptr, SyncScope::ID SSID=SyncScope::System, AtomicOrdering Ordering=AtomicOrdering::NotAtomic, AtomicOrdering FailureOrdering=AtomicOrdering::NotAtomic)
getMachineMemOperand - Allocate a new MachineMemOperand.
MachineFrameInfo & getFrameInfo()
getFrameInfo - Return the frame info object for the current function.
MachineRegisterInfo & getRegInfo()
getRegInfo - Return information about the registers currently in use.
const DataLayout & getDataLayout() const
Return the DataLayout attached to the Module associated to this MF.
Function & getFunction()
Return the LLVM function that this machine code represents.
const MachineInstrBuilder & addImm(int64_t Val) const
Add a new immediate operand.
const MachineInstrBuilder & add(const MachineOperand &MO) const
const MachineInstrBuilder & cloneMemRefs(const MachineInstr &OtherMI) const
Representation of each machine instruction.
unsigned getNumOperands() const
Retuns the total number of operands.
bool mayLoad(QueryType Type=AnyInBundle) const
Return true if this instruction could possibly read memory.
LLVM_ABI void tieOperands(unsigned DefIdx, unsigned UseIdx)
Add a tie between the register operands at DefIdx and UseIdx.
LLVM_ABI void addMemOperand(MachineFunction &MF, MachineMemOperand *MO)
Add a MachineMemOperand to the machine instruction.
A description of a memory reference used in the backend.
unsigned getAddrSpace() const
Flags
Flags values. These may be or'd together.
@ MOVolatile
The memory access is volatile.
@ MODereferenceable
The memory access is dereferenceable (i.e., doesn't trap).
@ MOLoad
The memory access reads data.
@ MONonTemporal
The memory access is non-temporal.
@ MOInvariant
The memory access always returns the same value (or traps).
@ MOStore
The memory access writes data.
Flags getFlags() const
Return the raw flags of the source value,.
LLVM_ABI Align getAlign() const
Return the minimum known alignment in bytes of the actual memory reference.
MachineOperand class - Representation of each machine instruction operand.
bool isReg() const
isReg - Tests if this is a MO_Register operand.
bool isFI() const
isFI - Tests if this is a MO_FrameIndex operand.
LLVM_ABI void freezeReservedRegs()
freezeReservedRegs - Called by the register allocator to freeze the set of reserved registers before ...
A Module instance is used to store all the information related to an LLVM module.
Definition Module.h:67
Class to represent pointers.
static PointerType * getUnqual(Type *ElementType)
This constructs a pointer to an object of the specified type in the default address space (address sp...
static LLVM_ABI PointerType * get(Type *ElementType, unsigned AddressSpace)
This constructs a pointer to an object of the specified type in a numbered address space.
Analysis providing profile information.
Wrapper class representing virtual and physical registers.
Definition Register.h:20
This is used to represent a portion of an LLVM function in a low-level Data Dependence DAG representa...
const DataLayout & getDataLayout() const
LLVMContext * getContext() const
This class consists of common code factored out of the SmallVector class to reduce code duplication b...
void push_back(const T &Elt)
This is a 'vector' (really, a variable-sized array), optimized for the case when the array is small.
An instruction for storing to memory.
StringRef - Represent a constant reference to a string, i.e.
Definition StringRef.h:55
std::pair< StringRef, StringRef > split(char Separator) const
Split into two substrings around the first occurrence of a separator character.
Definition StringRef.h:730
static constexpr size_t npos
Definition StringRef.h:57
constexpr StringRef substr(size_t Start, size_t N=npos) const
Return a reference to the substring from [Start, Start + N).
Definition StringRef.h:591
constexpr bool empty() const
empty - Check if the string is empty.
Definition StringRef.h:140
constexpr size_t size() const
size - Get the string size.
Definition StringRef.h:143
bool isValid() const
Returns true if this iterator is still pointing at a valid entry.
Multiway switch.
Provides information about what library functions are available for the current target.
This base class for TargetLowering contains the SelectionDAG-independent parts that can be used from ...
virtual Align getByValTypeAlignment(Type *Ty, const DataLayout &DL) const
Returns the desired alignment for ByVal or InAlloca aggregate function arguments in the caller parame...
int InstructionOpcodeToISD(unsigned Opcode) const
Get the ISD node that corresponds to the Instruction class opcode.
unsigned getBitWidthForCttzElements(EVT RetVT, ElementCount EC, bool ZeroIsPoison, const ConstantRange *VScaleRange) const
Return the minimum number of bits required to hold the maximum possible number of trailing zero vecto...
void setOperationAction(unsigned Op, MVT VT, LegalizeAction Action)
Indicate that the specified operation does not work with the specified type and indicate what to do a...
virtual void finalizeLowering(MachineFunction &MF) const
Execute target specific actions to finalize target lowering.
void initActions()
Initialize all of the actions to default values.
bool PredictableSelectIsExpensive
Tells the code generator that select is more expensive than a branch if the branch is usually predict...
Function * getSSPStackGuardCheck(const Module &M, const LibcallLoweringInfo &Libcalls) const
If the target has a standard stack protection check function that performs validation and error handl...
EVT getValueType(const DataLayout &DL, Type *Ty, bool AllowUnknown=false) const
Return the EVT corresponding to this LLVM type.
void setMinimumBitTestCmps(unsigned Val)
Set the minimum of largest of number of comparisons to generate BitTest.
unsigned MaxStoresPerMemcpyOptSize
Likewise for functions with the OptSize attribute.
MachineBasicBlock * emitPatchPoint(MachineInstr &MI, MachineBasicBlock *MBB) const
Replace/modify any TargetFrameIndex operands with a targte-dependent sequence of memory operands that...
int getRecipEstimateSqrtEnabled(EVT VT, MachineFunction &MF) const
Return a ReciprocalEstimate enum value for a square root of the given type based on the function's at...
virtual bool canOpTrap(unsigned Op, EVT VT) const
Returns true if the operation can trap for the value type.
virtual Value * getIRStackGuard(IRBuilderBase &IRB, const LibcallLoweringInfo &Libcalls) const
If the target has a standard location for the stack protector guard, returns the address of that loca...
virtual bool shouldLocalize(const MachineInstr &MI, const TargetTransformInfo *TTI) const
Check whether or not MI needs to be moved close to its uses.
virtual unsigned getMaxPermittedBytesForAlignment(MachineBasicBlock *MBB) const
Return the maximum amount of bytes allowed to be emitted when padding for alignment.
void setMaximumJumpTableSize(unsigned)
Indicate the maximum number of entries in jump tables.
virtual unsigned getMinimumJumpTableEntries() const
Return lower limit for number of blocks in a jump table.
const TargetMachine & getTargetMachine() const
unsigned MaxLoadsPerMemcmp
Specify maximum number of load instructions per memcmp call.
virtual unsigned getNumRegistersForCallingConv(LLVMContext &Context, CallingConv::ID CC, EVT VT) const
Certain targets require unusual breakdowns of certain types.
virtual MachineMemOperand::Flags getTargetMMOFlags(const Instruction &I) const
This callback is used to inspect load/store instructions and add target-specific MachineMemOperand fl...
unsigned MaxGluedStoresPerMemcpy
Specify max number of store instructions to glue in inlined memcpy.
virtual MVT getRegisterTypeForCallingConv(LLVMContext &Context, CallingConv::ID CC, EVT VT) const
Certain combinations of ABIs, Targets and features require that types are legal for some operations a...
LegalizeTypeAction
This enum indicates whether a types are legal for a target, and if not, what action should be used to...
virtual void insertSSPDeclarations(Module &M, const LibcallLoweringInfo &Libcalls) const
Inserts necessary declarations for SSP (stack protection) purpose.
virtual bool isSuitableForJumpTable(const SwitchInst *SI, uint64_t NumCases, uint64_t Range, ProfileSummaryInfo *PSI, BlockFrequencyInfo *BFI) const
Return true if lowering to a jump table is suitable for a set of case clusters which may contain NumC...
void setIndexedMaskedLoadAction(unsigned IdxMode, MVT VT, LegalizeAction Action)
Indicate that the specified indexed masked load does or does not work with the specified type and ind...
unsigned getMaxStoresPerMemcpy(bool OptSize) const
Get maximum # of store operations permitted for llvm.memcpy.
unsigned getMinimumBitTestCmps() const
Retuen the minimum of largest number of comparisons in BitTest.
virtual bool useFPRegsForHalfType() const
virtual bool isLoadBitCastBeneficial(EVT LoadVT, EVT BitcastVT, const SelectionDAG &DAG, const MachineMemOperand &MMO) const
Return true if the following transform is beneficial: fold (conv (load x)) -> (load (conv*)x) On arch...
void setIndexedLoadAction(ArrayRef< unsigned > IdxModes, MVT VT, LegalizeAction Action)
Indicate that the specified indexed load does or does not work with the specified type and indicate w...
unsigned getMaximumJumpTableSize() const
Return upper limit for number of entries in a jump table.
MachineMemOperand::Flags getLoadMemOperandFlags(const LoadInst &LI, const DataLayout &DL, AssumptionCache *AC=nullptr, const TargetLibraryInfo *LibInfo=nullptr, CodeGenOptLevel OptLevel=CodeGenOptLevel::Default) const
bool isLegalRC(const TargetRegisterInfo &TRI, const TargetRegisterClass &RC) const
Return true if the value types that can be represented by the specified register class are all legal.
virtual TargetLoweringBase::LegalizeTypeAction getPreferredVectorAction(MVT VT) const
Return the preferred vector type legalization action.
void setAtomicLoadExtAction(unsigned ExtType, MVT ValVT, MVT MemVT, LegalizeAction Action)
Let target indicate that an extending atomic load of the specified type is legal.
Value * getDefaultSafeStackPointerLocation(IRBuilderBase &IRB, bool UseTLS) const
unsigned getMaxStoresPerMemset(bool OptSize) const
Get maximum # of store operations permitted for llvm.memset.
MachineMemOperand::Flags getAtomicMemOperandFlags(const Instruction &AI, const DataLayout &DL) const
virtual bool allowsMisalignedMemoryAccesses(EVT, unsigned AddrSpace=0, Align Alignment=Align(1), MachineMemOperand::Flags Flags=MachineMemOperand::MONone, unsigned *=nullptr) const
Determine if the target supports unaligned memory accesses.
unsigned MaxStoresPerMemsetOptSize
Likewise for functions with the OptSize attribute.
EVT getShiftAmountTy(EVT LHSTy, const DataLayout &DL) const
Returns the type for the shift amount of a shift opcode.
unsigned MaxStoresPerMemmove
Specify maximum number of store instructions per memmove call.
virtual Align getPrefLoopAlignment(MachineLoop *ML=nullptr) const
Return the preferred loop alignment.
void computeRegisterProperties(const TargetRegisterInfo *TRI)
Once all of the register classes are added, this allows us to compute derived properties we expose.
MachineMemOperand::Flags getVPIntrinsicMemOperandFlags(const VPIntrinsic &VPIntrin) const
int getDivRefinementSteps(EVT VT, MachineFunction &MF) const
Return the refinement step count for a division of the given type based on the function's attributes.
virtual EVT getSetCCResultType(const DataLayout &DL, LLVMContext &Context, EVT VT) const
Return the ValueType of the result of SETCC operations.
virtual EVT getTypeToTransformTo(LLVMContext &Context, EVT VT) const
For types supported by the target, this is an identity function.
unsigned MaxStoresPerMemmoveOptSize
Likewise for functions with the OptSize attribute.
virtual MVT getPreferredSwitchConditionType(LLVMContext &Context, EVT ConditionVT) const
Returns preferred type for switch condition.
bool isTypeLegal(EVT VT) const
Return true if the target has native support for the specified value type.
int getRecipEstimateDivEnabled(EVT VT, MachineFunction &MF) const
Return a ReciprocalEstimate enum value for a division of the given type based on the function's attri...
void setIndexedStoreAction(ArrayRef< unsigned > IdxModes, MVT VT, LegalizeAction Action)
Indicate that the specified indexed store does or does not work with the specified type and indicate ...
virtual bool isJumpTableRelative() const
virtual MVT getScalarShiftAmountTy(const DataLayout &, EVT) const
Return the type to use for a scalar shift opcode, given the shifted amount type.
virtual MVT getPointerTy(const DataLayout &DL, uint32_t AS=0) const
Return the pointer type for the given address space, defaults to the pointer type from the data layou...
virtual bool isFreeAddrSpaceCast(unsigned SrcAS, unsigned DestAS) const
Returns true if a cast from SrcAS to DestAS is "cheap", such that e.g.
ISD::CondCode getSoftFloatCmpLibcallPredicate(RTLIB::LibcallImpl Call) const
Get the comparison predicate that's to be used to test the result of the comparison libcall against z...
void setIndexedMaskedStoreAction(unsigned IdxMode, MVT VT, LegalizeAction Action)
Indicate that the specified indexed masked store does or does not work with the specified type and in...
TargetLoweringBase(const TargetMachine &TM, const TargetSubtargetInfo &STI)
NOTE: The TargetMachine owns TLOF.
unsigned MaxStoresPerMemset
Specify maximum number of store instructions per memset call.
void setMinimumJumpTableEntries(unsigned Val)
Indicate the minimum number of blocks to generate jump tables.
void setTruncStoreAction(MVT ValVT, MVT MemVT, LegalizeAction Action)
Indicate that the specified truncating store does not work with the specified type and indicate what ...
virtual bool allowsMemoryAccess(LLVMContext &Context, const DataLayout &DL, EVT VT, unsigned AddrSpace=0, Align Alignment=Align(1), MachineMemOperand::Flags Flags=MachineMemOperand::MONone, unsigned *Fast=nullptr) const
Return true if the target supports a memory access of this type for the given address space and align...
unsigned MaxLoadsPerMemcmpOptSize
Likewise for functions with the OptSize attribute.
MachineMemOperand::Flags getStoreMemOperandFlags(const StoreInst &SI, const DataLayout &DL) const
void AddPromotedToType(unsigned Opc, MVT OrigVT, MVT DestVT)
If Opc/OrigVT is specified as being promoted, the promotion code defaults to trying a larger integer/...
unsigned getMinimumJumpTableDensity(bool OptForSize) const
Return lower limit of the density in a jump table.
virtual Value * getSDagStackGuard(const Module &M, const LibcallLoweringInfo &Libcalls) const
Return the variable that's previously inserted by insertSSPDeclarations, if any, otherwise return nul...
virtual std::pair< const TargetRegisterClass *, uint8_t > findRepresentativeClass(const TargetRegisterInfo *TRI, MVT VT) const
Return the largest legal super-reg register class of the register class for the specified type and it...
static StringRef getLibcallImplName(RTLIB::LibcallImpl Call)
Get the libcall routine name for the specified libcall implementation.
virtual Value * getSafeStackPointerLocation(IRBuilderBase &IRB, const LibcallLoweringInfo &Libcalls) const
Returns the target-specific address of the unsafe stack pointer.
LegalizeKind getTypeConversion(LLVMContext &Context, EVT VT) const
Return pair that represents the legalization kind (first) that needs to happen to EVT (second) in ord...
void setLoadExtAction(unsigned ExtType, MVT ValVT, MVT MemVT, LegalizeAction Action)
Indicate that the specified load with extension does not work with the specified type and indicate wh...
unsigned GatherAllAliasesMaxDepth
Depth that GatherAllAliases should continue looking for chain dependencies when trying to find a more...
int IntrinsicIDToISD(Intrinsic::ID ID) const
Get the ISD node that corresponds to the Intrinsic ID.
LegalizeTypeAction getTypeAction(LLVMContext &Context, EVT VT) const
Return how we should legalize values of this type, either it is already legal (return 'Legal') or we ...
int getSqrtRefinementSteps(EVT VT, MachineFunction &MF) const
Return the refinement step count for a square root of the given type based on the function's attribut...
bool allowsMemoryAccessForAlignment(LLVMContext &Context, const DataLayout &DL, EVT VT, unsigned AddrSpace=0, Align Alignment=Align(1), MachineMemOperand::Flags Flags=MachineMemOperand::MONone, unsigned *Fast=nullptr) const
This function returns true if the memory access is aligned or if the target allows this specific unal...
virtual Instruction * emitTrailingFence(IRBuilderBase &Builder, Instruction *Inst, AtomicOrdering Ord) const
virtual Instruction * emitLeadingFence(IRBuilderBase &Builder, Instruction *Inst, AtomicOrdering Ord) const
Inserts in the IR a target-specific intrinsic specifying a fence.
unsigned MaxStoresPerMemcpy
Specify maximum number of store instructions per memcpy call.
unsigned getMaxStoresPerMemmove(bool OptSize) const
Get maximum # of store operations permitted for llvm.memmove.
MVT getRegisterType(MVT VT) const
Return the type of registers that this ValueType will eventually require.
void setJumpIsExpensive(bool isExpensive=true)
Tells the code generator not to expand logic operations on comparison predicates into separate sequen...
LegalizeAction getOperationAction(unsigned Op, EVT VT) const
Return how this operation should be treated: either it is legal, needs to be promoted to a larger siz...
MVT getTypeToPromoteTo(unsigned Op, MVT VT) const
If the action for this operation is to promote, this method returns the ValueType to promote to.
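For example (TLI is an assumption; nothing below comes from this file), code that must mirror the promotion decision can ask both questions together:

if (TLI.getOperationAction(ISD::CTLZ, MVT::i16) == TargetLowering::Promote) {
  MVT PromotedVT = TLI.getTypeToPromoteTo(ISD::CTLZ, MVT::i16);
  // PromotedVT is the type the i16 CTLZ will actually be performed in,
  // e.g. i32 on a target that only counts leading zeros of 32-bit values.
  (void)PromotedVT;
}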
virtual bool isLegalAddressingMode(const DataLayout &DL, const AddrMode &AM, Type *Ty, unsigned AddrSpace, Instruction *I=nullptr) const
Return true if the addressing mode represented by AM is legal for this target, for a load/store of th...
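A small sketch (TLI, DL and Ctx are assumed to be in scope; they are not defined here) checking whether "base + 4*index + 16" is legal for an i32 access in address space 0:

TargetLowering::AddrMode AM;
AM.BaseOffs = 16;     // constant displacement
AM.HasBaseReg = true; // a base register is present
AM.Scale = 4;         // scaled index register
bool Legal = TLI.isLegalAddressingMode(DL, AM, Type::getInt32Ty(Ctx),
                                       /*AddrSpace=*/0);
(void)Legal;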
unsigned getVectorTypeBreakdown(LLVMContext &Context, EVT VT, EVT &IntermediateVT, unsigned &NumIntermediates, MVT &RegisterVT) const
Vector types are broken down into some number of legal first class types.
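For illustration (TLI and Ctx are assumptions), this is how a caller discovers that, say, v8f32 may be carried as two v4f32 registers on a target with 128-bit vector registers:

EVT IntermediateVT;
MVT RegisterVT;
unsigned NumIntermediates;
unsigned NumRegs = TLI.getVectorTypeBreakdown(Ctx, MVT::v8f32, IntermediateVT,
                                              NumIntermediates, RegisterVT);
// NumRegs registers of type RegisterVT hold NumIntermediates pieces of type
// IntermediateVT that together make up the original vector.
(void)NumRegs;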
std::pair< LegalizeTypeAction, EVT > LegalizeKind
LegalizeKind holds the legalization kind that needs to happen to EVT in order to type-legalize it.
This class defines information used to lower LLVM code to legal SelectionDAG operators that the targe...
virtual EVT getTypeForExtReturn(LLVMContext &Context, EVT VT, ISD::NodeType) const
Return the type that should be used to zero or sign extend a zeroext/signext integer return value.
Primary interface to the complete machine description for the target machine.
bool isPositionIndependent() const
TargetRegisterInfo base class - We assume that the target defines a static array of TargetRegisterDes...
TargetSubtargetInfo - Generic base class for all target subtargets.
This pass provides access to the codegen interfaces that are needed for IR-level transformations.
Twine - A lightweight data structure for efficiently representing the concatenation of temporary valu...
Definition Twine.h:82
The instances of the Type class are immutable: once they are created, they are never changed.
Definition Type.h:46
This is the common base class for vector predication intrinsics.
LLVM Value Representation.
Definition Value.h:75
Type * getType() const
All values are typed, get the type of this value.
Definition Value.h:255
constexpr LeafTy coefficientNextPowerOf2() const
Definition TypeSize.h:260
constexpr bool isScalable() const
Returns whether the quantity is scaled by a runtime quantity (vscale).
Definition TypeSize.h:168
constexpr ScalarTy getKnownMinValue() const
Returns the minimum value this quantity can represent.
Definition TypeSize.h:165
constexpr LeafTy divideCoefficientBy(ScalarTy RHS) const
We do not provide the '/' operator here because division for polynomial types does not work in the sa...
Definition TypeSize.h:252
CallInst * Call
#define llvm_unreachable(msg)
Marks that the current location is not supposed to be reachable.
unsigned ID
LLVM IR allows arbitrary numbers to be used as calling convention identifiers.
Definition CallingConv.h:24
@ Fast
Attempts to make calls as fast as possible (e.g.
Definition CallingConv.h:41
NodeType
ISD::NodeType enum - This enum defines the target-independent operators for a SelectionDAG.
Definition ISDOpcodes.h:41
@ SETCC
SetCC operator - This evaluates to a true value iff the condition is true.
Definition ISDOpcodes.h:819
@ MERGE_VALUES
MERGE_VALUES - This node takes multiple discrete operands and returns them all as its individual resu...
Definition ISDOpcodes.h:261
@ CTLZ_ZERO_UNDEF
Definition ISDOpcodes.h:788
@ DELETED_NODE
DELETED_NODE - This is an illegal value that is used to catch errors.
Definition ISDOpcodes.h:45
@ SET_FPENV
Sets the current floating-point environment.
@ LOOP_DEPENDENCE_RAW_MASK
@ VECREDUCE_SEQ_FADD
Generic reduction nodes.
@ FGETSIGN
INT = FGETSIGN(FP) - Return the sign bit of the specified floating point value as an integer 0/1 valu...
Definition ISDOpcodes.h:538
@ STACKADDRESS
STACKADDRESS - Represents the llvm.stackaddress intrinsic.
Definition ISDOpcodes.h:127
@ SMULFIX
RESULT = [US]MULFIX(LHS, RHS, SCALE) - Perform fixed point multiplication on 2 integers with the same...
Definition ISDOpcodes.h:394
@ ADDC
Carry-setting nodes for multiple precision addition and subtraction.
Definition ISDOpcodes.h:294
@ RESET_FPENV
Set floating-point environment to default state.
@ FMAD
FMAD - Perform a * b + c, while getting the same result as the separately rounded operations.
Definition ISDOpcodes.h:522
@ ADD
Simple integer binary arithmetic operators.
Definition ISDOpcodes.h:264
@ LOAD
LOAD and STORE have token chains as their first operand, then the same operands as an LLVM load/store...
@ SMULFIXSAT
Same as the corresponding unsaturated fixed point instructions, but the result is clamped between the...
Definition ISDOpcodes.h:400
@ SET_FPMODE
Sets the current dynamic floating-point control modes.
@ ANY_EXTEND
ANY_EXTEND - Used for integer types. The high bits are undefined.
Definition ISDOpcodes.h:853
@ CTTZ_ELTS
Returns the number of trailing (least significant) zero elements in a vector.
@ FMA
FMA - Perform a * b + c with no intermediate rounding step.
Definition ISDOpcodes.h:518
@ VECTOR_FIND_LAST_ACTIVE
Finds the index of the last active mask element. Operands: Mask.
@ FMODF
FMODF - Decomposes the operand into integral and fractional parts, each having the same type and sign...
@ FATAN2
FATAN2 - atan2, inspired by libm.
@ FSINCOSPI
FSINCOSPI - Compute both the sine and cosine times pi more accurately than FSINCOS(pi*x),...
@ ATOMIC_CMP_SWAP_WITH_SUCCESS
Val, Success, OUTCHAIN = ATOMIC_CMP_SWAP_WITH_SUCCESS(INCHAIN, ptr, cmp, swap) N.b.
@ SINT_TO_FP
[SU]INT_TO_FP - These operators convert integers (whose interpreted sign depends on the first letter)...
Definition ISDOpcodes.h:880
@ CONCAT_VECTORS
CONCAT_VECTORS(VECTOR0, VECTOR1, ...) - Given a number of values of vector type with the same length ...
Definition ISDOpcodes.h:584
@ VECREDUCE_FMAX
FMIN/FMAX nodes can have flags, for NaN/NoNaN variants.
@ FADD
Simple binary floating point operators.
Definition ISDOpcodes.h:417
@ VECREDUCE_FMAXIMUM
FMINIMUM/FMAXIMUM nodes propagate NaNs and signed zeroes using the llvm.minimum and llvm....
@ ABS
ABS - Determine the unsigned absolute value of a signed integer value of the same bitwidth.
Definition ISDOpcodes.h:747
@ RESET_FPMODE
Sets default dynamic floating-point control modes.
@ SIGN_EXTEND_VECTOR_INREG
SIGN_EXTEND_VECTOR_INREG(Vector) - This operator represents an in-register sign-extension of the low ...
Definition ISDOpcodes.h:910
@ FMULADD
FMULADD - Performs a * b + c, with or without intermediate rounding.
Definition ISDOpcodes.h:528
@ BITCAST
BITCAST - This operator converts between integer, vector and FP values, as if the value was stored to...
Definition ISDOpcodes.h:993
@ CLMUL
Carry-less multiplication operations.
Definition ISDOpcodes.h:774
@ FLDEXP
FLDEXP - ldexp, inspired by libm (op0 * 2**op1).
@ SDIVFIX
RESULT = [US]DIVFIX(LHS, RHS, SCALE) - Perform fixed point division on 2 integers with the same width...
Definition ISDOpcodes.h:407
@ BUILTIN_OP_END
BUILTIN_OP_END - This must be the last enum value in this list.
@ CONVERT_FROM_ARBITRARY_FP
CONVERT_FROM_ARBITRARY_FP - This operator converts from an arbitrary floating-point represented as an...
@ SIGN_EXTEND
Conversion operators.
Definition ISDOpcodes.h:844
@ AVGCEILS
AVGCEILS/AVGCEILU - Rounding averaging add - Add two integers using an integer of type i[N+2],...
Definition ISDOpcodes.h:715
@ READSTEADYCOUNTER
READSTEADYCOUNTER - This corresponds to the readsteadycounter intrinsic.
@ VECREDUCE_FADD
These reductions have relaxed evaluation order semantics, and have a single vector operand.
@ CTTZ_ZERO_UNDEF
Bit counting operators with an undefined result for zero inputs.
Definition ISDOpcodes.h:787
@ PREFETCH
PREFETCH - This corresponds to a prefetch intrinsic.
@ TRUNCATE_SSAT_U
Definition ISDOpcodes.h:873
@ FSINCOS
FSINCOS - Compute both fsin and fcos as a single operation.
@ SETCCCARRY
Like SetCC, ops #0 and #1 are the LHS and RHS operands to compare, but op #2 is a boolean indicating ...
Definition ISDOpcodes.h:827
@ FNEG
Perform various unary floating-point operations inspired by libm.
@ SSUBO
Same for subtraction.
Definition ISDOpcodes.h:352
@ FCANONICALIZE
Returns platform specific canonical encoding of a floating point number.
Definition ISDOpcodes.h:541
@ IS_FPCLASS
Performs a check of floating point class property, defined by IEEE-754.
Definition ISDOpcodes.h:548
@ SSUBSAT
RESULT = [US]SUBSAT(LHS, RHS) - Perform saturation subtraction on 2 integers with the same bit width ...
Definition ISDOpcodes.h:374
@ SELECT
Select(COND, TRUEVAL, FALSEVAL).
Definition ISDOpcodes.h:796
@ SPLAT_VECTOR
SPLAT_VECTOR(VAL) - Returns a vector with the scalar value VAL duplicated in all lanes.
Definition ISDOpcodes.h:672
@ SADDO
RESULT, BOOL = [SU]ADDO(LHS, RHS) - Overflow-aware nodes for addition.
Definition ISDOpcodes.h:348
@ CTLS
Count leading redundant sign bits.
Definition ISDOpcodes.h:792
@ VECREDUCE_ADD
Integer reductions may have a result type larger than the vector element type.
@ GET_FPMODE
Reads the current dynamic floating-point control modes.
@ GET_FPENV
Gets the current floating-point environment.
@ SHL
Shift and rotation operations.
Definition ISDOpcodes.h:765
@ VECTOR_SHUFFLE
VECTOR_SHUFFLE(VEC1, VEC2) - Returns a vector, of the same type as VEC1/VEC2.
Definition ISDOpcodes.h:649
@ FMINNUM_IEEE
FMINNUM_IEEE/FMAXNUM_IEEE - Perform floating-point minimumNumber or maximumNumber on two values,...
@ EXTRACT_VECTOR_ELT
EXTRACT_VECTOR_ELT(VECTOR, IDX) - Returns a single element from VECTOR identified by the (potentially...
Definition ISDOpcodes.h:576
@ ZERO_EXTEND
ZERO_EXTEND - Used for integer types, zeroing the new bits.
Definition ISDOpcodes.h:850
@ DEBUGTRAP
DEBUGTRAP - Trap intended to get the attention of a debugger.
@ ATOMIC_CMP_SWAP
Val, OUTCHAIN = ATOMIC_CMP_SWAP(INCHAIN, ptr, cmp, swap) For double-word atomic operations: ValLo,...
@ FMINNUM
FMINNUM/FMAXNUM - Perform floating-point minimum or maximum on two values, following the IEEE-754 definition...
@ UBSANTRAP
UBSANTRAP - Trap with an immediate describing the kind of sanitizer failure.
@ SSHLSAT
RESULT = [US]SHLSAT(LHS, RHS) - Perform saturation left shift.
Definition ISDOpcodes.h:386
@ SMULO
Same for multiplication.
Definition ISDOpcodes.h:356
@ VECTOR_SPLICE_LEFT
VECTOR_SPLICE_LEFT(VEC1, VEC2, OFFSET) - Shifts CONCAT_VECTORS(VEC1, VEC2) left by OFFSET elements an...
Definition ISDOpcodes.h:653
@ ANY_EXTEND_VECTOR_INREG
ANY_EXTEND_VECTOR_INREG(Vector) - This operator represents an in-register any-extension of the low la...
Definition ISDOpcodes.h:899
@ SIGN_EXTEND_INREG
SIGN_EXTEND_INREG - This operator atomically performs a SHL/SRA pair to sign extend a small value in ...
Definition ISDOpcodes.h:888
@ SMIN
[US]{MIN/MAX} - Binary minimum or maximum of signed or unsigned integers.
Definition ISDOpcodes.h:727
@ MASKED_UDIV
Masked vector arithmetic that returns poison on disabled lanes.
@ SDIVFIXSAT
Same as the corresponding unsaturated fixed point instructions, but the result is clamped between the...
Definition ISDOpcodes.h:413
@ FP_EXTEND
X = FP_EXTEND(Y) - Extend a smaller FP type into a larger FP type.
Definition ISDOpcodes.h:978
@ UADDO_CARRY
Carry-using nodes for multiple precision addition and subtraction.
Definition ISDOpcodes.h:328
@ FMINIMUM
FMINIMUM/FMAXIMUM - NaN-propagating minimum/maximum that also treat -0.0 as less than 0....
@ FP_TO_SINT
FP_TO_[US]INT - Convert a floating point value to a signed or unsigned integer.
Definition ISDOpcodes.h:926
@ READCYCLECOUNTER
READCYCLECOUNTER - This corresponds to the readcyclecounter intrinsic.
@ AND
Bitwise operators - logical and, logical or, logical xor.
Definition ISDOpcodes.h:739
@ TRAP
TRAP - Trapping instruction.
@ GET_FPENV_MEM
Gets the current floating-point environment.
@ SCMP
[US]CMP - 3-way comparison of signed or unsigned integers.
Definition ISDOpcodes.h:735
@ AVGFLOORS
AVGFLOORS/AVGFLOORU - Averaging add - Add two integers using an integer of type i[N+1],...
Definition ISDOpcodes.h:710
@ VECTOR_SPLICE_RIGHT
VECTOR_SPLICE_RIGHT(VEC1, VEC2, OFFSET) - Shifts CONCAT_VECTORS(VEC1,VEC2) right by OFFSET elements a...
Definition ISDOpcodes.h:657
@ ADDE
Carry-using nodes for multiple precision addition and subtraction.
Definition ISDOpcodes.h:304
@ FREEZE
FREEZE - FREEZE(VAL) returns an arbitrary value if VAL is UNDEF (or is evaluated to UNDEF),...
Definition ISDOpcodes.h:241
@ INSERT_VECTOR_ELT
INSERT_VECTOR_ELT(VECTOR, VAL, IDX) - Returns VECTOR with the element at IDX replaced with VAL.
Definition ISDOpcodes.h:565
@ ATOMIC_SWAP
Val, OUTCHAIN = ATOMIC_SWAP(INCHAIN, ptr, amt) Val, OUTCHAIN = ATOMIC_LOAD_[OpName](INCHAIN,...
@ FFREXP
FFREXP - frexp, extract fractional and exponent component of a floating-point value.
@ FP_ROUND
X = FP_ROUND(Y, TRUNC) - Rounding 'Y' from a larger floating point type down to the precision of the ...
Definition ISDOpcodes.h:959
@ VECTOR_COMPRESS
VECTOR_COMPRESS(Vec, Mask, Passthru) consecutively place vector elements based on mask e....
Definition ISDOpcodes.h:699
@ CLEAR_CACHE
llvm.clear_cache intrinsic. Operands: Input Chain, Start Address, End Address. Outputs: Output Chain
@ ZERO_EXTEND_VECTOR_INREG
ZERO_EXTEND_VECTOR_INREG(Vector) - This operator represents an in-register zero-extension of the low ...
Definition ISDOpcodes.h:921
@ ADDRSPACECAST
ADDRSPACECAST - This operator converts between pointers of different address spaces.
Definition ISDOpcodes.h:997
@ FP_TO_SINT_SAT
FP_TO_[US]INT_SAT - Convert floating point value in operand 0 to a signed or unsigned scalar integer ...
Definition ISDOpcodes.h:945
@ VECREDUCE_FMINIMUM
@ TRUNCATE
TRUNCATE - Completely drop the high bits.
Definition ISDOpcodes.h:856
@ VECREDUCE_SEQ_FMUL
@ FCOPYSIGN
FCOPYSIGN(X, Y) - Return the value of X with the sign of Y.
Definition ISDOpcodes.h:534
@ SADDSAT
RESULT = [US]ADDSAT(LHS, RHS) - Perform saturation addition on 2 integers with the same bit width (W)...
Definition ISDOpcodes.h:365
@ GET_DYNAMIC_AREA_OFFSET
GET_DYNAMIC_AREA_OFFSET - get offset from native SP to the address of the most recent dynamic alloca.
@ CTTZ_ELTS_ZERO_POISON
@ SET_FPENV_MEM
Sets the current floating point environment.
@ FMINIMUMNUM
FMINIMUMNUM/FMAXIMUMNUM - minimumnum/maximumnum that is same with FMINNUM_IEEE and FMAXNUM_IEEE besid...
@ TRUNCATE_SSAT_S
TRUNCATE_[SU]SAT_[SU] - Truncate for saturated operand [SU] located in middle, prefix for SAT means i...
Definition ISDOpcodes.h:871
@ ABDS
ABDS/ABDU - Absolute difference - Return the absolute difference between two numbers interpreted as s...
Definition ISDOpcodes.h:722
@ TRUNCATE_USAT_U
Definition ISDOpcodes.h:875
@ SADDO_CARRY
Carry-using overflow-aware nodes for multiple precision addition and subtraction.
Definition ISDOpcodes.h:338
@ LOOP_DEPENDENCE_WAR_MASK
The llvm.loop.dependence.
CondCode
ISD::CondCode enum - These are ordered carefully to make the bitfields below work out,...
static const int LAST_INDEXED_MODE
LLVM_ABI Libcall getPOWI(EVT RetVT)
getPOWI - Return the POWI_* value for the given types, or UNKNOWN_LIBCALL if there is none.
LLVM_ABI Libcall getSINTTOFP(EVT OpVT, EVT RetVT)
getSINTTOFP - Return the SINTTOFP_*_* value for the given types, or UNKNOWN_LIBCALL if there is none.
LLVM_ABI Libcall getUREM(EVT VT)
LLVM_ABI Libcall getSHL(EVT VT)
LLVM_ABI Libcall getSYNC(unsigned Opc, MVT VT)
Return the SYNC_FETCH_AND_* value for the given opcode and type, or UNKNOWN_LIBCALL if there is none.
LLVM_ABI Libcall getLDEXP(EVT RetVT)
getLDEXP - Return the LDEXP_* value for the given types, or UNKNOWN_LIBCALL if there is none.
LLVM_ABI Libcall getUINTTOFP(EVT OpVT, EVT RetVT)
getUINTTOFP - Return the UINTTOFP_*_* value for the given types, or UNKNOWN_LIBCALL if there is none.
LLVM_ABI Libcall getFREXP(EVT RetVT)
getFREXP - Return the FREXP_* value for the given types, or UNKNOWN_LIBCALL if there is none.
LLVM_ABI Libcall getREM(EVT VT)
LLVM_ABI Libcall getSINCOSPI(EVT RetVT)
getSINCOSPI - Return the SINCOSPI_* value for the given types, or UNKNOWN_LIBCALL if there is none.
LLVM_ABI Libcall getSDIV(EVT VT)
LLVM_ABI Libcall getSRL(EVT VT)
LLVM_ABI Libcall getMEMCPY_ELEMENT_UNORDERED_ATOMIC(uint64_t ElementSize)
getMEMCPY_ELEMENT_UNORDERED_ATOMIC - Return MEMCPY_ELEMENT_UNORDERED_ATOMIC_* value for the given ele...
LLVM_ABI Libcall getSRA(EVT VT)
LLVM_ABI Libcall getUDIV(EVT VT)
LLVM_ABI Libcall getFPLibCall(EVT VT, Libcall Call_F32, Libcall Call_F64, Libcall Call_F80, Libcall Call_F128, Libcall Call_PPCF128)
GetFPLibCall - Helper to return the right libcall for the given floating point type,...
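As a sketch (VT is an assumption), the per-operation helpers in this namespace are typically thin wrappers over this routine, selecting an entry by floating-point type:

RTLIB::Libcall LC = RTLIB::getFPLibCall(VT, RTLIB::SIN_F32, RTLIB::SIN_F64,
                                        RTLIB::SIN_F80, RTLIB::SIN_F128,
                                        RTLIB::SIN_PPCF128);
// LC is UNKNOWN_LIBCALL when VT has no corresponding entry.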
LLVM_ABI Libcall getFPTOUINT(EVT OpVT, EVT RetVT)
getFPTOUINT - Return the FPTOUINT_*_* value for the given types, or UNKNOWN_LIBCALL if there is none.
LLVM_ABI Libcall getLLROUND(EVT VT)
LLVM_ABI Libcall getCOS(EVT RetVT)
Return the COS_* value for the given types, or UNKNOWN_LIBCALL if there is none.
LLVM_ABI Libcall getLROUND(EVT VT)
LLVM_ABI Libcall getMODF(EVT VT)
getMODF - Return the MODF_* value for the given types, or UNKNOWN_LIBCALL if there is none.
LLVM_ABI Libcall getFPTOSINT(EVT OpVT, EVT RetVT)
getFPTOSINT - Return the FPTOSINT_*_* value for the given types, or UNKNOWN_LIBCALL if there is none.
LLVM_ABI Libcall getLRINT(EVT RetVT)
LLVM_ABI Libcall getCBRT(EVT RetVT)
getCBRT - Return the CBRT_* value for the given types, or UNKNOWN_LIBCALL if there is none.
LLVM_ABI Libcall getOUTLINE_ATOMIC(unsigned Opc, AtomicOrdering Order, MVT VT)
Return the outline atomics value for the given opcode, atomic ordering and type, or UNKNOWN_LIBCALL i...
LLVM_ABI Libcall getLLRINT(EVT RetVT)
LLVM_ABI Libcall getFPEXT(EVT OpVT, EVT RetVT)
getFPEXT - Return the FPEXT_*_* value for the given types, or UNKNOWN_LIBCALL if there is none.
LLVM_ABI Libcall getFPROUND(EVT OpVT, EVT RetVT)
getFPROUND - Return the FPROUND_*_* value for the given types, or UNKNOWN_LIBCALL if there is none.
LLVM_ABI Libcall getSREM(EVT VT)
LLVM_ABI Libcall getSIN(EVT RetVT)
Return the SIN_* value for the given types, or UNKNOWN_LIBCALL if there is none.
LLVM_ABI Libcall getMEMSET_ELEMENT_UNORDERED_ATOMIC(uint64_t ElementSize)
getMEMSET_ELEMENT_UNORDERED_ATOMIC - Return MEMSET_ELEMENT_UNORDERED_ATOMIC_* value for the given ele...
LLVM_ABI Libcall getSINCOS_STRET(EVT RetVT)
Return the SINCOS_STRET_ value for the given types, or UNKNOWN_LIBCALL if there is none.
LLVM_ABI Libcall getPOW(EVT RetVT)
getPOW - Return the POW_* value for the given types, or UNKNOWN_LIBCALL if there is none.
LLVM_ABI Libcall getOutlineAtomicHelper(const Libcall(&LC)[5][4], AtomicOrdering Order, uint64_t MemSize)
Return the outline atomics value for the given atomic ordering, access size and set of libcalls for a...
LLVM_ABI Libcall getMUL(EVT VT)
LLVM_ABI Libcall getCTPOP(EVT VT)
LLVM_ABI Libcall getSINCOS(EVT RetVT)
getSINCOS - Return the SINCOS_* value for the given types, or UNKNOWN_LIBCALL if there is none.
LLVM_ABI Libcall getMULO(EVT VT)
LLVM_ABI Libcall getMEMMOVE_ELEMENT_UNORDERED_ATOMIC(uint64_t ElementSize)
getMEMMOVE_ELEMENT_UNORDERED_ATOMIC - Return MEMMOVE_ELEMENT_UNORDERED_ATOMIC_* value for the given e...
initializer< Ty > init(const Ty &Val)
This is an optimization pass for GlobalISel generic memory operations.
unsigned Log2_32_Ceil(uint32_t Value)
Return the ceil log base 2 of the specified value, 32 if the value is zero.
Definition MathExtras.h:344
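A quick worked illustration of the rounding-up behaviour: Log2_32_Ceil(1) == 0, Log2_32_Ceil(5) == 3, Log2_32_Ceil(8) == 3, and Log2_32_Ceil(0) == 32 as documented above. A typical use (NumCases is an assumption) is computing how many bits are needed to index a table:

unsigned IndexBits = llvm::Log2_32_Ceil(NumCases);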
void fill(R &&Range, T &&Value)
Provide wrappers to std::fill which take ranges instead of having to pass begin/end explicitly.
Definition STLExtras.h:1758
LLVM_ABI void GetReturnInfo(CallingConv::ID CC, Type *ReturnType, AttributeList attr, SmallVectorImpl< ISD::OutputArg > &Outs, const TargetLowering &TLI, const DataLayout &DL)
Given an LLVM IR type and return type attributes, compute the return value EVTs and flags,...
MachineInstrBuilder BuildMI(MachineFunction &MF, const MIMetadata &MIMD, const MCInstrDesc &MCID)
Builder interface. Specify how to create the initial instruction itself.
InstructionCost Cost
auto enum_seq(EnumT Begin, EnumT End)
Iterate over an enum type from Begin up to - but not including - End.
Definition Sequence.h:337
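A compilable sketch (the Channel enum is invented for illustration): the range is half-open, and enums without iteration traits require the explicit opt-in tag documented below.

#include "llvm/ADT/Sequence.h"

enum class Channel { R, G, B, End };

void visitChannels() {
  for (Channel C : llvm::enum_seq(Channel::R, Channel::End,
                                  llvm::force_iteration_on_noniterable_enum)) {
    (void)C; // visits R, G, B; End itself is excluded
  }
}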
decltype(auto) dyn_cast(const From &Val)
dyn_cast<X> - Return the argument parameter cast to the specified type.
Definition Casting.h:643
LLVM_ABI bool isDereferenceableAndAlignedPointer(const Value *V, Type *Ty, Align Alignment, const DataLayout &DL, const Instruction *CtxI=nullptr, AssumptionCache *AC=nullptr, const DominatorTree *DT=nullptr, const TargetLibraryInfo *TLI=nullptr)
Returns true if V is always a dereferenceable pointer with alignment greater than or equal to that requested.
Definition Loads.cpp:229
LLVM_ABI bool shouldOptimizeForSize(const MachineFunction *MF, ProfileSummaryInfo *PSI, const MachineBlockFrequencyInfo *BFI, PGSOQueryType QueryType=PGSOQueryType::Other)
Returns true if machine function MF is suggested to be size-optimized based on the profile.
constexpr force_iteration_on_noniterable_enum_t force_iteration_on_noniterable_enum
Definition Sequence.h:109
T bit_ceil(T Value)
Returns the smallest integral power of two no smaller than Value if Value is nonzero.
Definition bit.h:362
void ComputeValueTypes(const DataLayout &DL, Type *Ty, SmallVectorImpl< Type * > &Types, SmallVectorImpl< TypeSize > *Offsets=nullptr, TypeSize StartingOffset=TypeSize::getZero())
Given an LLVM IR type, compute non-aggregate subtypes.
Definition Analysis.cpp:72
bool isReleaseOrStronger(AtomicOrdering AO)
auto dyn_cast_or_null(const Y &Val)
Definition Casting.h:753
constexpr bool has_single_bit(T Value) noexcept
Definition bit.h:149
constexpr bool isPowerOf2_32(uint32_t Value)
Return true if the argument is a power of two > 0.
Definition MathExtras.h:279
bool none_of(R &&Range, UnaryPredicate P)
Provide wrappers to std::none_of which take ranges instead of having to pass begin/end explicitly.
Definition STLExtras.h:1752
LLVM_ABI void report_fatal_error(Error Err, bool gen_crash_diag=true)
Definition Error.cpp:163
bool isDigit(char C)
Checks if character C is one of the 10 decimal digits.
CodeGenOptLevel
Code generation optimization level.
Definition CodeGen.h:82
AtomicOrdering
Atomic ordering for LLVM's memory model.
LLVM_ABI EVT getApproximateEVTForLLT(LLT Ty, LLVMContext &Ctx)
constexpr T divideCeil(U Numerator, V Denominator)
Returns the integer ceil(Numerator / Denominator).
Definition MathExtras.h:394
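Worked examples: divideCeil(7, 3) == 3, divideCeil(6, 3) == 2, divideCeil(1, 8) == 1. A common use (SizeInBytes and ChunkSize are assumptions) is counting how many fixed-size chunks cover a buffer:

uint64_t NumChunks = llvm::divideCeil(SizeInBytes, ChunkSize);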
TargetTransformInfo TTI
@ Mul
Product of integers.
@ Xor
Bitwise or logical XOR of integers.
@ FMul
Product of floats.
@ Sub
Subtraction of integers.
@ Add
Sum of integers.
@ FAdd
Sum of floats.
DWARFExpression::Operation Op
bool isAcquireOrStronger(AtomicOrdering AO)
This struct is a compact representation of a valid (non-zero power of two) alignment.
Definition Alignment.h:39
Extended Value Type.
Definition ValueTypes.h:35
EVT getPow2VectorType(LLVMContext &Context) const
Widens the length of the given vector EVT up to the nearest power of 2 and returns that type.
Definition ValueTypes.h:493
bool isSimple() const
Test if the given EVT is simple (as opposed to being extended).
Definition ValueTypes.h:145
static EVT getVectorVT(LLVMContext &Context, EVT VT, unsigned NumElements, bool IsScalable=false)
Returns the EVT that represents a vector NumElements in length, where each element is of type VT.
Definition ValueTypes.h:70
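For illustration (Ctx is an assumption standing for an LLVMContext), both fixed-length and scalable vector types can be formed this way:

EVT V4F32 = EVT::getVectorVT(Ctx, MVT::f32, 4);                        // <4 x float>
EVT NxV2I64 = EVT::getVectorVT(Ctx, MVT::i64, 2, /*IsScalable=*/true); // <vscale x 2 x i64>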
ElementCount getVectorElementCount() const
Definition ValueTypes.h:358
TypeSize getSizeInBits() const
Return the size of the specified value type in bits.
Definition ValueTypes.h:381
uint64_t getScalarSizeInBits() const
Definition ValueTypes.h:393
bool isPow2VectorType() const
Returns true if the given vector is a power of 2.
Definition ValueTypes.h:486
MVT getSimpleVT() const
Return the SimpleValueType held in the specified simple EVT.
Definition ValueTypes.h:324
static EVT getIntegerVT(LLVMContext &Context, unsigned BitWidth)
Returns the EVT that represents an integer with the given number of bits.
Definition ValueTypes.h:61
bool isFixedLengthVector() const
Definition ValueTypes.h:189
EVT getRoundIntegerType(LLVMContext &Context) const
Rounds the bit-width of the given integer EVT up to the nearest power of two (and at least to eight),...
Definition ValueTypes.h:427
bool isVector() const
Return true if this is a vector value type.
Definition ValueTypes.h:176
EVT getScalarType() const
If this is a vector type, return the element type, otherwise return this.
Definition ValueTypes.h:331
LLVM_ABI Type * getTypeForEVT(LLVMContext &Context) const
This method returns an LLVM type corresponding to the specified EVT.
EVT getVectorElementType() const
Given a vector type, return the type of each element.
Definition ValueTypes.h:336
unsigned getVectorNumElements() const
Given a vector type, return the number of elements it contains.
Definition ValueTypes.h:344
bool isZeroSized() const
Test if the given EVT has zero size; this will fail if called on a scalable type.
Definition ValueTypes.h:140
EVT getHalfNumVectorElementsVT(LLVMContext &Context) const
Definition ValueTypes.h:469
bool isInteger() const
Return true if this is an integer or a vector integer type.
Definition ValueTypes.h:160
OutputArg - This struct carries flags and a value for a single outgoing (actual) argument or outgoing...
Matching combinators.
static LLVM_ABI MachinePointerInfo getFixedStack(MachineFunction &MF, int FI, int64_t Offset=0)
Return a MachinePointerInfo record that refers to the specified FrameIndex.
static StringRef getLibcallImplName(RTLIB::LibcallImpl CallImpl)
Get the libcall routine name for the specified libcall implementation.
static RTLIB::Libcall getLibcallFromImpl(RTLIB::LibcallImpl Impl)
Return the libcall provided by Impl.
This represents an addressing mode of: BaseGV + BaseOffs + BaseReg + Scale*ScaleReg + ScalableOffset*...