TargetLoweringBase.cpp (LLVM 7.0.0svn)
1 //===- TargetLoweringBase.cpp - Implement the TargetLoweringBase class ----===//
2 //
3 // The LLVM Compiler Infrastructure
4 //
5 // This file is distributed under the University of Illinois Open Source
6 // License. See LICENSE.TXT for details.
7 //
8 //===----------------------------------------------------------------------===//
9 //
10 // This implements the TargetLoweringBase class.
11 //
12 //===----------------------------------------------------------------------===//
13 
14 #include "llvm/ADT/BitVector.h"
15 #include "llvm/ADT/STLExtras.h"
16 #include "llvm/ADT/SmallVector.h"
17 #include "llvm/ADT/StringExtras.h"
18 #include "llvm/ADT/StringRef.h"
19 #include "llvm/ADT/Triple.h"
20 #include "llvm/ADT/Twine.h"
21 #include "llvm/CodeGen/Analysis.h"
32 #include "llvm/CodeGen/StackMaps.h"
37 #include "llvm/IR/Attributes.h"
38 #include "llvm/IR/CallingConv.h"
39 #include "llvm/IR/DataLayout.h"
40 #include "llvm/IR/DerivedTypes.h"
41 #include "llvm/IR/Function.h"
42 #include "llvm/IR/GlobalValue.h"
43 #include "llvm/IR/GlobalVariable.h"
44 #include "llvm/IR/IRBuilder.h"
45 #include "llvm/IR/Module.h"
46 #include "llvm/IR/Type.h"
48 #include "llvm/Support/Casting.h"
50 #include "llvm/Support/Compiler.h"
55 #include <algorithm>
56 #include <cassert>
57 #include <cstddef>
58 #include <cstdint>
59 #include <cstring>
60 #include <iterator>
61 #include <string>
62 #include <tuple>
63 #include <utility>
64 
65 using namespace llvm;
66 
67 static cl::opt<bool> JumpIsExpensiveOverride(
68  "jump-is-expensive", cl::init(false),
69  cl::desc("Do not create extra branches to split comparison logic."),
70  cl::Hidden);
71 
72 static cl::opt<int> MinimumJumpTableEntries
73  ("min-jump-table-entries", cl::init(4), cl::Hidden,
74  cl::desc("Set minimum number of entries to use a jump table."));
75 
76 static cl::opt<unsigned> MaximumJumpTableSize
77  ("max-jump-table-size", cl::init(0), cl::Hidden,
78  cl::desc("Set maximum size of jump tables; zero for no limit."));
79 
80 /// Minimum jump table density for normal functions.
81 static cl::opt<unsigned>
82  JumpTableDensity("jump-table-density", cl::init(10), cl::Hidden,
83  cl::desc("Minimum density for building a jump table in "
84  "a normal function"));
85 
86 /// Minimum jump table density for -Os or -Oz functions.
87 static cl::opt<unsigned> OptsizeJumpTableDensity(
88  "optsize-jump-table-density", cl::init(40), cl::Hidden,
89  cl::desc("Minimum density for building a jump table in "
90  "an optsize function"));
91 
92 static bool darwinHasSinCos(const Triple &TT) {
93  assert(TT.isOSDarwin() && "should be called with darwin triple");
94  // Don't bother with 32 bit x86.
95  if (TT.getArch() == Triple::x86)
96  return false;
97  // Macos < 10.9 has no sincos_stret.
98  if (TT.isMacOSX())
99  return !TT.isMacOSXVersionLT(10, 9) && TT.isArch64Bit();
100  // iOS < 7.0 has no sincos_stret.
101  if (TT.isiOS())
102  return !TT.isOSVersionLT(7, 0);
103  // Any other darwin such as WatchOS/TvOS is new enough.
104  return true;
105 }
106 
107 // Although this default value is arbitrary, it is not random. It is assumed
108 // that a condition that evaluates the same way by a higher percentage than this
109 // is best represented as control flow. Therefore, the default value N should be
110 // set such that the win from N% correct executions is greater than the loss
111 // from (100 - N)% mispredicted executions for the majority of intended targets.
112 static cl::opt<int> MinPercentageForPredictableBranch(
113  "min-predictable-branch", cl::init(99),
114  cl::desc("Minimum percentage (0-100) that a condition must be either true "
115  "or false to assume that the condition is predictable"),
116  cl::Hidden);
117 
118 void TargetLoweringBase::InitLibcalls(const Triple &TT) {
119 #define HANDLE_LIBCALL(code, name) \
120  setLibcallName(RTLIB::code, name);
121 #include "llvm/CodeGen/RuntimeLibcalls.def"
122 #undef HANDLE_LIBCALL
123  // Initialize calling conventions to their default.
124  for (int LC = 0; LC < RTLIB::UNKNOWN_LIBCALL; ++LC)
125  setLibcallCallingConv((RTLIB::Libcall)LC, CallingConv::C);
126 
127  // A few names are different on particular architectures or environments.
128  if (TT.isOSDarwin()) {
129  // For f16/f32 conversions, Darwin uses the standard naming scheme, instead
130  // of the gnueabi-style __gnu_*_ieee.
131  // FIXME: What about other targets?
132  setLibcallName(RTLIB::FPEXT_F16_F32, "__extendhfsf2");
133  setLibcallName(RTLIB::FPROUND_F32_F16, "__truncsfhf2");
134 
135  // Some darwins have an optimized __bzero/bzero function.
136  switch (TT.getArch()) {
137  case Triple::x86:
138  case Triple::x86_64:
139  if (TT.isMacOSX() && !TT.isMacOSXVersionLT(10, 6))
140  setLibcallName(RTLIB::BZERO, "__bzero");
141  break;
142  case Triple::aarch64:
143  setLibcallName(RTLIB::BZERO, "bzero");
144  break;
145  default:
146  break;
147  }
148 
149  if (darwinHasSinCos(TT)) {
150  setLibcallName(RTLIB::SINCOS_STRET_F32, "__sincosf_stret");
151  setLibcallName(RTLIB::SINCOS_STRET_F64, "__sincos_stret");
152  if (TT.isWatchABI()) {
153  setLibcallCallingConv(RTLIB::SINCOS_STRET_F32,
154  CallingConv::ARM_AAPCS_VFP);
155  setLibcallCallingConv(RTLIB::SINCOS_STRET_F64,
156  CallingConv::ARM_AAPCS_VFP);
157  }
158  }
159  } else {
160  setLibcallName(RTLIB::FPEXT_F16_F32, "__gnu_h2f_ieee");
161  setLibcallName(RTLIB::FPROUND_F32_F16, "__gnu_f2h_ieee");
162  }
163 
164  if (TT.isGNUEnvironment() || TT.isOSFuchsia()) {
165  setLibcallName(RTLIB::SINCOS_F32, "sincosf");
166  setLibcallName(RTLIB::SINCOS_F64, "sincos");
167  setLibcallName(RTLIB::SINCOS_F80, "sincosl");
168  setLibcallName(RTLIB::SINCOS_F128, "sincosl");
169  setLibcallName(RTLIB::SINCOS_PPCF128, "sincosl");
170  }
171 
172  if (TT.isOSOpenBSD()) {
173  setLibcallName(RTLIB::STACKPROTECTOR_CHECK_FAIL, nullptr);
174  }
175 }
176 
177 /// getFPEXT - Return the FPEXT_*_* value for the given types, or
178 /// UNKNOWN_LIBCALL if there is none.
179 RTLIB::Libcall RTLIB::getFPEXT(EVT OpVT, EVT RetVT) {
180  if (OpVT == MVT::f16) {
181  if (RetVT == MVT::f32)
182  return FPEXT_F16_F32;
183  } else if (OpVT == MVT::f32) {
184  if (RetVT == MVT::f64)
185  return FPEXT_F32_F64;
186  if (RetVT == MVT::f128)
187  return FPEXT_F32_F128;
188  if (RetVT == MVT::ppcf128)
189  return FPEXT_F32_PPCF128;
190  } else if (OpVT == MVT::f64) {
191  if (RetVT == MVT::f128)
192  return FPEXT_F64_F128;
193  else if (RetVT == MVT::ppcf128)
194  return FPEXT_F64_PPCF128;
195  } else if (OpVT == MVT::f80) {
196  if (RetVT == MVT::f128)
197  return FPEXT_F80_F128;
198  }
199 
200  return UNKNOWN_LIBCALL;
201 }
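// For example, getFPEXT(MVT::f32, MVT::f64) yields FPEXT_F32_F64, while an
// unsupported pairing such as (f16, f64) falls through to UNKNOWN_LIBCALL.
// The getFPROUND/getFPTO*INT/get*INTTOFP helpers below follow the same pattern.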
202 
203 /// getFPROUND - Return the FPROUND_*_* value for the given types, or
204 /// UNKNOWN_LIBCALL if there is none.
205 RTLIB::Libcall RTLIB::getFPROUND(EVT OpVT, EVT RetVT) {
206  if (RetVT == MVT::f16) {
207  if (OpVT == MVT::f32)
208  return FPROUND_F32_F16;
209  if (OpVT == MVT::f64)
210  return FPROUND_F64_F16;
211  if (OpVT == MVT::f80)
212  return FPROUND_F80_F16;
213  if (OpVT == MVT::f128)
214  return FPROUND_F128_F16;
215  if (OpVT == MVT::ppcf128)
216  return FPROUND_PPCF128_F16;
217  } else if (RetVT == MVT::f32) {
218  if (OpVT == MVT::f64)
219  return FPROUND_F64_F32;
220  if (OpVT == MVT::f80)
221  return FPROUND_F80_F32;
222  if (OpVT == MVT::f128)
223  return FPROUND_F128_F32;
224  if (OpVT == MVT::ppcf128)
225  return FPROUND_PPCF128_F32;
226  } else if (RetVT == MVT::f64) {
227  if (OpVT == MVT::f80)
228  return FPROUND_F80_F64;
229  if (OpVT == MVT::f128)
230  return FPROUND_F128_F64;
231  if (OpVT == MVT::ppcf128)
232  return FPROUND_PPCF128_F64;
233  } else if (RetVT == MVT::f80) {
234  if (OpVT == MVT::f128)
235  return FPROUND_F128_F80;
236  }
237 
238  return UNKNOWN_LIBCALL;
239 }
240 
241 /// getFPTOSINT - Return the FPTOSINT_*_* value for the given types, or
242 /// UNKNOWN_LIBCALL if there is none.
243 RTLIB::Libcall RTLIB::getFPTOSINT(EVT OpVT, EVT RetVT) {
244  if (OpVT == MVT::f32) {
245  if (RetVT == MVT::i32)
246  return FPTOSINT_F32_I32;
247  if (RetVT == MVT::i64)
248  return FPTOSINT_F32_I64;
249  if (RetVT == MVT::i128)
250  return FPTOSINT_F32_I128;
251  } else if (OpVT == MVT::f64) {
252  if (RetVT == MVT::i32)
253  return FPTOSINT_F64_I32;
254  if (RetVT == MVT::i64)
255  return FPTOSINT_F64_I64;
256  if (RetVT == MVT::i128)
257  return FPTOSINT_F64_I128;
258  } else if (OpVT == MVT::f80) {
259  if (RetVT == MVT::i32)
260  return FPTOSINT_F80_I32;
261  if (RetVT == MVT::i64)
262  return FPTOSINT_F80_I64;
263  if (RetVT == MVT::i128)
264  return FPTOSINT_F80_I128;
265  } else if (OpVT == MVT::f128) {
266  if (RetVT == MVT::i32)
267  return FPTOSINT_F128_I32;
268  if (RetVT == MVT::i64)
269  return FPTOSINT_F128_I64;
270  if (RetVT == MVT::i128)
271  return FPTOSINT_F128_I128;
272  } else if (OpVT == MVT::ppcf128) {
273  if (RetVT == MVT::i32)
274  return FPTOSINT_PPCF128_I32;
275  if (RetVT == MVT::i64)
276  return FPTOSINT_PPCF128_I64;
277  if (RetVT == MVT::i128)
278  return FPTOSINT_PPCF128_I128;
279  }
280  return UNKNOWN_LIBCALL;
281 }
282 
283 /// getFPTOUINT - Return the FPTOUINT_*_* value for the given types, or
284 /// UNKNOWN_LIBCALL if there is none.
285 RTLIB::Libcall RTLIB::getFPTOUINT(EVT OpVT, EVT RetVT) {
286  if (OpVT == MVT::f32) {
287  if (RetVT == MVT::i32)
288  return FPTOUINT_F32_I32;
289  if (RetVT == MVT::i64)
290  return FPTOUINT_F32_I64;
291  if (RetVT == MVT::i128)
292  return FPTOUINT_F32_I128;
293  } else if (OpVT == MVT::f64) {
294  if (RetVT == MVT::i32)
295  return FPTOUINT_F64_I32;
296  if (RetVT == MVT::i64)
297  return FPTOUINT_F64_I64;
298  if (RetVT == MVT::i128)
299  return FPTOUINT_F64_I128;
300  } else if (OpVT == MVT::f80) {
301  if (RetVT == MVT::i32)
302  return FPTOUINT_F80_I32;
303  if (RetVT == MVT::i64)
304  return FPTOUINT_F80_I64;
305  if (RetVT == MVT::i128)
306  return FPTOUINT_F80_I128;
307  } else if (OpVT == MVT::f128) {
308  if (RetVT == MVT::i32)
309  return FPTOUINT_F128_I32;
310  if (RetVT == MVT::i64)
311  return FPTOUINT_F128_I64;
312  if (RetVT == MVT::i128)
313  return FPTOUINT_F128_I128;
314  } else if (OpVT == MVT::ppcf128) {
315  if (RetVT == MVT::i32)
316  return FPTOUINT_PPCF128_I32;
317  if (RetVT == MVT::i64)
318  return FPTOUINT_PPCF128_I64;
319  if (RetVT == MVT::i128)
320  return FPTOUINT_PPCF128_I128;
321  }
322  return UNKNOWN_LIBCALL;
323 }
324 
325 /// getSINTTOFP - Return the SINTTOFP_*_* value for the given types, or
326 /// UNKNOWN_LIBCALL if there is none.
327 RTLIB::Libcall RTLIB::getSINTTOFP(EVT OpVT, EVT RetVT) {
328  if (OpVT == MVT::i32) {
329  if (RetVT == MVT::f32)
330  return SINTTOFP_I32_F32;
331  if (RetVT == MVT::f64)
332  return SINTTOFP_I32_F64;
333  if (RetVT == MVT::f80)
334  return SINTTOFP_I32_F80;
335  if (RetVT == MVT::f128)
336  return SINTTOFP_I32_F128;
337  if (RetVT == MVT::ppcf128)
338  return SINTTOFP_I32_PPCF128;
339  } else if (OpVT == MVT::i64) {
340  if (RetVT == MVT::f32)
341  return SINTTOFP_I64_F32;
342  if (RetVT == MVT::f64)
343  return SINTTOFP_I64_F64;
344  if (RetVT == MVT::f80)
345  return SINTTOFP_I64_F80;
346  if (RetVT == MVT::f128)
347  return SINTTOFP_I64_F128;
348  if (RetVT == MVT::ppcf128)
349  return SINTTOFP_I64_PPCF128;
350  } else if (OpVT == MVT::i128) {
351  if (RetVT == MVT::f32)
352  return SINTTOFP_I128_F32;
353  if (RetVT == MVT::f64)
354  return SINTTOFP_I128_F64;
355  if (RetVT == MVT::f80)
356  return SINTTOFP_I128_F80;
357  if (RetVT == MVT::f128)
358  return SINTTOFP_I128_F128;
359  if (RetVT == MVT::ppcf128)
360  return SINTTOFP_I128_PPCF128;
361  }
362  return UNKNOWN_LIBCALL;
363 }
364 
365 /// getUINTTOFP - Return the UINTTOFP_*_* value for the given types, or
366 /// UNKNOWN_LIBCALL if there is none.
367 RTLIB::Libcall RTLIB::getUINTTOFP(EVT OpVT, EVT RetVT) {
368  if (OpVT == MVT::i32) {
369  if (RetVT == MVT::f32)
370  return UINTTOFP_I32_F32;
371  if (RetVT == MVT::f64)
372  return UINTTOFP_I32_F64;
373  if (RetVT == MVT::f80)
374  return UINTTOFP_I32_F80;
375  if (RetVT == MVT::f128)
376  return UINTTOFP_I32_F128;
377  if (RetVT == MVT::ppcf128)
378  return UINTTOFP_I32_PPCF128;
379  } else if (OpVT == MVT::i64) {
380  if (RetVT == MVT::f32)
381  return UINTTOFP_I64_F32;
382  if (RetVT == MVT::f64)
383  return UINTTOFP_I64_F64;
384  if (RetVT == MVT::f80)
385  return UINTTOFP_I64_F80;
386  if (RetVT == MVT::f128)
387  return UINTTOFP_I64_F128;
388  if (RetVT == MVT::ppcf128)
389  return UINTTOFP_I64_PPCF128;
390  } else if (OpVT == MVT::i128) {
391  if (RetVT == MVT::f32)
392  return UINTTOFP_I128_F32;
393  if (RetVT == MVT::f64)
394  return UINTTOFP_I128_F64;
395  if (RetVT == MVT::f80)
396  return UINTTOFP_I128_F80;
397  if (RetVT == MVT::f128)
398  return UINTTOFP_I128_F128;
399  if (RetVT == MVT::ppcf128)
400  return UINTTOFP_I128_PPCF128;
401  }
402  return UNKNOWN_LIBCALL;
403 }
404 
405 RTLIB::Libcall RTLIB::getSYNC(unsigned Opc, MVT VT) {
406 #define OP_TO_LIBCALL(Name, Enum) \
407  case Name: \
408  switch (VT.SimpleTy) { \
409  default: \
410  return UNKNOWN_LIBCALL; \
411  case MVT::i8: \
412  return Enum##_1; \
413  case MVT::i16: \
414  return Enum##_2; \
415  case MVT::i32: \
416  return Enum##_4; \
417  case MVT::i64: \
418  return Enum##_8; \
419  case MVT::i128: \
420  return Enum##_16; \
421  }
422 
423  switch (Opc) {
424  OP_TO_LIBCALL(ISD::ATOMIC_SWAP, SYNC_LOCK_TEST_AND_SET)
425  OP_TO_LIBCALL(ISD::ATOMIC_CMP_SWAP, SYNC_VAL_COMPARE_AND_SWAP)
426  OP_TO_LIBCALL(ISD::ATOMIC_LOAD_ADD, SYNC_FETCH_AND_ADD)
427  OP_TO_LIBCALL(ISD::ATOMIC_LOAD_SUB, SYNC_FETCH_AND_SUB)
428  OP_TO_LIBCALL(ISD::ATOMIC_LOAD_AND, SYNC_FETCH_AND_AND)
429  OP_TO_LIBCALL(ISD::ATOMIC_LOAD_OR, SYNC_FETCH_AND_OR)
430  OP_TO_LIBCALL(ISD::ATOMIC_LOAD_XOR, SYNC_FETCH_AND_XOR)
431  OP_TO_LIBCALL(ISD::ATOMIC_LOAD_NAND, SYNC_FETCH_AND_NAND)
432  OP_TO_LIBCALL(ISD::ATOMIC_LOAD_MAX, SYNC_FETCH_AND_MAX)
433  OP_TO_LIBCALL(ISD::ATOMIC_LOAD_UMAX, SYNC_FETCH_AND_UMAX)
434  OP_TO_LIBCALL(ISD::ATOMIC_LOAD_MIN, SYNC_FETCH_AND_MIN)
435  OP_TO_LIBCALL(ISD::ATOMIC_LOAD_UMIN, SYNC_FETCH_AND_UMIN)
436  }
437 
438 #undef OP_TO_LIBCALL
439 
440  return UNKNOWN_LIBCALL;
441 }
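// For example, getSYNC(ISD::ATOMIC_LOAD_ADD, MVT::i32) maps to the
// SYNC_FETCH_AND_ADD_4 libcall; opcodes or widths without a matching entry
// return UNKNOWN_LIBCALL.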
442 
443 RTLIB::Libcall RTLIB::getMEMCPY_ELEMENT_UNORDERED_ATOMIC(uint64_t ElementSize) {
444  switch (ElementSize) {
445  case 1:
446  return MEMCPY_ELEMENT_UNORDERED_ATOMIC_1;
447  case 2:
448  return MEMCPY_ELEMENT_UNORDERED_ATOMIC_2;
449  case 4:
450  return MEMCPY_ELEMENT_UNORDERED_ATOMIC_4;
451  case 8:
452  return MEMCPY_ELEMENT_UNORDERED_ATOMIC_8;
453  case 16:
454  return MEMCPY_ELEMENT_UNORDERED_ATOMIC_16;
455  default:
456  return UNKNOWN_LIBCALL;
457  }
458 }
459 
460 RTLIB::Libcall RTLIB::getMEMMOVE_ELEMENT_UNORDERED_ATOMIC(uint64_t ElementSize) {
461  switch (ElementSize) {
462  case 1:
463  return MEMMOVE_ELEMENT_UNORDERED_ATOMIC_1;
464  case 2:
465  return MEMMOVE_ELEMENT_UNORDERED_ATOMIC_2;
466  case 4:
467  return MEMMOVE_ELEMENT_UNORDERED_ATOMIC_4;
468  case 8:
469  return MEMMOVE_ELEMENT_UNORDERED_ATOMIC_8;
470  case 16:
471  return MEMMOVE_ELEMENT_UNORDERED_ATOMIC_16;
472  default:
473  return UNKNOWN_LIBCALL;
474  }
475 }
476 
477 RTLIB::Libcall RTLIB::getMEMSET_ELEMENT_UNORDERED_ATOMIC(uint64_t ElementSize) {
478  switch (ElementSize) {
479  case 1:
480  return MEMSET_ELEMENT_UNORDERED_ATOMIC_1;
481  case 2:
482  return MEMSET_ELEMENT_UNORDERED_ATOMIC_2;
483  case 4:
484  return MEMSET_ELEMENT_UNORDERED_ATOMIC_4;
485  case 8:
486  return MEMSET_ELEMENT_UNORDERED_ATOMIC_8;
487  case 16:
488  return MEMSET_ELEMENT_UNORDERED_ATOMIC_16;
489  default:
490  return UNKNOWN_LIBCALL;
491  }
492 }
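// These three helpers map the element size (in bytes) of an element-wise
// unordered-atomic memcpy/memmove/memset to the corresponding libcall; e.g. an
// element size of 4 selects MEMSET_ELEMENT_UNORDERED_ATOMIC_4, and any size
// other than 1, 2, 4, 8, or 16 is rejected with UNKNOWN_LIBCALL.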
493 
494 /// InitCmpLibcallCCs - Set default comparison libcall CC.
495 static void InitCmpLibcallCCs(ISD::CondCode *CCs) {
496  memset(CCs, ISD::SETCC_INVALID, sizeof(ISD::CondCode)*RTLIB::UNKNOWN_LIBCALL);
497  CCs[RTLIB::OEQ_F32] = ISD::SETEQ;
498  CCs[RTLIB::OEQ_F64] = ISD::SETEQ;
499  CCs[RTLIB::OEQ_F128] = ISD::SETEQ;
500  CCs[RTLIB::OEQ_PPCF128] = ISD::SETEQ;
501  CCs[RTLIB::UNE_F32] = ISD::SETNE;
502  CCs[RTLIB::UNE_F64] = ISD::SETNE;
503  CCs[RTLIB::UNE_F128] = ISD::SETNE;
504  CCs[RTLIB::UNE_PPCF128] = ISD::SETNE;
505  CCs[RTLIB::OGE_F32] = ISD::SETGE;
506  CCs[RTLIB::OGE_F64] = ISD::SETGE;
507  CCs[RTLIB::OGE_F128] = ISD::SETGE;
508  CCs[RTLIB::OGE_PPCF128] = ISD::SETGE;
509  CCs[RTLIB::OLT_F32] = ISD::SETLT;
510  CCs[RTLIB::OLT_F64] = ISD::SETLT;
511  CCs[RTLIB::OLT_F128] = ISD::SETLT;
512  CCs[RTLIB::OLT_PPCF128] = ISD::SETLT;
513  CCs[RTLIB::OLE_F32] = ISD::SETLE;
514  CCs[RTLIB::OLE_F64] = ISD::SETLE;
515  CCs[RTLIB::OLE_F128] = ISD::SETLE;
516  CCs[RTLIB::OLE_PPCF128] = ISD::SETLE;
517  CCs[RTLIB::OGT_F32] = ISD::SETGT;
518  CCs[RTLIB::OGT_F64] = ISD::SETGT;
519  CCs[RTLIB::OGT_F128] = ISD::SETGT;
520  CCs[RTLIB::OGT_PPCF128] = ISD::SETGT;
521  CCs[RTLIB::UO_F32] = ISD::SETNE;
522  CCs[RTLIB::UO_F64] = ISD::SETNE;
523  CCs[RTLIB::UO_F128] = ISD::SETNE;
524  CCs[RTLIB::UO_PPCF128] = ISD::SETNE;
525  CCs[RTLIB::O_F32] = ISD::SETEQ;
526  CCs[RTLIB::O_F64] = ISD::SETEQ;
527  CCs[RTLIB::O_F128] = ISD::SETEQ;
528  CCs[RTLIB::O_PPCF128] = ISD::SETEQ;
529 }
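// The soft-float comparison libcalls return an integer that is then compared
// against zero using the condition code recorded here. For example, OEQ_F32
// (__eqsf2) returns 0 when its operands compare equal, so the libcall result
// is tested with SETEQ against zero.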
530 
531 /// NOTE: The TargetMachine owns TLOF.
532 TargetLoweringBase::TargetLoweringBase(const TargetMachine &tm) : TM(tm) {
533  initActions();
534 
535  // Perform these initializations only once.
536  MaxStoresPerMemset = MaxStoresPerMemcpy = MaxStoresPerMemmove =
537  MaxLoadsPerMemcmp = 8;
541  UseUnderscoreSetJmp = false;
542  UseUnderscoreLongJmp = false;
543  HasMultipleConditionRegisters = false;
544  HasExtractBitsInsn = false;
545  JumpIsExpensive = JumpIsExpensiveOverride;
547  EnableExtLdPromotion = false;
548  HasFloatingPointExceptions = true;
549  StackPointerRegisterToSaveRestore = 0;
550  BooleanContents = UndefinedBooleanContent;
551  BooleanFloatContents = UndefinedBooleanContent;
552  BooleanVectorContents = UndefinedBooleanContent;
553  SchedPreferenceInfo = Sched::ILP;
554  JumpBufSize = 0;
555  JumpBufAlignment = 0;
556  MinFunctionAlignment = 0;
557  PrefFunctionAlignment = 0;
558  PrefLoopAlignment = 0;
560  MinStackArgumentAlignment = 1;
561  // TODO: the default will be switched to 0 in the next commit, along
562  // with the Target-specific changes necessary.
563  MaxAtomicSizeInBitsSupported = 1024;
564 
565  MinCmpXchgSizeInBits = 0;
566  SupportsUnalignedAtomics = false;
567 
568  std::fill(std::begin(LibcallRoutineNames), std::end(LibcallRoutineNames), nullptr);
569 
570  InitLibcalls(TM.getTargetTriple());
571  InitCmpLibcallCCs(CmpLibcallCCs);
572 }
573 
574 void TargetLoweringBase::initActions() {
575  // All operations default to being supported.
576  memset(OpActions, 0, sizeof(OpActions));
577  memset(LoadExtActions, 0, sizeof(LoadExtActions));
578  memset(TruncStoreActions, 0, sizeof(TruncStoreActions));
579  memset(IndexedModeActions, 0, sizeof(IndexedModeActions));
580  memset(CondCodeActions, 0, sizeof(CondCodeActions));
581  std::fill(std::begin(RegClassForVT), std::end(RegClassForVT), nullptr);
582  std::fill(std::begin(TargetDAGCombineArray),
583  std::end(TargetDAGCombineArray), 0);
584 
585  // Set default actions for various operations.
586  for (MVT VT : MVT::all_valuetypes()) {
587  // Default all indexed load / store to expand.
588  for (unsigned IM = (unsigned)ISD::PRE_INC;
589  IM != (unsigned)ISD::LAST_INDEXED_MODE; ++IM) {
590  setIndexedLoadAction(IM, VT, Expand);
591  setIndexedStoreAction(IM, VT, Expand);
592  }
593 
594  // Most backends expect to see the node which just returns the value loaded.
596 
597  // These operations default to expand.
610 
611  // Overflow operations default to expand
618 
619  // ADDCARRY operations default to expand
623 
624  // ADDC/ADDE/SUBC/SUBE default to expand.
629 
630  // These default to Expand so they will be expanded to CTLZ/CTTZ by default.
633 
635 
636  // These library functions default to expand.
639 
640  // These operations default to expand for vector types.
641  if (VT.isVector()) {
646  }
647 
648  // For most targets @llvm.get.dynamic.area.offset just returns 0.
650  }
651 
652  // Most targets ignore the @llvm.prefetch intrinsic.
654 
655  // Most targets also ignore the @llvm.readcyclecounter intrinsic.
657 
658  // ConstantFP nodes default to expand. Targets can either change this to
659  // Legal, in which case all fp constants are legal, or use isFPImmLegal()
660  // to optimize expansions for certain constants.
666 
667  // These library functions default to expand.
668  for (MVT VT : {MVT::f32, MVT::f64, MVT::f128}) {
680  }
681 
682  // Default ISD::TRAP to expand (which turns it into abort).
684 
685  // On most systems, DEBUGTRAP and TRAP have no difference. The "Expand"
686  // here is to inform DAG Legalizer to replace DEBUGTRAP with TRAP.
688 }
689 
690 MVT TargetLoweringBase::getScalarShiftAmountTy(const DataLayout &DL,
691  EVT) const {
692  return MVT::getIntegerVT(8 * DL.getPointerSize(0));
693 }
694 
695 EVT TargetLoweringBase::getShiftAmountTy(EVT LHSTy, const DataLayout &DL,
696  bool LegalTypes) const {
697  assert(LHSTy.isInteger() && "Shift amount is not an integer type!");
698  if (LHSTy.isVector())
699  return LHSTy;
700  return LegalTypes ? getScalarShiftAmountTy(DL, LHSTy)
701  : getPointerTy(DL);
702 }
703 
704 bool TargetLoweringBase::canOpTrap(unsigned Op, EVT VT) const {
705  assert(isTypeLegal(VT));
706  switch (Op) {
707  default:
708  return false;
709  case ISD::SDIV:
710  case ISD::UDIV:
711  case ISD::SREM:
712  case ISD::UREM:
713  return true;
714  }
715 }
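// Of the operations handled here, only the integer division and remainder
// nodes are treated as potentially trapping (e.g. on division by zero); every
// other legal operation is assumed not to trap.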
716 
717 void TargetLoweringBase::setJumpIsExpensive(bool isExpensive) {
718  // If the command-line option was specified, ignore this request.
719  if (!JumpIsExpensiveOverride.getNumOccurrences())
720  JumpIsExpensive = isExpensive;
721 }
722 
723 TargetLoweringBase::LegalizeKind
724 TargetLoweringBase::getTypeConversion(LLVMContext &Context, EVT VT) const {
725  // If this is a simple type, use the ComputeRegisterProp mechanism.
726  if (VT.isSimple()) {
727  MVT SVT = VT.getSimpleVT();
728  assert((unsigned)SVT.SimpleTy < array_lengthof(TransformToType));
729  MVT NVT = TransformToType[SVT.SimpleTy];
730  LegalizeTypeAction LA = ValueTypeActions.getTypeAction(SVT.SimpleTy);
731 
732  assert((LA == TypeLegal || LA == TypeSoftenFloat ||
733  ValueTypeActions.getTypeAction(NVT) != TypePromoteInteger) &&
734  "Promote may not follow Expand or Promote");
735 
736  if (LA == TypeSplitVector)
737  return LegalizeKind(LA,
738  EVT::getVectorVT(Context, SVT.getVectorElementType(),
739  SVT.getVectorNumElements() / 2));
740  if (LA == TypeScalarizeVector)
741  return LegalizeKind(LA, SVT.getVectorElementType());
742  return LegalizeKind(LA, NVT);
743  }
744 
745  // Handle Extended Scalar Types.
746  if (!VT.isVector()) {
747  assert(VT.isInteger() && "Float types must be simple");
748  unsigned BitSize = VT.getSizeInBits();
749  // First promote to a power-of-two size, then expand if necessary.
750  if (BitSize < 8 || !isPowerOf2_32(BitSize)) {
751  EVT NVT = VT.getRoundIntegerType(Context);
752  assert(NVT != VT && "Unable to round integer VT");
753  LegalizeKind NextStep = getTypeConversion(Context, NVT);
754  // Avoid multi-step promotion.
755  if (NextStep.first == TypePromoteInteger)
756  return NextStep;
757  // Return rounded integer type.
758  return LegalizeKind(TypePromoteInteger, NVT);
759  }
760 
761  return LegalizeKind(TypeExpandInteger,
762  EVT::getIntegerVT(Context, VT.getSizeInBits() / 2));
763  }
764 
765  // Handle vector types.
766  unsigned NumElts = VT.getVectorNumElements();
767  EVT EltVT = VT.getVectorElementType();
768 
769  // Vectors with only one element are always scalarized.
770  if (NumElts == 1)
771  return LegalizeKind(TypeScalarizeVector, EltVT);
772 
773  // Try to widen vector elements until the element type is a power of two and
774  // promote it to a legal type later on, for example:
775  // <3 x i8> -> <4 x i8> -> <4 x i32>
776  if (EltVT.isInteger()) {
777  // Vectors with a number of elements that is not a power of two are always
778  // widened, for example <3 x i8> -> <4 x i8>.
779  if (!VT.isPow2VectorType()) {
780  NumElts = (unsigned)NextPowerOf2(NumElts);
781  EVT NVT = EVT::getVectorVT(Context, EltVT, NumElts);
782  return LegalizeKind(TypeWidenVector, NVT);
783  }
784 
785  // Examine the element type.
786  LegalizeKind LK = getTypeConversion(Context, EltVT);
787 
788  // If type is to be expanded, split the vector.
789  // <4 x i140> -> <2 x i140>
790  if (LK.first == TypeExpandInteger)
791  return LegalizeKind(TypeSplitVector,
792  EVT::getVectorVT(Context, EltVT, NumElts / 2));
793 
794  // Promote the integer element types until a legal vector type is found
795  // or until the element integer type is too big. If a legal type was not
796  // found, fallback to the usual mechanism of widening/splitting the
797  // vector.
798  EVT OldEltVT = EltVT;
799  while (true) {
800  // Increase the bitwidth of the element to the next pow-of-two
801  // (which is greater than 8 bits).
802  EltVT = EVT::getIntegerVT(Context, 1 + EltVT.getSizeInBits())
803  .getRoundIntegerType(Context);
804 
805  // Stop trying when getting a non-simple element type.
806  // Note that vector elements may be greater than legal vector element
807  // types. Example: X86 XMM registers hold 64bit element on 32bit
808  // systems.
809  if (!EltVT.isSimple())
810  break;
811 
812  // Build a new vector type and check if it is legal.
813  MVT NVT = MVT::getVectorVT(EltVT.getSimpleVT(), NumElts);
814  // Found a legal promoted vector type.
815  if (NVT != MVT() && ValueTypeActions.getTypeAction(NVT) == TypeLegal)
816  return LegalizeKind(TypePromoteInteger,
817  EVT::getVectorVT(Context, EltVT, NumElts));
818  }
819 
820  // Reset the type to the unexpanded type if we did not find a legal vector
821  // type with a promoted vector element type.
822  EltVT = OldEltVT;
823  }
824 
825  // Try to widen the vector until a legal type is found.
826  // If there is no wider legal type, split the vector.
827  while (true) {
828  // Round up to the next power of 2.
829  NumElts = (unsigned)NextPowerOf2(NumElts);
830 
831  // If there is no simple vector type with this many elements then there
832  // cannot be a larger legal vector type. Note that this assumes that
833  // there are no skipped intermediate vector types in the simple types.
834  if (!EltVT.isSimple())
835  break;
836  MVT LargerVector = MVT::getVectorVT(EltVT.getSimpleVT(), NumElts);
837  if (LargerVector == MVT())
838  break;
839 
840  // If this type is legal then widen the vector.
841  if (ValueTypeActions.getTypeAction(LargerVector) == TypeLegal)
842  return LegalizeKind(TypeWidenVector, LargerVector);
843  }
844 
845  // Widen odd vectors to next power of two.
846  if (!VT.isPow2VectorType()) {
847  EVT NVT = VT.getPow2VectorType(Context);
848  return LegalizeKind(TypeWidenVector, NVT);
849  }
850 
851  // Vectors with illegal element types are expanded.
852  EVT NVT = EVT::getVectorVT(Context, EltVT, VT.getVectorNumElements() / 2);
853  return LegalizeKind(TypeSplitVector, NVT);
854 }
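// Worked example: an extended scalar such as i33 is first rounded up to the
// next power-of-two width and reported as (TypePromoteInteger, i64); an
// illegal power-of-two integer with no simple MVT, such as i256, is reported
// as (TypeExpandInteger, i128), i.e. it is split into two half-width parts.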
855 
856 static unsigned getVectorTypeBreakdownMVT(MVT VT, MVT &IntermediateVT,
857  unsigned &NumIntermediates,
858  MVT &RegisterVT,
859  TargetLoweringBase *TLI) {
860  // Figure out the right, legal destination reg to copy into.
861  unsigned NumElts = VT.getVectorNumElements();
862  MVT EltTy = VT.getVectorElementType();
863 
864  unsigned NumVectorRegs = 1;
865 
866  // FIXME: We don't support non-power-of-2-sized vectors for now. Ideally we
867  // could break down into LHS/RHS like LegalizeDAG does.
868  if (!isPowerOf2_32(NumElts)) {
869  NumVectorRegs = NumElts;
870  NumElts = 1;
871  }
872 
873  // Divide the input until we get to a supported size. This will always
874  // end with a scalar if the target doesn't support vectors.
875  while (NumElts > 1 && !TLI->isTypeLegal(MVT::getVectorVT(EltTy, NumElts))) {
876  NumElts >>= 1;
877  NumVectorRegs <<= 1;
878  }
879 
880  NumIntermediates = NumVectorRegs;
881 
882  MVT NewVT = MVT::getVectorVT(EltTy, NumElts);
883  if (!TLI->isTypeLegal(NewVT))
884  NewVT = EltTy;
885  IntermediateVT = NewVT;
886 
887  unsigned NewVTSize = NewVT.getSizeInBits();
888 
889  // Convert sizes such as i33 to i64.
890  if (!isPowerOf2_32(NewVTSize))
891  NewVTSize = NextPowerOf2(NewVTSize);
892 
893  MVT DestVT = TLI->getRegisterType(NewVT);
894  RegisterVT = DestVT;
895  if (EVT(DestVT).bitsLT(NewVT)) // Value is expanded, e.g. i64 -> i16.
896  return NumVectorRegs*(NewVTSize/DestVT.getSizeInBits());
897 
898  // Otherwise, promotion or legal types use the same number of registers as
899  // the vector decimated to the appropriate level.
900  return NumVectorRegs;
901 }
902 
903 /// isLegalRC - Return true if the value types that can be represented by the
904 /// specified register class are all legal.
905 bool TargetLoweringBase::isLegalRC(const TargetRegisterInfo &TRI,
906  const TargetRegisterClass &RC) const {
907  for (auto I = TRI.legalclasstypes_begin(RC); *I != MVT::Other; ++I)
908  if (isTypeLegal(*I))
909  return true;
910  return false;
911 }
912 
913 /// Replace/modify any TargetFrameIndex operands with a target-dependent
914 /// sequence of memory operands that is recognized by PrologEpilogInserter.
915 MachineBasicBlock *
916 TargetLoweringBase::emitPatchPoint(MachineInstr &InitialMI,
917  MachineBasicBlock *MBB) const {
918  MachineInstr *MI = &InitialMI;
919  MachineFunction &MF = *MI->getMF();
920  MachineFrameInfo &MFI = MF.getFrameInfo();
921 
922  // We're handling multiple types of operands here:
923  // PATCHPOINT MetaArgs - live-in, read only, direct
924  // STATEPOINT Deopt Spill - live-through, read only, indirect
925  // STATEPOINT Deopt Alloca - live-through, read only, direct
926  // (We're currently conservative and mark the deopt slots read/write in
927  // practice.)
928  // STATEPOINT GC Spill - live-through, read/write, indirect
929  // STATEPOINT GC Alloca - live-through, read/write, direct
930  // The live-in vs live-through is handled already (the live through ones are
931  // all stack slots), but we need to handle the different type of stackmap
932  // operands and memory effects here.
933 
934  // MI changes inside this loop as we grow operands.
935  for(unsigned OperIdx = 0; OperIdx != MI->getNumOperands(); ++OperIdx) {
936  MachineOperand &MO = MI->getOperand(OperIdx);
937  if (!MO.isFI())
938  continue;
939 
940  // foldMemoryOperand builds a new MI after replacing a single FI operand
941  // with the canonical set of five x86 addressing-mode operands.
942  int FI = MO.getIndex();
943  MachineInstrBuilder MIB = BuildMI(MF, MI->getDebugLoc(), MI->getDesc());
944 
945  // Copy operands before the frame-index.
946  for (unsigned i = 0; i < OperIdx; ++i)
947  MIB.add(MI->getOperand(i));
948  // Add frame index operands recognized by stackmaps.cpp
949  if (MFI.isStatepointSpillSlotObjectIndex(FI)) {
950  // indirect-mem-ref tag, size, #FI, offset.
951  // Used for spills inserted by StatepointLowering. This codepath is not
952  // used for patchpoints/stackmaps at all, for these spilling is done via
953  // foldMemoryOperand callback only.
954  assert(MI->getOpcode() == TargetOpcode::STATEPOINT && "sanity");
955  MIB.addImm(StackMaps::IndirectMemRefOp);
956  MIB.addImm(MFI.getObjectSize(FI));
957  MIB.add(MI->getOperand(OperIdx));
958  MIB.addImm(0);
959  } else {
960  // direct-mem-ref tag, #FI, offset.
961  // Used by patchpoint, and direct alloca arguments to statepoints
962  MIB.addImm(StackMaps::DirectMemRefOp);
963  MIB.add(MI->getOperand(OperIdx));
964  MIB.addImm(0);
965  }
966  // Copy the operands after the frame index.
967  for (unsigned i = OperIdx + 1; i != MI->getNumOperands(); ++i)
968  MIB.add(MI->getOperand(i));
969 
970  // Inherit previous memory operands.
971  MIB->setMemRefs(MI->memoperands_begin(), MI->memoperands_end());
972  assert(MIB->mayLoad() && "Folded a stackmap use to a non-load!");
973 
974  // Add a new memory operand for this FI.
975  assert(MFI.getObjectOffset(FI) != -1);
976 
977  auto Flags = MachineMemOperand::MOLoad;
978  if (MI->getOpcode() == TargetOpcode::STATEPOINT) {
981  }
983  MachinePointerInfo::getFixedStack(MF, FI), Flags,
985  MIB->addMemOperand(MF, MMO);
986 
987  // Replace the instruction and update the operand index.
988  MBB->insert(MachineBasicBlock::iterator(MI), MIB);
989  OperIdx += (MIB->getNumOperands() - MI->getNumOperands()) - 1;
990  MI->eraseFromParent();
991  MI = MIB;
992  }
993  return MBB;
994 }
995 
996 MachineBasicBlock *
997 TargetLoweringBase::emitXRayCustomEvent(MachineInstr &MI,
998  MachineBasicBlock *MBB) const {
999  assert(MI.getOpcode() == TargetOpcode::PATCHABLE_EVENT_CALL &&
1000  "Called emitXRayCustomEvent on the wrong MI!");
1001  auto &MF = *MI.getMF();
1002  auto MIB = BuildMI(MF, MI.getDebugLoc(), MI.getDesc());
1003  for (unsigned OpIdx = 0; OpIdx != MI.getNumOperands(); ++OpIdx)
1004  MIB.add(MI.getOperand(OpIdx));
1005 
1006  MBB->insert(MachineBasicBlock::iterator(MI), MIB);
1007  MI.eraseFromParent();
1008  return MBB;
1009 }
1010 
1011 MachineBasicBlock *
1012 TargetLoweringBase::emitXRayTypedEvent(MachineInstr &MI,
1013  MachineBasicBlock *MBB) const {
1014  assert(MI.getOpcode() == TargetOpcode::PATCHABLE_TYPED_EVENT_CALL &&
1015  "Called emitXRayTypedEvent on the wrong MI!");
1016  auto &MF = *MI.getMF();
1017  auto MIB = BuildMI(MF, MI.getDebugLoc(), MI.getDesc());
1018  for (unsigned OpIdx = 0; OpIdx != MI.getNumOperands(); ++OpIdx)
1019  MIB.add(MI.getOperand(OpIdx));
1020 
1021  MBB->insert(MachineBasicBlock::iterator(MI), MIB);
1022  MI.eraseFromParent();
1023  return MBB;
1024 }
1025 
1026 /// findRepresentativeClass - Return the largest legal super-reg register class
1027 /// of the register class for the specified type and its associated "cost".
1028 // This function is in TargetLowering because it uses RegClassForVT which would
1029 // need to be moved to TargetRegisterInfo and would necessitate moving
1030 // isTypeLegal over as well - a massive change that would just require
1031 // TargetLowering having a TargetRegisterInfo class member that it would use.
1032 std::pair<const TargetRegisterClass *, uint8_t>
1033 TargetLoweringBase::findRepresentativeClass(const TargetRegisterInfo *TRI,
1034  MVT VT) const {
1035  const TargetRegisterClass *RC = RegClassForVT[VT.SimpleTy];
1036  if (!RC)
1037  return std::make_pair(RC, 0);
1038 
1039  // Compute the set of all super-register classes.
1040  BitVector SuperRegRC(TRI->getNumRegClasses());
1041  for (SuperRegClassIterator RCI(RC, TRI); RCI.isValid(); ++RCI)
1042  SuperRegRC.setBitsInMask(RCI.getMask());
1043 
1044  // Find the first legal register class with the largest spill size.
1045  const TargetRegisterClass *BestRC = RC;
1046  for (unsigned i : SuperRegRC.set_bits()) {
1047  const TargetRegisterClass *SuperRC = TRI->getRegClass(i);
1048  // We want the largest possible spill size.
1049  if (TRI->getSpillSize(*SuperRC) <= TRI->getSpillSize(*BestRC))
1050  continue;
1051  if (!isLegalRC(*TRI, *SuperRC))
1052  continue;
1053  BestRC = SuperRC;
1054  }
1055  return std::make_pair(BestRC, 1);
1056 }
1057 
1058 /// computeRegisterProperties - Once all of the register classes are added,
1059 /// this allows us to compute derived properties we expose.
1060 void TargetLoweringBase::computeRegisterProperties(
1061  const TargetRegisterInfo *TRI) {
1062  static_assert(MVT::LAST_VALUETYPE <= MVT::MAX_ALLOWED_VALUETYPE,
1063  "Too many value types for ValueTypeActions to hold!");
1064 
1065  // Everything defaults to needing one register.
1066  for (unsigned i = 0; i != MVT::LAST_VALUETYPE; ++i) {
1067  NumRegistersForVT[i] = 1;
1068  RegisterTypeForVT[i] = TransformToType[i] = (MVT::SimpleValueType)i;
1069  }
1070  // ...except isVoid, which doesn't need any registers.
1071  NumRegistersForVT[MVT::isVoid] = 0;
1072 
1073  // Find the largest integer register class.
1074  unsigned LargestIntReg = MVT::LAST_INTEGER_VALUETYPE;
1075  for (; RegClassForVT[LargestIntReg] == nullptr; --LargestIntReg)
1076  assert(LargestIntReg != MVT::i1 && "No integer registers defined!");
1077 
1078  // Every integer value type larger than this largest register takes twice as
1079  // many registers to represent as the previous ValueType.
1080  for (unsigned ExpandedReg = LargestIntReg + 1;
1081  ExpandedReg <= MVT::LAST_INTEGER_VALUETYPE; ++ExpandedReg) {
1082  NumRegistersForVT[ExpandedReg] = 2*NumRegistersForVT[ExpandedReg-1];
1083  RegisterTypeForVT[ExpandedReg] = (MVT::SimpleValueType)LargestIntReg;
1084  TransformToType[ExpandedReg] = (MVT::SimpleValueType)(ExpandedReg - 1);
1085  ValueTypeActions.setTypeAction((MVT::SimpleValueType)ExpandedReg,
1086  TypeExpandInteger);
1087  }
1088 
1089  // Inspect all of the ValueType's smaller than the largest integer
1090  // register to see which ones need promotion.
1091  unsigned LegalIntReg = LargestIntReg;
1092  for (unsigned IntReg = LargestIntReg - 1;
1093  IntReg >= (unsigned)MVT::i1; --IntReg) {
1094  MVT IVT = (MVT::SimpleValueType)IntReg;
1095  if (isTypeLegal(IVT)) {
1096  LegalIntReg = IntReg;
1097  } else {
1098  RegisterTypeForVT[IntReg] = TransformToType[IntReg] =
1099  (const MVT::SimpleValueType)LegalIntReg;
1100  ValueTypeActions.setTypeAction(IVT, TypePromoteInteger);
1101  }
1102  }
1103 
1104  // ppcf128 type is really two f64's.
1105  if (!isTypeLegal(MVT::ppcf128)) {
1106  if (isTypeLegal(MVT::f64)) {
1107  NumRegistersForVT[MVT::ppcf128] = 2*NumRegistersForVT[MVT::f64];
1108  RegisterTypeForVT[MVT::ppcf128] = MVT::f64;
1109  TransformToType[MVT::ppcf128] = MVT::f64;
1110  ValueTypeActions.setTypeAction(MVT::ppcf128, TypeExpandFloat);
1111  } else {
1112  NumRegistersForVT[MVT::ppcf128] = NumRegistersForVT[MVT::i128];
1113  RegisterTypeForVT[MVT::ppcf128] = RegisterTypeForVT[MVT::i128];
1114  TransformToType[MVT::ppcf128] = MVT::i128;
1115  ValueTypeActions.setTypeAction(MVT::ppcf128, TypeSoftenFloat);
1116  }
1117  }
1118 
1119  // Decide how to handle f128. If the target does not have native f128 support,
1120  // expand it to i128 and we will be generating soft float library calls.
1121  if (!isTypeLegal(MVT::f128)) {
1122  NumRegistersForVT[MVT::f128] = NumRegistersForVT[MVT::i128];
1123  RegisterTypeForVT[MVT::f128] = RegisterTypeForVT[MVT::i128];
1124  TransformToType[MVT::f128] = MVT::i128;
1125  ValueTypeActions.setTypeAction(MVT::f128, TypeSoftenFloat);
1126  }
1127 
1128  // Decide how to handle f64. If the target does not have native f64 support,
1129  // expand it to i64 and we will be generating soft float library calls.
1130  if (!isTypeLegal(MVT::f64)) {
1131  NumRegistersForVT[MVT::f64] = NumRegistersForVT[MVT::i64];
1132  RegisterTypeForVT[MVT::f64] = RegisterTypeForVT[MVT::i64];
1133  TransformToType[MVT::f64] = MVT::i64;
1134  ValueTypeActions.setTypeAction(MVT::f64, TypeSoftenFloat);
1135  }
1136 
1137  // Decide how to handle f32. If the target does not have native f32 support,
1138  // expand it to i32 and we will be generating soft float library calls.
1139  if (!isTypeLegal(MVT::f32)) {
1140  NumRegistersForVT[MVT::f32] = NumRegistersForVT[MVT::i32];
1141  RegisterTypeForVT[MVT::f32] = RegisterTypeForVT[MVT::i32];
1142  TransformToType[MVT::f32] = MVT::i32;
1143  ValueTypeActions.setTypeAction(MVT::f32, TypeSoftenFloat);
1144  }
1145 
1146  // Decide how to handle f16. If the target does not have native f16 support,
1147  // promote it to f32, because there are no f16 library calls (except for
1148  // conversions).
1149  if (!isTypeLegal(MVT::f16)) {
1150  NumRegistersForVT[MVT::f16] = NumRegistersForVT[MVT::f32];
1151  RegisterTypeForVT[MVT::f16] = RegisterTypeForVT[MVT::f32];
1152  TransformToType[MVT::f16] = MVT::f32;
1153  ValueTypeActions.setTypeAction(MVT::f16, TypePromoteFloat);
1154  }
1155 
1156  // Loop over all of the vector value types to see which need transformations.
1157  for (unsigned i = MVT::FIRST_VECTOR_VALUETYPE;
1158  i <= (unsigned)MVT::LAST_VECTOR_VALUETYPE; ++i) {
1159  MVT VT = (MVT::SimpleValueType) i;
1160  if (isTypeLegal(VT))
1161  continue;
1162 
1163  MVT EltVT = VT.getVectorElementType();
1164  unsigned NElts = VT.getVectorNumElements();
1165  bool IsLegalWiderType = false;
1166  LegalizeTypeAction PreferredAction = getPreferredVectorAction(VT);
1167  switch (PreferredAction) {
1168  case TypePromoteInteger:
1169  // Try to promote the elements of integer vectors. If no legal
1170  // promotion was found, fall through to the widen-vector method.
1171  for (unsigned nVT = i + 1; nVT <= MVT::LAST_INTEGER_VECTOR_VALUETYPE; ++nVT) {
1172  MVT SVT = (MVT::SimpleValueType) nVT;
1173  // Promote vectors of integers to vectors with the same number
1174  // of elements, with a wider element type.
1175  if (SVT.getScalarSizeInBits() > EltVT.getSizeInBits() &&
1176  SVT.getVectorNumElements() == NElts && isTypeLegal(SVT)) {
1177  TransformToType[i] = SVT;
1178  RegisterTypeForVT[i] = SVT;
1179  NumRegistersForVT[i] = 1;
1180  ValueTypeActions.setTypeAction(VT, TypePromoteInteger);
1181  IsLegalWiderType = true;
1182  break;
1183  }
1184  }
1185  if (IsLegalWiderType)
1186  break;
1187  LLVM_FALLTHROUGH;
1188 
1189  case TypeWidenVector:
1190  // Try to widen the vector.
1191  for (unsigned nVT = i + 1; nVT <= MVT::LAST_VECTOR_VALUETYPE; ++nVT) {
1192  MVT SVT = (MVT::SimpleValueType) nVT;
1193  if (SVT.getVectorElementType() == EltVT
1194  && SVT.getVectorNumElements() > NElts && isTypeLegal(SVT)) {
1195  TransformToType[i] = SVT;
1196  RegisterTypeForVT[i] = SVT;
1197  NumRegistersForVT[i] = 1;
1198  ValueTypeActions.setTypeAction(VT, TypeWidenVector);
1199  IsLegalWiderType = true;
1200  break;
1201  }
1202  }
1203  if (IsLegalWiderType)
1204  break;
1205  LLVM_FALLTHROUGH;
1206 
1207  case TypeSplitVector:
1208  case TypeScalarizeVector: {
1209  MVT IntermediateVT;
1210  MVT RegisterVT;
1211  unsigned NumIntermediates;
1212  NumRegistersForVT[i] = getVectorTypeBreakdownMVT(VT, IntermediateVT,
1213  NumIntermediates, RegisterVT, this);
1214  RegisterTypeForVT[i] = RegisterVT;
1215 
1216  MVT NVT = VT.getPow2VectorType();
1217  if (NVT == VT) {
1218  // Type is already a power of 2. The default action is to split.
1219  TransformToType[i] = MVT::Other;
1220  if (PreferredAction == TypeScalarizeVector)
1221  ValueTypeActions.setTypeAction(VT, TypeScalarizeVector);
1222  else if (PreferredAction == TypeSplitVector)
1223  ValueTypeActions.setTypeAction(VT, TypeSplitVector);
1224  else
1225  // Set type action according to the number of elements.
1226  ValueTypeActions.setTypeAction(VT, NumElts == 1 ? TypeScalarizeVector
1227  : TypeSplitVector);
1228  } else {
1229  TransformToType[i] = NVT;
1230  ValueTypeActions.setTypeAction(VT, TypeWidenVector);
1231  }
1232  break;
1233  }
1234  default:
1235  llvm_unreachable("Unknown vector legalization action!");
1236  }
1237  }
1238 
1239  // Determine the 'representative' register class for each value type.
1240  // A representative register class is the largest (meaning one which is
1241  // not a sub-register class / subreg register class) legal register class for
1242  // a group of value types. For example, on i386, i8, i16, and i32
1243  // representative would be GR32; while on x86_64 it's GR64.
1244  for (unsigned i = 0; i != MVT::LAST_VALUETYPE; ++i) {
1245  const TargetRegisterClass* RRC;
1246  uint8_t Cost;
1247  std::tie(RRC, Cost) = findRepresentativeClass(TRI, (MVT::SimpleValueType)i);
1248  RepRegClassForVT[i] = RRC;
1249  RepRegClassCostForVT[i] = Cost;
1250  }
1251 }
1252 
1253 EVT TargetLoweringBase::getSetCCResultType(const DataLayout &DL, LLVMContext &,
1254  EVT VT) const {
1255  assert(!VT.isVector() && "No default SetCC type for vectors!");
1256  return getPointerTy(DL).SimpleTy;
1257 }
1258 
1259 MVT::SimpleValueType TargetLoweringBase::getCmpLibcallReturnType() const {
1260  return MVT::i32; // return the default value
1261 }
1262 
1263 /// getVectorTypeBreakdown - Vector types are broken down into some number of
1264 /// legal first class types. For example, MVT::v8f32 maps to 2 MVT::v4f32
1265 /// with Altivec or SSE1, or 8 promoted MVT::f64 values with the X86 FP stack.
1266 /// Similarly, MVT::v2i64 turns into 4 MVT::i32 values with both PPC and X86.
1267 ///
1268 /// This method returns the number of registers needed, and the VT for each
1269 /// register. It also returns the VT and quantity of the intermediate values
1270 /// before they are promoted/expanded.
1271 unsigned TargetLoweringBase::getVectorTypeBreakdown(LLVMContext &Context, EVT VT,
1272  EVT &IntermediateVT,
1273  unsigned &NumIntermediates,
1274  MVT &RegisterVT) const {
1275  unsigned NumElts = VT.getVectorNumElements();
1276 
1277  // If there is a wider vector type with the same element type as this one,
1278  // or a promoted vector type that has the same number of elements which
1279  // are wider, then we should convert to that legal vector type.
1280  // This handles things like <2 x float> -> <4 x float> and
1281  // <4 x i1> -> <4 x i32>.
1282  LegalizeTypeAction TA = getTypeAction(Context, VT);
1283  if (NumElts != 1 && (TA == TypeWidenVector || TA == TypePromoteInteger)) {
1284  EVT RegisterEVT = getTypeToTransformTo(Context, VT);
1285  if (isTypeLegal(RegisterEVT)) {
1286  IntermediateVT = RegisterEVT;
1287  RegisterVT = RegisterEVT.getSimpleVT();
1288  NumIntermediates = 1;
1289  return 1;
1290  }
1291  }
1292 
1293  // Figure out the right, legal destination reg to copy into.
1294  EVT EltTy = VT.getVectorElementType();
1295 
1296  unsigned NumVectorRegs = 1;
1297 
1298  // FIXME: We don't support non-power-of-2-sized vectors for now. Ideally we
1299  // could break down into LHS/RHS like LegalizeDAG does.
1300  if (!isPowerOf2_32(NumElts)) {
1301  NumVectorRegs = NumElts;
1302  NumElts = 1;
1303  }
1304 
1305  // Divide the input until we get to a supported size. This will always
1306  // end with a scalar if the target doesn't support vectors.
1307  while (NumElts > 1 && !isTypeLegal(
1308  EVT::getVectorVT(Context, EltTy, NumElts))) {
1309  NumElts >>= 1;
1310  NumVectorRegs <<= 1;
1311  }
1312 
1313  NumIntermediates = NumVectorRegs;
1314 
1315  EVT NewVT = EVT::getVectorVT(Context, EltTy, NumElts);
1316  if (!isTypeLegal(NewVT))
1317  NewVT = EltTy;
1318  IntermediateVT = NewVT;
1319 
1320  MVT DestVT = getRegisterType(Context, NewVT);
1321  RegisterVT = DestVT;
1322  unsigned NewVTSize = NewVT.getSizeInBits();
1323 
1324  // Convert sizes such as i33 to i64.
1325  if (!isPowerOf2_32(NewVTSize))
1326  NewVTSize = NextPowerOf2(NewVTSize);
1327 
1328  if (EVT(DestVT).bitsLT(NewVT)) // Value is expanded, e.g. i64 -> i16.
1329  return NumVectorRegs*(NewVTSize/DestVT.getSizeInBits());
1330 
1331  // Otherwise, promotion or legal types use the same number of registers as
1332  // the vector decimated to the appropriate level.
1333  return NumVectorRegs;
1334 }
1335 
1336 /// Get the EVTs and ArgFlags collections that represent the legalized return
1337 /// type of the given function. This does not require a DAG or a return value,
1338 /// and is suitable for use before any DAGs for the function are constructed.
1339 /// TODO: Move this out of TargetLowering.cpp.
1340 void llvm::GetReturnInfo(Type *ReturnType, AttributeList attr,
1341  SmallVectorImpl<ISD::OutputArg> &Outs,
1342  const TargetLowering &TLI, const DataLayout &DL) {
1343  SmallVector<EVT, 4> ValueVTs;
1344  ComputeValueVTs(TLI, DL, ReturnType, ValueVTs);
1345  unsigned NumValues = ValueVTs.size();
1346  if (NumValues == 0) return;
1347 
1348  for (unsigned j = 0, f = NumValues; j != f; ++j) {
1349  EVT VT = ValueVTs[j];
1350  ISD::NodeType ExtendKind = ISD::ANY_EXTEND;
1351 
1352  if (attr.hasAttribute(AttributeList::ReturnIndex, Attribute::SExt))
1353  ExtendKind = ISD::SIGN_EXTEND;
1354  else if (attr.hasAttribute(AttributeList::ReturnIndex, Attribute::ZExt))
1355  ExtendKind = ISD::ZERO_EXTEND;
1356 
1357  // FIXME: C calling convention requires the return type to be promoted to
1358  // at least 32-bit. But this is not necessary for non-C calling
1359  // conventions. The frontend should mark functions whose return values
1360  // require promoting with signext or zeroext attributes.
1361  if (ExtendKind != ISD::ANY_EXTEND && VT.isInteger()) {
1362  MVT MinVT = TLI.getRegisterType(ReturnType->getContext(), MVT::i32);
1363  if (VT.bitsLT(MinVT))
1364  VT = MinVT;
1365  }
1366 
1367  unsigned NumParts =
1368  TLI.getNumRegistersForCallingConv(ReturnType->getContext(), VT);
1369  MVT PartVT =
1370  TLI.getRegisterTypeForCallingConv(ReturnType->getContext(), VT);
1371 
1372  // 'inreg' on function refers to return value
1373  ISD::ArgFlagsTy Flags = ISD::ArgFlagsTy();
1374  if (attr.hasAttribute(AttributeList::ReturnIndex, Attribute::InReg))
1375  Flags.setInReg();
1376 
1377  // Propagate extension type if any
1378  if (attr.hasAttribute(AttributeList::ReturnIndex, Attribute::SExt))
1379  Flags.setSExt();
1380  else if (attr.hasAttribute(AttributeList::ReturnIndex, Attribute::ZExt))
1381  Flags.setZExt();
1382 
1383  for (unsigned i = 0; i < NumParts; ++i)
1384  Outs.push_back(ISD::OutputArg(Flags, PartVT, VT, /*isFixed=*/true, 0, 0));
1385  }
1386 }
1387 
1388 /// getByValTypeAlignment - Return the desired alignment for ByVal aggregate
1389 /// function arguments in the caller parameter area. This is the actual
1390 /// alignment, not its logarithm.
1391 unsigned TargetLoweringBase::getByValTypeAlignment(Type *Ty,
1392  const DataLayout &DL) const {
1393  return DL.getABITypeAlignment(Ty);
1394 }
1395 
1396 bool TargetLoweringBase::allowsMemoryAccess(LLVMContext &Context,
1397  const DataLayout &DL, EVT VT,
1398  unsigned AddrSpace,
1399  unsigned Alignment,
1400  bool *Fast) const {
1401  // Check if the specified alignment is sufficient based on the data layout.
1402  // TODO: While using the data layout works in practice, a better solution
1403  // would be to implement this check directly (make this a virtual function).
1404  // For example, the ABI alignment may change based on software platform while
1405  // this function should only be affected by hardware implementation.
1406  Type *Ty = VT.getTypeForEVT(Context);
1407  if (Alignment >= DL.getABITypeAlignment(Ty)) {
1408  // Assume that an access that meets the ABI-specified alignment is fast.
1409  if (Fast != nullptr)
1410  *Fast = true;
1411  return true;
1412  }
1413 
1414  // This is a misaligned access.
1415  return allowsMisalignedMemoryAccesses(VT, AddrSpace, Alignment, Fast);
1416 }
1417 
1418 BranchProbability TargetLoweringBase::getPredictableBranchThreshold() const {
1419  return BranchProbability(MinPercentageForPredictableBranch, 100);
1420 }
1421 
1422 //===----------------------------------------------------------------------===//
1423 // TargetTransformInfo Helpers
1424 //===----------------------------------------------------------------------===//
1425 
1426 int TargetLoweringBase::InstructionOpcodeToISD(unsigned Opcode) const {
1427  enum InstructionOpcodes {
1428 #define HANDLE_INST(NUM, OPCODE, CLASS) OPCODE = NUM,
1429 #define LAST_OTHER_INST(NUM) InstructionOpcodesCount = NUM
1430 #include "llvm/IR/Instruction.def"
1431  };
1432  switch (static_cast<InstructionOpcodes>(Opcode)) {
1433  case Ret: return 0;
1434  case Br: return 0;
1435  case Switch: return 0;
1436  case IndirectBr: return 0;
1437  case Invoke: return 0;
1438  case Resume: return 0;
1439  case Unreachable: return 0;
1440  case CleanupRet: return 0;
1441  case CatchRet: return 0;
1442  case CatchPad: return 0;
1443  case CatchSwitch: return 0;
1444  case CleanupPad: return 0;
1445  case Add: return ISD::ADD;
1446  case FAdd: return ISD::FADD;
1447  case Sub: return ISD::SUB;
1448  case FSub: return ISD::FSUB;
1449  case Mul: return ISD::MUL;
1450  case FMul: return ISD::FMUL;
1451  case UDiv: return ISD::UDIV;
1452  case SDiv: return ISD::SDIV;
1453  case FDiv: return ISD::FDIV;
1454  case URem: return ISD::UREM;
1455  case SRem: return ISD::SREM;
1456  case FRem: return ISD::FREM;
1457  case Shl: return ISD::SHL;
1458  case LShr: return ISD::SRL;
1459  case AShr: return ISD::SRA;
1460  case And: return ISD::AND;
1461  case Or: return ISD::OR;
1462  case Xor: return ISD::XOR;
1463  case Alloca: return 0;
1464  case Load: return ISD::LOAD;
1465  case Store: return ISD::STORE;
1466  case GetElementPtr: return 0;
1467  case Fence: return 0;
1468  case AtomicCmpXchg: return 0;
1469  case AtomicRMW: return 0;
1470  case Trunc: return ISD::TRUNCATE;
1471  case ZExt: return ISD::ZERO_EXTEND;
1472  case SExt: return ISD::SIGN_EXTEND;
1473  case FPToUI: return ISD::FP_TO_UINT;
1474  case FPToSI: return ISD::FP_TO_SINT;
1475  case UIToFP: return ISD::UINT_TO_FP;
1476  case SIToFP: return ISD::SINT_TO_FP;
1477  case FPTrunc: return ISD::FP_ROUND;
1478  case FPExt: return ISD::FP_EXTEND;
1479  case PtrToInt: return ISD::BITCAST;
1480  case IntToPtr: return ISD::BITCAST;
1481  case BitCast: return ISD::BITCAST;
1482  case AddrSpaceCast: return ISD::ADDRSPACECAST;
1483  case ICmp: return ISD::SETCC;
1484  case FCmp: return ISD::SETCC;
1485  case PHI: return 0;
1486  case Call: return 0;
1487  case Select: return ISD::SELECT;
1488  case UserOp1: return 0;
1489  case UserOp2: return 0;
1490  case VAArg: return 0;
1491  case ExtractElement: return ISD::EXTRACT_VECTOR_ELT;
1492  case InsertElement: return ISD::INSERT_VECTOR_ELT;
1493  case ShuffleVector: return ISD::VECTOR_SHUFFLE;
1494  case ExtractValue: return ISD::MERGE_VALUES;
1495  case InsertValue: return ISD::MERGE_VALUES;
1496  case LandingPad: return 0;
1497  }
1498 
1499  llvm_unreachable("Unknown instruction type encountered!");
1500 }
1501 
1502 std::pair<int, MVT>
1503 TargetLoweringBase::getTypeLegalizationCost(const DataLayout &DL,
1504  Type *Ty) const {
1505  LLVMContext &C = Ty->getContext();
1506  EVT MTy = getValueType(DL, Ty);
1507 
1508  int Cost = 1;
1509  // We keep legalizing the type until we find a legal kind. We assume that
1510  // the only operation that costs anything is the split. After splitting
1511  // we need to handle two types.
1512  while (true) {
1513  LegalizeKind LK = getTypeConversion(C, MTy);
1514 
1515  if (LK.first == TypeLegal)
1516  return std::make_pair(Cost, MTy.getSimpleVT());
1517 
1518  if (LK.first == TypeSplitVector || LK.first == TypeExpandInteger)
1519  Cost *= 2;
1520 
1521  // Do not loop with f128 type.
1522  if (MTy == LK.second)
1523  return std::make_pair(Cost, MTy.getSimpleVT());
1524 
1525  // Keep legalizing the type.
1526  MTy = LK.second;
1527  }
1528 }
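// Illustration (assuming a target whose widest legal vector type is v4i32):
// legalizing v16i32 takes two splits, so this returns a cost of 4 together
// with the legal MVT v4i32.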
1529 
1530 Value *TargetLoweringBase::getDefaultSafeStackPointerLocation(IRBuilder<> &IRB,
1531  bool UseTLS) const {
1532  // compiler-rt provides a variable with a magic name. Targets that do not
1533  // link with compiler-rt may also provide such a variable.
1534  Module *M = IRB.GetInsertBlock()->getParent()->getParent();
1535  const char *UnsafeStackPtrVar = "__safestack_unsafe_stack_ptr";
1536  auto UnsafeStackPtr =
1537  dyn_cast_or_null<GlobalVariable>(M->getNamedValue(UnsafeStackPtrVar));
1538 
1539  Type *StackPtrTy = Type::getInt8PtrTy(M->getContext());
1540 
1541  if (!UnsafeStackPtr) {
1542  auto TLSModel = UseTLS ?
1543  GlobalValue::InitialExecTLSModel :
1544  GlobalValue::NotThreadLocal;
1545  // The global variable is not defined yet, define it ourselves.
1546  // We use the initial-exec TLS model because we do not support the
1547  // variable living anywhere other than in the main executable.
1548  UnsafeStackPtr = new GlobalVariable(
1549  *M, StackPtrTy, false, GlobalValue::ExternalLinkage, nullptr,
1550  UnsafeStackPtrVar, nullptr, TLSModel);
1551  } else {
1552  // The variable exists, check its type and attributes.
1553  if (UnsafeStackPtr->getValueType() != StackPtrTy)
1554  report_fatal_error(Twine(UnsafeStackPtrVar) + " must have void* type");
1555  if (UseTLS != UnsafeStackPtr->isThreadLocal())
1556  report_fatal_error(Twine(UnsafeStackPtrVar) + " must " +
1557  (UseTLS ? "" : "not ") + "be thread-local");
1558  }
1559  return UnsafeStackPtr;
1560 }
1561 
1562 Value *TargetLoweringBase::getSafeStackPointerLocation(IRBuilder<> &IRB) const {
1563  if (!TM.getTargetTriple().isAndroid())
1564  return getDefaultSafeStackPointerLocation(IRB, true);
1565 
1566  // Android provides a libc function to retrieve the address of the current
1567  // thread's unsafe stack pointer.
1568  Module *M = IRB.GetInsertBlock()->getParent()->getParent();
1569  Type *StackPtrTy = Type::getInt8PtrTy(M->getContext());
1570  Value *Fn = M->getOrInsertFunction("__safestack_pointer_address",
1571  StackPtrTy->getPointerTo(0));
1572  return IRB.CreateCall(Fn);
1573 }
1574 
1575 //===----------------------------------------------------------------------===//
1576 // Loop Strength Reduction hooks
1577 //===----------------------------------------------------------------------===//
1578 
1579 /// isLegalAddressingMode - Return true if the addressing mode represented
1580 /// by AM is legal for this target, for a load/store of the specified type.
1581 bool TargetLoweringBase::isLegalAddressingMode(const DataLayout &DL,
1582  const AddrMode &AM, Type *Ty,
1583  unsigned AS, Instruction *I) const {
1584  // The default implementation of this implements a conservative RISCy, r+r and
1585  // r+i addr mode.
1586 
1587  // Allows a sign-extended 16-bit immediate field.
1588  if (AM.BaseOffs <= -(1LL << 16) || AM.BaseOffs >= (1LL << 16)-1)
1589  return false;
1590 
1591  // No global is ever allowed as a base.
1592  if (AM.BaseGV)
1593  return false;
1594 
1595  // Only support r+r,
1596  switch (AM.Scale) {
1597  case 0: // "r+i" or just "i", depending on HasBaseReg.
1598  break;
1599  case 1:
1600  if (AM.HasBaseReg && AM.BaseOffs) // "r+r+i" is not allowed.
1601  return false;
1602  // Otherwise we have r+r or r+i.
1603  break;
1604  case 2:
1605  if (AM.HasBaseReg || AM.BaseOffs) // 2*r+r or 2*r+i is not allowed.
1606  return false;
1607  // Allow 2*r as r+r.
1608  break;
1609  default: // Don't allow n * r
1610  return false;
1611  }
1612 
1613  return true;
1614 }
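// Under this default implementation: reg+imm with a sign-extended 16-bit
// offset and reg+reg are accepted, as is a bare 2*reg (treated as reg+reg);
// a global base, any other scale, or 2*reg combined with a base register or
// offset is rejected.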
1615 
1616 //===----------------------------------------------------------------------===//
1617 // Stack Protector
1618 //===----------------------------------------------------------------------===//
1619 
1620 // For OpenBSD return its special guard variable. Otherwise return nullptr,
1621 // so that SelectionDAG handles SSP.
1622 Value *TargetLoweringBase::getIRStackGuard(IRBuilder<> &IRB) const {
1623  if (getTargetMachine().getTargetTriple().isOSOpenBSD()) {
1624  Module &M = *IRB.GetInsertBlock()->getParent()->getParent();
1625  PointerType *PtrTy = Type::getInt8PtrTy(M.getContext());
1626  return M.getOrInsertGlobal("__guard_local", PtrTy);
1627  }
1628  return nullptr;
1629 }
1630 
1631 // Currently only support "standard" __stack_chk_guard.
1632 // TODO: add LOAD_STACK_GUARD support.
1633 void TargetLoweringBase::insertSSPDeclarations(Module &M) const {
1634  if (!M.getNamedValue("__stack_chk_guard"))
1635  new GlobalVariable(M, Type::getInt8PtrTy(M.getContext()), false,
1636  GlobalVariable::ExternalLinkage,
1637  nullptr, "__stack_chk_guard");
1638 }
1639 
1640 // Currently only support "standard" __stack_chk_guard.
1641 // TODO: add LOAD_STACK_GUARD support.
1642 Value *TargetLoweringBase::getSDagStackGuard(const Module &M) const {
1643  return M.getNamedValue("__stack_chk_guard");
1644 }
1645 
1646 Value *TargetLoweringBase::getSSPStackGuardCheck(const Module &M) const {
1647  return nullptr;
1648 }
1649 
1650 unsigned TargetLoweringBase::getMinimumJumpTableEntries() const {
1651  return MinimumJumpTableEntries;
1652 }
1653 
1654 void TargetLoweringBase::setMinimumJumpTableEntries(unsigned Val) {
1655  MinimumJumpTableEntries = Val;
1656 }
1657 
1658 unsigned TargetLoweringBase::getMinimumJumpTableDensity(bool OptForSize) const {
1659  return OptForSize ? OptsizeJumpTableDensity : JumpTableDensity;
1660 }
1661 
1662 unsigned TargetLoweringBase::getMaximumJumpTableSize() const {
1663  return MaximumJumpTableSize;
1664 }
1665 
1666 void TargetLoweringBase::setMaximumJumpTableSize(unsigned Val) {
1667  MaximumJumpTableSize = Val;
1668 }
1669 
1670 //===----------------------------------------------------------------------===//
1671 // Reciprocal Estimates
1672 //===----------------------------------------------------------------------===//
1673 
1674 /// Get the reciprocal estimate attribute string for a function that will
1675 /// override the target defaults.
1676 static StringRef getRecipEstimateForFunc(MachineFunction &MF) {
1677  const Function &F = MF.getFunction();
1678  return F.getFnAttribute("reciprocal-estimates").getValueAsString();
1679 }
1680 
1681 /// Construct a string for the given reciprocal operation of the given type.
1682 /// This string should match the corresponding option to the front-end's
1683 /// "-mrecip" flag assuming those strings have been passed through in an
1684 /// attribute string. For example, "vec-divf" for a division of a vXf32.
1685 static std::string getReciprocalOpName(bool IsSqrt, EVT VT) {
1686  std::string Name = VT.isVector() ? "vec-" : "";
1687 
1688  Name += IsSqrt ? "sqrt" : "div";
1689 
1690  // TODO: Handle "half" or other float types?
1691  if (VT.getScalarType() == MVT::f64) {
1692  Name += "d";
1693  } else {
1694  assert(VT.getScalarType() == MVT::f32 &&
1695  "Unexpected FP type for reciprocal estimate");
1696  Name += "f";
1697  }
1698 
1699  return Name;
1700 }
1701 
1702 /// Return the character position and value (a single numeric character) of a
1703 /// customized refinement operation in the input string if it exists. Return
1704 /// false if there is no customized refinement step count.
1705 static bool parseRefinementStep(StringRef In, size_t &Position,
1706  uint8_t &Value) {
1707  const char RefStepToken = ':';
1708  Position = In.find(RefStepToken);
1709  if (Position == StringRef::npos)
1710  return false;
1711 
1712  StringRef RefStepString = In.substr(Position + 1);
1713  // Allow exactly one numeric character for the additional refinement
1714  // step parameter.
1715  if (RefStepString.size() == 1) {
1716  char RefStepChar = RefStepString[0];
1717  if (RefStepChar >= '0' && RefStepChar <= '9') {
1718  Value = RefStepChar - '0';
1719  return true;
1720  }
1721  }
1722  report_fatal_error("Invalid refinement step for -recip.");
1723 }
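// Illustrative example (editorial): for In = "divf:2" the ':' is found at
// Position = 4 and the digit after it yields Value = 2; for In = "divf" (no
// ':') the function returns false; anything else after the ':' (for example
// "divf:12" or "divf:x") reaches the report_fatal_error() above.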
1724 
1725 /// For the input attribute string, return one of the ReciprocalEstimate enum
1726 /// status values (enabled, disabled, or not specified) for this operation on
1727 /// the specified data type.
1728 static int getOpEnabled(bool IsSqrt, EVT VT, StringRef Override) {
1729  if (Override.empty())
1730  return TargetLoweringBase::ReciprocalEstimate::Unspecified;
1731 
1732  SmallVector<StringRef, 4> OverrideVector;
1733  Override.split(OverrideVector, ',');
1734  unsigned NumArgs = OverrideVector.size();
1735 
1736  // Check if "all", "none", or "default" was specified.
1737  if (NumArgs == 1) {
1738  // Look for an optional setting of the number of refinement steps needed
1739  // for this type of reciprocal operation.
1740  size_t RefPos;
1741  uint8_t RefSteps;
1742  if (parseRefinementStep(Override, RefPos, RefSteps)) {
1743  // Split the string for further processing.
1744  Override = Override.substr(0, RefPos);
1745  }
1746 
1747  // All reciprocal types are enabled.
1748  if (Override == "all")
1749  return TargetLoweringBase::ReciprocalEstimate::Enabled;
1750 
1751  // All reciprocal types are disabled.
1752  if (Override == "none")
1753  return TargetLoweringBase::ReciprocalEstimate::Disabled;
1754 
1755  // Target defaults for enablement are used.
1756  if (Override == "default")
1757  return TargetLoweringBase::ReciprocalEstimate::Unspecified;
1758  }
1759 
1760  // The attribute string may omit the size suffix ('f'/'d').
1761  std::string VTName = getReciprocalOpName(IsSqrt, VT);
1762  std::string VTNameNoSize = VTName;
1763  VTNameNoSize.pop_back();
1764  static const char DisabledPrefix = '!';
1765 
1766  for (StringRef RecipType : OverrideVector) {
1767  size_t RefPos;
1768  uint8_t RefSteps;
1769  if (parseRefinementStep(RecipType, RefPos, RefSteps))
1770  RecipType = RecipType.substr(0, RefPos);
1771 
1772  // Ignore the disablement token for string matching.
1773  bool IsDisabled = RecipType[0] == DisabledPrefix;
1774  if (IsDisabled)
1775  RecipType = RecipType.substr(1);
1776 
1777  if (RecipType.equals(VTName) || RecipType.equals(VTNameNoSize))
1778  return IsDisabled ? TargetLoweringBase::ReciprocalEstimate::Disabled
1779   : TargetLoweringBase::ReciprocalEstimate::Enabled;
1780  }
1781 
1782  return TargetLoweringBase::ReciprocalEstimate::Unspecified;
1783 }
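// Editorial note: because VTNameNoSize drops the trailing 'f'/'d', an entry
// without a size suffix applies to both f32 and f64. For example, with the
// hypothetical attribute value "sqrt:1", getOpEnabled() returns Enabled for
// both scalar f32 and f64 square roots, and getOpRefinementSteps() below
// reports one refinement step for each.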
1784 
1785 /// For the input attribute string, return the customized refinement step count
1786 /// for this operation on the specified data type. If the step count does not
1787 /// exist, return the ReciprocalEstimate enum value for unspecified.
1788 static int getOpRefinementSteps(bool IsSqrt, EVT VT, StringRef Override) {
1789  if (Override.empty())
1790  return TargetLoweringBase::ReciprocalEstimate::Unspecified;
1791 
1792  SmallVector<StringRef, 4> OverrideVector;
1793  Override.split(OverrideVector, ',');
1794  unsigned NumArgs = OverrideVector.size();
1795 
1796  // Check if "all", "default", or "none" was specified.
1797  if (NumArgs == 1) {
1798  // Look for an optional setting of the number of refinement steps needed
1799  // for this type of reciprocal operation.
1800  size_t RefPos;
1801  uint8_t RefSteps;
1802  if (!parseRefinementStep(Override, RefPos, RefSteps))
1803  return TargetLoweringBase::ReciprocalEstimate::Unspecified;
1804 
1805  // Split the string for further processing.
1806  Override = Override.substr(0, RefPos);
1807  assert(Override != "none" &&
1808  "Disabled reciprocals, but specified refinement steps?");
1809 
1810  // If this is a general override, return the specified number of steps.
1811  if (Override == "all" || Override == "default")
1812  return RefSteps;
1813  }
1814 
1815  // The attribute string may omit the size suffix ('f'/'d').
1816  std::string VTName = getReciprocalOpName(IsSqrt, VT);
1817  std::string VTNameNoSize = VTName;
1818  VTNameNoSize.pop_back();
1819 
1820  for (StringRef RecipType : OverrideVector) {
1821  size_t RefPos;
1822  uint8_t RefSteps;
1823  if (!parseRefinementStep(RecipType, RefPos, RefSteps))
1824  continue;
1825 
1826  RecipType = RecipType.substr(0, RefPos);
1827  if (RecipType.equals(VTName) || RecipType.equals(VTNameNoSize))
1828  return RefSteps;
1829  }
1830 
1831  return TargetLoweringBase::ReciprocalEstimate::Unspecified;
1832 }
1833 
1834 int TargetLoweringBase::getRecipEstimateSqrtEnabled(EVT VT,
1835  MachineFunction &MF) const {
1836  return getOpEnabled(true, VT, getRecipEstimateForFunc(MF));
1837 }
1838 
1839 int TargetLoweringBase::getRecipEstimateDivEnabled(EVT VT,
1840  MachineFunction &MF) const {
1841  return getOpEnabled(false, VT, getRecipEstimateForFunc(MF));
1842 }
1843 
1844 int TargetLoweringBase::getSqrtRefinementSteps(EVT VT,
1845  MachineFunction &MF) const {
1846  return getOpRefinementSteps(true, VT, getRecipEstimateForFunc(MF));
1847 }
1848 
1849 int TargetLoweringBase::getDivRefinementSteps(EVT VT,
1850  MachineFunction &MF) const {
1851  return getOpRefinementSteps(false, VT, getRecipEstimateForFunc(MF));
1852 }
1853 
1854 void TargetLoweringBase::finalizeLowering(MachineFunction &MF) const {
1855  MF.getRegInfo().freezeReservedRegs(MF);
1856 }